diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..208216bb03a9635da47e868b7ae8e94dadf91518 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,25 @@ +# CITATION.cff file for Detection, Pose Estimation and Segmentation for Multiple Bodies: Closing the Virtuous Circle +# This file provides metadata for the software and its preferred citation format. +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +authors: +- family-names: Purkrabek + given-names: Miroslav +- family-names: Matas + given-names: Jiri +title: "Detection, Pose Estimation and Segmentation for Multiple Bodies: Closing the Virtuous Circle" +version: 1.0.0 +date-released: 2025-06-20 +preferred-citation: + type: conference-paper + authors: + - family-names: Purkrabek + given-names: Miroslav + - family-names: Matas + given-names: Jiri + collection-title: "Proceedings of the IEEE/CVF International Conference on Computer Vision" + month: 10 + start: 1 # First page number + end: 8 # Last page number + title: "Detection, Pose Estimation and Segmentation for Multiple Bodies: Closing the Virtuous Circle" + year: 2025 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..becda72a0a428d462b42b255f6ac76844eb81685 --- /dev/null +++ b/app.py @@ -0,0 +1,262 @@ +import gradio as gr +import spaces + +from pathlib import Path + +import numpy as np +import yaml +from demo.demo_utils import DotDict, concat_instances, filter_instances, pose_nms, visualize_demo +from demo.mm_utils import run_MMDetector, run_MMPose +from mmdet.apis import init_detector +from demo.sam2_utils import prepare_model as prepare_sam2_model +from demo.sam2_utils import process_image_with_SAM + +from mmpose.apis import init_model as init_pose_estimator +from mmpose.utils import adapt_mmdet_pipeline + +# Default thresholds +DEFAULT_CAT_ID: int = 0 + +DEFAULT_BBOX_THR: float = 0.3 +DEFAULT_NMS_THR: float = 0.3 +DEFAULT_KPT_THR: float = 0.3 + +# Global models variable +det_model = None +pose_model = None +sam2_model = None + +def _parse_yaml_config(yaml_path: Path) -> DotDict: + """ + Load BMP configuration from a YAML file. + + Args: + yaml_path (Path): Path to YAML config. 
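+
+    Example (illustrative; keys follow configs/bmp_D3.yaml, which this demo loads):
+        cfg = _parse_yaml_config(Path("configs/bmp_D3.yaml"))
+        cfg.num_bmp_iters        # DotDict exposes nested YAML keys as attributes
+        cfg.detector.det_config  # e.g. the RTMDet detector config path
+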
+ Returns: + DotDict: Nested config dictionary. + """ + with open(yaml_path, "r") as f: + cfg = yaml.safe_load(f) + return DotDict(cfg) + +def load_models(bmp_config): + device = 'cuda:0' + + global det_model, pose_model, sam2_model + + # build detectors + det_model = init_detector(bmp_config.detector.det_config, bmp_config.detector.det_checkpoint, device='cpu') # Detect with CPU because of installation issues on HF + det_model.cfg = adapt_mmdet_pipeline(det_model.cfg) + + + # build pose estimator + pose_model = init_pose_estimator( + bmp_config.pose_estimator.pose_config, + bmp_config.pose_estimator.pose_checkpoint, + device=device, + cfg_options=dict(model=dict(test_cfg=dict(output_heatmaps=False))), + ) + + sam2_model = prepare_sam2_model( + model_cfg=bmp_config.sam2.sam2_config, + model_checkpoint=bmp_config.sam2.sam2_checkpoint, + ) + + return det_model, pose_model, sam2_model + +@spaces.GPU(duration=60) +def process_image_with_BMP( + img: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + Run the full BMP pipeline on a single image: detection, pose, SAM mask refinement, and visualization. + + Args: + args (Namespace): Parsed CLI arguments. + bmp_config (DotDict): Configuration parameters. + img_path (Path): Path to the input image. + detector: Primary MMDetection model. + detector_prime: Secondary MMDetection model for iterations. + pose_estimator: MMPose model for keypoint estimation. + sam2_model: SAM model for mask refinement. + Returns: + InstanceData: Final merged detections and refined masks. + """ + bmp_config = _parse_yaml_config(Path("configs/bmp_D3.yaml")) + load_models(bmp_config) + + # img: RGB -> BGR + img = img[..., ::-1] + + img_for_detection = img.copy() + rtmdet_result = None + all_detections = None + for iteration in range(bmp_config.num_bmp_iters): + + # Step 1: Detection + det_instances = run_MMDetector( + det_model, + img_for_detection, + det_cat_id=DEFAULT_CAT_ID, + bbox_thr=DEFAULT_BBOX_THR, + nms_thr=DEFAULT_NMS_THR, + ) + if len(det_instances.bboxes) == 0: + continue + + # Step 2: Pose estimation + pose_instances = run_MMPose( + pose_model, + img.copy(), + detections=det_instances, + kpt_thr=DEFAULT_KPT_THR, + ) + + # Restrict to first 17 COCO keypoints + pose_instances.keypoints = pose_instances.keypoints[:, :17, :] + pose_instances.keypoint_scores = pose_instances.keypoint_scores[:, :17] + pose_instances.keypoints = np.concatenate( + [pose_instances.keypoints, pose_instances.keypoint_scores[:, :, None]], axis=-1 + ) + + # Step 3: Pose-NMS and SAM refinement + all_keypoints = ( + pose_instances.keypoints + if all_detections is None + else np.concatenate([all_detections.keypoints, pose_instances.keypoints], axis=0) + ) + all_bboxes = ( + pose_instances.bboxes + if all_detections is None + else np.concatenate([all_detections.bboxes, pose_instances.bboxes], axis=0) + ) + num_valid_kpts = np.sum(all_keypoints[:, :, 2] > bmp_config.sam2.prompting.confidence_thr, axis=1) + keep_indices = pose_nms( + DotDict({"confidence_thr": bmp_config.sam2.prompting.confidence_thr, "oks_thr": bmp_config.oks_nms_thr}), + image_kpts=all_keypoints, + image_bboxes=all_bboxes, + num_valid_kpts=num_valid_kpts, + ) + keep_indices = sorted(keep_indices) # Sort by original index + num_old_detections = 0 if all_detections is None else len(all_detections.bboxes) + keep_new_indices = [i - num_old_detections for i in keep_indices if i >= num_old_detections] + keep_old_indices = [i for i in keep_indices if i < num_old_detections] + if len(keep_new_indices) == 0: + continue + # filter new 
detections and compute scores + new_dets = filter_instances(pose_instances, keep_new_indices) + new_dets.scores = pose_instances.keypoint_scores[keep_new_indices].mean(axis=-1) + old_dets = None + if len(keep_old_indices) > 0: + old_dets = filter_instances(all_detections, keep_old_indices) + + new_detections = process_image_with_SAM( + DotDict(bmp_config.sam2.prompting), + img.copy(), + sam2_model, + new_dets, + old_dets if old_dets is not None else None, + ) + + # Merge detections + if all_detections is None: + all_detections = new_detections + else: + all_detections = concat_instances(all_detections, new_dets) + + # Step 4: Visualization + img_for_detection, rtmdet_r, _ = visualize_demo( + img.copy(), + all_detections, + ) + + if iteration == 0: + rtmdet_result = rtmdet_r + + _, _, bmp_result = visualize_demo( + img.copy(), + all_detections, + ) + + # img: BGR -> RGB + rtmdet_result = rtmdet_result[..., ::-1] + bmp_result = bmp_result[..., ::-1] + + return rtmdet_result, bmp_result + + +with gr.Blocks() as app: + gr.Markdown("# BBoxMaskPose Image Demo") + gr.Markdown( + "Official demo for paper **Detection, Pose Estimation and Segmentation for Multiple Bodies: Closing the Virtuous Circle.** [ICCV 2025]" + ) + gr.Markdown( + "For details, see the [project website](https://mirapurkrabek.github.io/BBox-Mask-Pose/) or [arXiv paper](https://arxiv.org/abs/2412.01562). " + "The demo showcases the capabilities of the BBoxMaskPose framework on any image. " + "If you want to play around with parameters, use the [GitHub demo](https://github.com/MiraPurkrabek/BBoxMaskPose). " + "Please note that due to HuggingFace restrictions, the demo runs much slower than the GitHub implementation." + ) + + with gr.Row(): + with gr.Column(): + original_image_input = gr.Image(type="numpy", label="Original Image") + submit_button = gr.Button("Run Inference") + + with gr.Column(): + output_standard = gr.Image(type="numpy", label="RTMDet-L + MaskPose-B") + + with gr.Column(): + output_sahi_sliced = gr.Image(type="numpy", label="BBoxMaskPose") + + + gr.Examples( + label="OCHuman examples", + examples=[ + ["examples/004806.jpg"], + ["examples/005056.jpg"], + ["examples/004981.jpg"], + ["examples/004655.jpg"], + ["examples/004684.jpg"], + ["examples/004974.jpg"], + ["examples/004983.jpg"], + ["examples/005017.jpg"], + ["examples/004849.jpg"], + ], + inputs=[ + original_image_input, + ], + outputs=[output_standard, output_sahi_sliced], + fn=process_image_with_BMP, + cache_examples=True, + ) + gr.Examples( + label="In-the-wild examples", + examples=[ + ["examples/prochazka_MMA.jpg"], + ["examples/riner_judo.jpg"], + ["examples/tackle3.jpg"], + ["examples/tackle1.jpg"], + ["examples/tackle2.jpg"], + ["examples/tackle5.jpg"], + ["examples/floorball_SKV_3.jpg"], + ["examples/santa_o_crop.jpg"], + ["examples/floorball_SKV_2.jpg"], + ], + inputs=[ + original_image_input, + ], + outputs=[output_standard, output_sahi_sliced], + fn=process_image_with_BMP, + cache_examples=True, + ) + + submit_button.click( + fn=process_image_with_BMP, + inputs=[ + original_image_input, + ], + outputs=[output_standard, output_sahi_sliced], + ) + +# Launch the demo +app.launch() \ No newline at end of file diff --git a/configs/README.md b/configs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca1ea632f28d299b045b4e21416773a6fdfe3591 --- /dev/null +++ b/configs/README.md @@ -0,0 +1,30 @@ +# Configuration Files Overview + +This directory contains configuration files for reproducing experiments and running inference 
across different components of the BBoxMaskPose project. + +## Which configs are available? + +Here you can find configs setting-up hyperparameters of the whole loop. +These are mainly: +- How to prompt SAM +- Which models to use (detection, pose, SAM) +- How to chain models +- ... + +For easier reference, the configs have the same names as in the supplementary material of the ICCV paper. +So for example config [**bmp_D3.yaml**](bmp_D3.yaml) is the prompting experiment used in the BMP loop. +For details, see Tabs. 6 - 8 of the supplementary. + + +## Where are appropriate configs? + +- **/configs** (this folder) + - Hyperparameter configurations for the BMP loop experiments. Use these files to reproduce training and evaluation settings. + +- **/mmpose/configs** + - Configuration files for MMPose, following the same format and structure as MMPose v1.3.1. Supports models, datasets, and training pipelines. + +- **/sam2/configs** + - Configuration files for SAM2, matching the format and directory layout of the original SAM v2.1 repository. Use these for prompt-driven segmentation and related tasks. + + diff --git a/configs/bmp_D3.yaml b/configs/bmp_D3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a537f13ee67cbba1ba0b32e8b00d4a574a46fe19 --- /dev/null +++ b/configs/bmp_D3.yaml @@ -0,0 +1,37 @@ +# BBoxMaskPose Hyperparameters from Experiment D3. +# For details, see the paper: https://arxiv.org/abs/2412.01562, Tab 8. in the supplementary. + +# This configuration is good for the BMP loop as was used for most of the experiments. +detector: + det_config: 'mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py' + det_checkpoint: 'https://huggingface.co/vrg-prague/BBoxMaskPose/resolve/main/rtmdet-ins-l-mask.pth' + + # Detectors D and D' could be different. + det_prime_config: null + det_prime_checkpoint: null + +pose_estimator: + pose_config: 'mmpose/configs/MaskPose/ViTb-multi_mask.py' + pose_checkpoint: 'https://huggingface.co/vrg-prague/BBoxMaskPose/resolve/main/MaskPose-b.pth' + +sam2: + sam2_config: 'configs/samurai/sam2.1_hiera_b+.yaml' # Use SAMURAI as it has img_size 1024 (SAM-2.1 has 512) + sam2_checkpoint: 'models/SAM/sam2.1_hiera_base_plus.pt' + prompting: + batch: False + use_bbox: False + num_pos_keypoints: 6 + num_pos_keypoints_if_crowd: 6 + num_neg_keypoints: 0 + confidence_thr: 0.3 + visibility_thr: 0.3 + selection_method: 'distance+confidence' + extend_bbox: False + pose_mask_consistency: False + crowd_by_max_iou: False # Determine if the instance is in the multi-body scenario. If yes, use different amount of keypoints and NO BBOX. If no, use bbox according to 'use_bbox' argument. + crop: False + exclusive_masks: True + ignore_small_bboxes: False + +num_bmp_iters: 2 +oks_nms_thr: 0.8 \ No newline at end of file diff --git a/configs/bmp_J1.yaml b/configs/bmp_J1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51a43715a877f084e531356d1a02d4413dc776ce --- /dev/null +++ b/configs/bmp_J1.yaml @@ -0,0 +1,39 @@ +# BBoxMaskPose Hyperparameters from Experiment J1. +# For details, see the paper: https://arxiv.org/abs/2412.01562, Tab 8. in the supplementary. + +# This configuration is good for getting extra AP points when the estimates are already good. +# It is not recommended for the whole loop (as done here -- this is for the demo) but rather for +# the det-pose-sam-pose studied in Tab. 4. 
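+
+# Illustrative invocation (assumes the repository root as working directory; any input image works,
+# e.g. the files under examples/ used by the Gradio demo):
+#   python demo/bmp_demo.py configs/bmp_J1.yaml examples/004806.jpg --output-root outputs --create-gif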
+detector: + det_config: 'mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py' + det_checkpoint: 'https://huggingface.co/vrg-prague/BBoxMaskPose/resolve/main/rtmdet-ins-l-mask.pth' + + # Detectors D and D' could be different. + det_prime_config: null + det_prime_checkpoint: null + +pose_estimator: + pose_config: 'mmpose/configs/MaskPose/ViTb-multi_mask.py' + pose_checkpoint: 'https://huggingface.co/vrg-prague/BBoxMaskPose/resolve/main/MaskPose-b.pth' + +sam2: + sam2_config: 'configs/samurai/sam2.1_hiera_b+.yaml' # Use SAMURAI as it has img_size 1024 (SAM-2.1 has 512) + sam2_checkpoint: 'models/SAM/sam2.1_hiera_base_plus.pt' + prompting: + batch: True + use_bbox: False + num_pos_keypoints: 4 + num_pos_keypoints_if_crowd: 6 + num_neg_keypoints: 0 + confidence_thr: 0.5 + visibility_thr: 0.5 + selection_method: 'distance+confidence' + extend_bbox: False + pose_mask_consistency: False + crowd_by_max_iou: 0.5 # Determine if the instance is in the multi-body scenario. If yes, use different amount of keypoints and NO BBOX. If no, use bbox according to 'use_bbox' argument. + crop: False + exclusive_masks: True + ignore_small_bboxes: False + +num_bmp_iters: 2 +oks_nms_thr: 0.8 \ No newline at end of file diff --git a/demo/bmp_demo.py b/demo/bmp_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..b360b6ebcf7da06f9b0da580be1d7bf45d44f2fa --- /dev/null +++ b/demo/bmp_demo.py @@ -0,0 +1,250 @@ +# Copyright (c) OpenMMLab. All rights reserved. +""" +BMP Demo script: sequentially runs detection, pose estimation, SAM-based mask refinement, and visualization. +Usage: + python bmp_demo.py [--output-root ] +""" + +import os +import shutil +from argparse import ArgumentParser, Namespace +from pathlib import Path + +import mmcv +import mmengine +import numpy as np +import yaml +from demo_utils import DotDict, concat_instances, create_GIF, filter_instances, pose_nms, visualize_itteration +from mm_utils import run_MMDetector, run_MMPose +from mmdet.apis import init_detector +from mmengine.logging import print_log +from mmengine.structures import InstanceData +from sam2_utils import prepare_model as prepare_sam2_model +from sam2_utils import process_image_with_SAM + +from mmpose.apis import init_model as init_pose_estimator +from mmpose.utils import adapt_mmdet_pipeline + +# Default thresholds +DEFAULT_DET_CAT_ID: int = 0 # "person" +DEFAULT_BBOX_THR: float = 0.3 +DEFAULT_NMS_THR: float = 0.3 +DEFAULT_KPT_THR: float = 0.3 + + +def parse_args() -> Namespace: + """ + Parse command-line arguments for BMP demo. + + Returns: + Namespace: Contains bmp_config (Path), input (Path), output_root (Path), device (str). + """ + parser = ArgumentParser(description="BBoxMaskPose demo") + parser.add_argument("bmp_config", type=Path, help="Path to BMP YAML config file") + parser.add_argument("input", type=Path, help="Input image file") + parser.add_argument("--output-root", type=Path, default=None, help="Directory to save outputs (default: ./outputs)") + parser.add_argument("--device", type=str, default="cuda:0", help="Device for inference (e.g., cuda:0 or cpu)") + parser.add_argument("--create-gif", action="store_true", default=False, help="Create GIF of all BMP iterations") + args = parser.parse_args() + if args.output_root is None: + args.output_root = os.path.join(Path(__file__).parent, "outputs") + return args + + +def parse_yaml_config(yaml_path: Path) -> DotDict: + """ + Load BMP configuration from a YAML file. + + Args: + yaml_path (Path): Path to YAML config. 
+ Returns: + DotDict: Nested config dictionary. + """ + with open(yaml_path, "r") as f: + cfg = yaml.safe_load(f) + return DotDict(cfg) + + +def process_one_image( + args: Namespace, + bmp_config: DotDict, + img_path: Path, + detector: object, + detector_prime: object, + pose_estimator: object, + sam2_model: object, +) -> InstanceData: + """ + Run the full BMP pipeline on a single image: detection, pose, SAM mask refinement, and visualization. + + Args: + args (Namespace): Parsed CLI arguments. + bmp_config (DotDict): Configuration parameters. + img_path (Path): Path to the input image. + detector: Primary MMDetection model. + detector_prime: Secondary MMDetection model for iterations. + pose_estimator: MMPose model for keypoint estimation. + sam2_model: SAM model for mask refinement. + Returns: + InstanceData: Final merged detections and refined masks. + """ + # Load image + img = mmcv.imread(str(img_path), channel_order="bgr") + if img is None: + raise ValueError("Failed to read image from {}.".format(img_path)) + + # Prepare output directory + output_dir = os.path.join(args.output_root, img_path.stem) + shutil.rmtree(str(output_dir), ignore_errors=True) + mmengine.mkdir_or_exist(str(output_dir)) + + img_for_detection = img.copy() + all_detections = None + for iteration in range(bmp_config.num_bmp_iters): + print_log("BMP Iteration {}/{} started".format(iteration + 1, bmp_config.num_bmp_iters), logger="current") + + # Step 1: Detection + det_instances = run_MMDetector( + detector if iteration == 0 else detector_prime, + img_for_detection, + det_cat_id=DEFAULT_DET_CAT_ID, + bbox_thr=DEFAULT_BBOX_THR, + nms_thr=DEFAULT_NMS_THR, + ) + print_log("Detected {} instances".format(len(det_instances.bboxes)), logger="current") + if len(det_instances.bboxes) == 0: + print_log("No detections found, skipping.", logger="current") + continue + + # Step 2: Pose estimation + pose_instances = run_MMPose( + pose_estimator, + img.copy(), + detections=det_instances, + kpt_thr=DEFAULT_KPT_THR, + ) + # Restrict to first 17 COCO keypoints + pose_instances.keypoints = pose_instances.keypoints[:, :17, :] + pose_instances.keypoint_scores = pose_instances.keypoint_scores[:, :17] + pose_instances.keypoints = np.concatenate( + [pose_instances.keypoints, pose_instances.keypoint_scores[:, :, None]], axis=-1 + ) + + # Step 3: Pose-NMS and SAM refinement + all_keypoints = ( + pose_instances.keypoints + if all_detections is None + else np.concatenate([all_detections.keypoints, pose_instances.keypoints], axis=0) + ) + all_bboxes = ( + pose_instances.bboxes + if all_detections is None + else np.concatenate([all_detections.bboxes, pose_instances.bboxes], axis=0) + ) + num_valid_kpts = np.sum(all_keypoints[:, :, 2] > bmp_config.sam2.prompting.confidence_thr, axis=1) + keep_indices = pose_nms( + DotDict({"confidence_thr": bmp_config.sam2.prompting.confidence_thr, "oks_thr": bmp_config.oks_nms_thr}), + image_kpts=all_keypoints, + image_bboxes=all_bboxes, + num_valid_kpts=num_valid_kpts, + ) + keep_indices = sorted(keep_indices) # Sort by original index + num_old_detections = 0 if all_detections is None else len(all_detections.bboxes) + keep_new_indices = [i - num_old_detections for i in keep_indices if i >= num_old_detections] + keep_old_indices = [i for i in keep_indices if i < num_old_detections] + if len(keep_new_indices) == 0: + print_log("No new instances passed pose NMS, skipping SAM refinement.", logger="current") + continue + # filter new detections and compute scores + new_dets = filter_instances(pose_instances, 
keep_new_indices) + new_dets.scores = pose_instances.keypoint_scores[keep_new_indices].mean(axis=-1) + old_dets = None + if len(keep_old_indices) > 0: + old_dets = filter_instances(all_detections, keep_old_indices) + print_log( + "Pose NMS reduced instances to {:d} ({:d}+{:d}) instances".format( + len(new_dets.bboxes) + num_old_detections, num_old_detections, len(new_dets.bboxes) + ), + logger="current", + ) + + new_detections = process_image_with_SAM( + DotDict(bmp_config.sam2.prompting), + img.copy(), + sam2_model, + new_dets, + old_dets if old_dets is not None else None, + ) + + # Merge detections + if all_detections is None: + all_detections = new_detections + else: + all_detections = concat_instances(all_detections, new_dets) + + # Step 4: Visualization + img_for_detection = visualize_itteration( + img.copy(), + all_detections, + iteration_idx=iteration, + output_root=str(output_dir), + img_name=img_path.stem, + ) + print_log("Iteration {} completed".format(iteration + 1), logger="current") + + # Create GIF of iterations if requested + if args.create_gif: + image_file = os.path.join(output_dir, "{:s}.jpg".format(img_path.stem)) + create_GIF( + img_path=str(image_file), + output_root=str(output_dir), + bmp_x=bmp_config.num_bmp_iters, + ) + return all_detections + + +def main() -> None: + """ + Entry point for the BMP demo: loads models and processes one image. + """ + args = parse_args() + bmp_config = parse_yaml_config(args.bmp_config) + + # Ensure output root exists + mmengine.mkdir_or_exist(str(args.output_root)) + + # build detectors + detector = init_detector(bmp_config.detector.det_config, bmp_config.detector.det_checkpoint, device=args.device) + detector.cfg = adapt_mmdet_pipeline(detector.cfg) + if ( + bmp_config.detector.det_config == bmp_config.detector.det_prime_config + and bmp_config.detector.det_checkpoint == bmp_config.detector.det_prime_checkpoint + ) or (bmp_config.detector.det_prime_config is None or bmp_config.detector.det_prime_checkpoint is None): + print_log("Using the same detector as D and D'", logger="current") + detector_prime = detector + else: + detector_prime = init_detector( + bmp_config.detector.det_prime_config, bmp_config.detector.det_prime_checkpoint, device=args.device + ) + detector_prime.cfg = adapt_mmdet_pipeline(detector_prime.cfg) + print_log("Using a different detector for D'", logger="current") + + # build pose estimator + pose_estimator = init_pose_estimator( + bmp_config.pose_estimator.pose_config, + bmp_config.pose_estimator.pose_checkpoint, + device=args.device, + cfg_options=dict(model=dict(test_cfg=dict(output_heatmaps=False))), + ) + + sam2 = prepare_sam2_model( + model_cfg=bmp_config.sam2.sam2_config, + model_checkpoint=bmp_config.sam2.sam2_checkpoint, + ) + + # Run inference on one image + _ = process_one_image(args, bmp_config, args.input, detector, detector_prime, pose_estimator, sam2) + + +if __name__ == "__main__": + main() diff --git a/demo/demo_utils.py b/demo/demo_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eb3285eac703bbdc56f1a090c6b568994210c679 --- /dev/null +++ b/demo/demo_utils.py @@ -0,0 +1,705 @@ +""" +Utilities for the BMP demo: +- Visualization of detections, masks, and poses +- Mask and bounding-box processing +- Pose non-maximum suppression (NMS) +- Animated GIF creation of demo iterations +""" + +import logging +import os +import shutil +import subprocess +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + +import cv2 +import numpy as np +from 
mmengine.logging import print_log +from mmengine.structures import InstanceData +from pycocotools import mask as Mask +from sam2.distinctipy import get_colors +from tqdm import tqdm + +### Visualization hyperparameters +MIN_CONTOUR_AREA: int = 50 +BBOX_WEIGHT: float = 0.9 +MASK_WEIGHT: float = 0.6 +BACK_MASK_WEIGHT: float = 0.6 +POSE_WEIGHT: float = 0.8 + + +""" +posevis is our custom visualization library for pose estimation. For compatibility, we also provide a lite version that has fewer features but still reproduces visualization from the paper. +""" +try: + from posevis import pose_visualization +except ImportError: + from .posevis_lite import pose_visualization + + +class DotDict(dict): + """Dictionary with attribute access and nested dict wrapping.""" + + def __getattr__(self, name: str) -> any: + if name in self: + val = self[name] + if isinstance(val, dict): + val = DotDict(val) + self[name] = val + return val + raise AttributeError("No attribute named {!r}".format(name)) + + def __setattr__(self, name: str, value: any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + if name in self: + del self[name] + else: + raise AttributeError("No attribute named {!r}".format(name)) + + +def filter_instances(instances: InstanceData, indices): + """ + Return a new InstanceData containing only the entries of 'instances' at the given indices. + """ + if instances is None: + return None + data = {} + # Attributes to filter + for attr in [ + "bboxes", + "bbox_scores", + "keypoints", + "keypoint_scores", + "scores", + "pred_masks", + "refined_masks", + "sam_scores", + "sam_kpts", + ]: + if hasattr(instances, attr): + arr = getattr(instances, attr) + data[attr] = arr[indices] if arr is not None else None + return InstanceData(**data) + + +def concat_instances(instances1: InstanceData, instances2: InstanceData): + """ + Concatenate two InstanceData objects along the first axis, preserving order. + If instances1 or instances2 is None, returns the other. + """ + if instances1 is None: + return instances2 + if instances2 is None: + return instances1 + data = {} + for attr in [ + "bboxes", + "bbox_scores", + "keypoints", + "keypoint_scores", + "scores", + "pred_masks", + "refined_masks", + "sam_scores", + "sam_kpts", + ]: + arr1 = getattr(instances1, attr, None) + arr2 = getattr(instances2, attr, None) + if arr1 is None and arr2 is None: + continue + if arr1 is None: + data[attr] = arr2 + elif arr2 is None: + data[attr] = arr1 + else: + data[attr] = np.concatenate([arr1, arr2], axis=0) + return InstanceData(**data) + + +def _visualize_predictions( + img: np.ndarray, + bboxes: np.ndarray, + scores: np.ndarray, + masks: List[Optional[List[np.ndarray]]], + poses: List[Optional[np.ndarray]], + vis_type: str = "mask", + mask_is_binary: bool = False, +) -> Tuple[np.ndarray, np.ndarray]: + """ + Render bounding boxes, segmentation masks, and poses on the input image. + + Args: + img (np.ndarray): BGR image of shape (H, W, 3). + bboxes (np.ndarray): Array of bounding boxes [x, y, w, h]. + scores (np.ndarray): Confidence scores for each bbox. + masks (List[Optional[List[np.ndarray]]]): Polygon masks per instance. + poses (List[Optional[np.ndarray]]): Keypoint arrays per instance. + vis_type (str): Flags for visualization types separated by '+'. + mask_is_binary (bool): Whether input masks are binary arrays. + + Returns: + Tuple[np.ndarray, np.ndarray]: The visualized image and color map. 
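+
+    Note:
+        `vis_type` is a '+'-separated combination of the flags handled below:
+        'bbox', 'mask', 'mask-out', 'inv-mask', and 'pose'. An illustrative call
+        (a sketch only; the arrays are assumed to be prepared by the caller)
+        that draws boxes, filled masks, and skeletons in one pass:
+
+            vis_img, colors = _visualize_predictions(
+                img, bboxes, scores, masks, poses,
+                vis_type="bbox+mask+pose", mask_is_binary=True,
+            )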
+ """ + vis_types = vis_type.split("+") + + # # Filter-out small detections to make the visualization more clear + # new_bboxes = [] + # new_scores = [] + # new_masks = [] + # new_poses = [] + # size_thr = img.shape[0] * img.shape[1] * 0.01 + # for bbox, score, mask, pose in zip(bboxes, scores, masks, poses): + # area = mask.sum() # Assume binary mask. OK for demo purposes + # if area > size_thr: + # new_bboxes.append(bbox) + # new_scores.append(score) + # new_masks.append(mask) + # new_poses.append(pose) + # bboxes = np.array(new_bboxes) + # scores = np.array(new_scores) + # masks = new_masks + # poses = new_poses + + if mask_is_binary: + poly_masks: List[Optional[List[np.ndarray]]] = [] + for binary_mask in masks: + if binary_mask is not None: + contours, _ = cv2.findContours( + (binary_mask * 255).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + polys = [cnt.flatten() for cnt in contours if cv2.contourArea(cnt) >= MIN_CONTOUR_AREA] + else: + polys = None + poly_masks.append(polys) + masks = poly_masks # type: ignore + + # Exclude white, black, and green colors from the palette as they are not distinctive + colors = (np.array(get_colors(len(bboxes), exclude_colors=[(0, 1, 0), (.5, .5, .5), (0, 0, 0), (1, 1, 1)], rng=0)) * 255).astype( + int + ) + + + if "inv-mask" in vis_types: + stencil = np.zeros_like(img) + + for bbox, score, mask_poly, pose, color in zip(bboxes, scores, masks, poses, colors): + bbox = _update_bbox_by_mask(list(map(int, bbox)), mask_poly, img.shape) + color_list = color.tolist() + img_copy = img.copy() + + if "bbox" in vis_types: + x, y, w, h = bbox + cv2.rectangle(img_copy, (x, y), (x + w, y + h), color_list, 2) + img = cv2.addWeighted(img, 1 - BBOX_WEIGHT, img_copy, BBOX_WEIGHT, 0) + + if mask_poly is not None and "mask" in vis_types: + for seg in mask_poly: + seg_pts = np.array(seg).reshape(-1, 1, 2).astype(int) + cv2.fillPoly(img_copy, [seg_pts], color_list) + img = cv2.addWeighted(img, 1 - MASK_WEIGHT, img_copy, MASK_WEIGHT, 0) + + if mask_poly is not None and "mask-out" in vis_types: + for seg in mask_poly: + seg_pts = np.array(seg).reshape(-1, 1, 2).astype(int) + cv2.fillPoly(img, [seg_pts], (0, 0, 0)) + + if mask_poly is not None and "inv-mask" in vis_types: + for seg in mask_poly: + seg = np.array(seg).reshape(-1, 1, 2).astype(int) + if cv2.contourArea(seg) < MIN_CONTOUR_AREA: + continue + cv2.fillPoly(stencil, [seg], (255, 255, 255)) + + if pose is not None and "pose" in vis_types: + vis_img = pose_visualization( + img.copy(), + pose.reshape(-1, 3), + width_multiplier=8, + differ_individuals=True, + color=color_list, + keep_image_size=True, + ) + img = cv2.addWeighted(img, 1 - POSE_WEIGHT, vis_img, POSE_WEIGHT, 0) + + if "inv-mask" in vis_types: + img = cv2.addWeighted(img, 1 - BACK_MASK_WEIGHT, cv2.bitwise_and(img, stencil), BACK_MASK_WEIGHT, 0) + + return img, colors + + +def visualize_itteration( + img: np.ndarray, detections: Any, iteration_idx: int, output_root: Path, img_name: str, with_text: bool = True +) -> Optional[np.ndarray]: + """ + Generate and save visualization images for each BMP iteration. + + Args: + img (np.ndarray): Original input image. + detections: InstanceData containing bboxes, scores, masks, keypoints. + iteration_idx (int): Current iteration index (0-based). + output_root (Path): Directory to save output images. + img_name (str): Base name of the image without extension. + with_text (bool): Whether to overlay text labels. + + Returns: + Optional[np.ndarray]: The masked-out image if generated, else None. 
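+
+    Note:
+        One JPEG per visualization type is written to `output_root`, named
+        "{img_name}_iter{iteration_idx + 1}_{label}.jpg" with spaces in the
+        label replaced by underscores (e.g. "..._iter1_SAM_Masks.jpg"), plus a
+        "..._iter{N}_prompting_kpts.jpg" image showing the SAM prompt keypoints.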
+ """ + bboxes = detections.bboxes + scores = detections.scores + pred_masks = detections.pred_masks + refined_masks = detections.refined_masks + keypoints = detections.keypoints + sam_kpts = detections.sam_kpts + + masked_out = None + for vis_def in [ + {"type": "bbox+mask", "masks": pred_masks, "label": "Detector (out)"}, + {"type": "inv-mask", "masks": pred_masks, "label": "MaskPose (in)"}, + {"type": "inv-mask+pose", "masks": pred_masks, "label": "MaskPose (out)"}, + {"type": "mask", "masks": refined_masks, "label": "SAM Masks"}, + {"type": "mask-out", "masks": refined_masks, "label": "Mask-Out"}, + {"type": "pose", "masks": refined_masks, "label": "Final Poses"}, + ]: + vis_img, colors = _visualize_predictions( + img.copy(), bboxes, scores, vis_def["masks"], keypoints, vis_type=vis_def["type"], mask_is_binary=True + ) + if vis_def["type"] == "mask-out": + masked_out = vis_img + if with_text: + label = "BMP {:d}x: {}".format(iteration_idx + 1, vis_def["label"]) + cv2.putText(vis_img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 3) + cv2.putText(vis_img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2) + out_path = os.path.join( + output_root, "{}_iter{}_{}.jpg".format(img_name, iteration_idx + 1, vis_def["label"].replace(" ", "_")) + ) + cv2.imwrite(str(out_path), vis_img) + + # Show prompting keypoints + tmp_img = img.copy() + for i, _ in enumerate(bboxes): + if len(sam_kpts[i]) > 0: + instance_color = colors[i].astype(int).tolist() + for kpt in sam_kpts[i]: + cv2.drawMarker( + tmp_img, + (int(kpt[0]), int(kpt[1])), + instance_color, + markerType=cv2.MARKER_CROSS, + markerSize=20, + thickness=3, + ) + # Write the keypoint confidence next to the marker + cv2.putText( + tmp_img, + f"{kpt[2]:.2f}", + (int(kpt[0]) + 10, int(kpt[1]) - 10), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + instance_color, + 1, + cv2.LINE_AA, + ) + if with_text: + text = "BMP {:d}x: SAM prompts".format(iteration_idx + 1) + cv2.putText(tmp_img, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 3, cv2.LINE_AA) + cv2.putText(tmp_img, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA) + cv2.imwrite("{:s}/{:s}_iter{:d}_prompting_kpts.jpg".format(output_root, img_name, iteration_idx + 1), tmp_img) + + return masked_out + + +def visualize_demo( + img: np.ndarray, detections: Any, +) -> Optional[np.ndarray]: + """ + Generate and save visualization images for each BMP iteration. + + Args: + img (np.ndarray): Original input image. + detections: InstanceData containing bboxes, scores, masks, keypoints. + iteration_idx (int): Current iteration index (0-based). + output_root (Path): Directory to save output images. + img_name (str): Base name of the image without extension. + with_text (bool): Whether to overlay text labels. + + Returns: + Optional[np.ndarray]: The masked-out image if generated, else None. 
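+
+    Note:
+        Unlike `visualize_itteration`, this variant only takes `img` and
+        `detections` and returns a list of three rendered images: the mask-out
+        view, the detector masks with poses ("RTMDet-L"), and the SAM-refined
+        masks with poses ("BMP"). Nothing is written to disk here.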
+ """ + bboxes = detections.bboxes + scores = detections.scores + pred_masks = detections.pred_masks + refined_masks = detections.refined_masks + keypoints = detections.keypoints + + returns = [] + for vis_def in [ + {"type": "mask-out", "masks": refined_masks, "label": ""}, + {"type": "mask+pose", "masks": pred_masks, "label": "RTMDet-L"}, + {"type": "mask+pose", "masks": refined_masks, "label": "BMP"}, + ]: + vis_img, colors = _visualize_predictions( + img.copy(), bboxes, scores, vis_def["masks"], keypoints, vis_type=vis_def["type"], mask_is_binary=True + ) + returns.append(vis_img) + + return returns + + +def create_GIF( + img_path: Path, + output_root: Path, + bmp_x: int = 2, +) -> None: + """ + Compile iteration images into an animated GIF using ffmpeg. + + Args: + img_path (Path): Path to a sample iteration image. + output_root (Path): Directory to save the GIF. + bmp_x (int): Number of BMP iterations. + duration_per_frame (int): Frame display duration in ms. + + Raises: + RuntimeError: If ffmpeg is not available or images are missing. + """ + display_dur = 1.5 # seconds + fade_dur = 1.0 + fps = 10 + scale_width = 300 # Resize width for GIF, height will be auto-scaled to maintain aspect ratio + + # Check if ffmpeg is installed. If not, raise warning and return + if shutil.which("ffmpeg") is None: + print_log("FFMpeg is not installed. GIF creation will be skipped.", logger="current", level=logging.WARNING) + return + print_log("Creating GIF with FFmpeg...", logger="current") + + dirname, filename = os.path.split(img_path) + img_name_wo_ext, _ = os.path.splitext(filename) + + gif_image_names = [ + "Detector_(out)", + "MaskPose_(in)", + "MaskPose_(out)", + "prompting_kpts", + "SAM_Masks", + "Mask-Out", + ] + + # Create black image of the same size as the last image + last_img_path = os.path.join(dirname, "{}_iter1_{}".format(img_name_wo_ext, gif_image_names[0]) + ".jpg") + last_img = cv2.imread(last_img_path) + if last_img is None: + print_log("Could not read image {}.".format(last_img_path), logger="current", level=logging.ERROR) + return + black_img = np.zeros_like(last_img) + cv2.imwrite(os.path.join(dirname, "black_image.jpg"), black_img) + + gif_images = [] + for iter in range(bmp_x): + iter_img_path = os.path.join(dirname, "{}_iter{}_".format(img_name_wo_ext, iter + 1)) + for img_name in gif_image_names: + + if iter + 1 == bmp_x and img_name == "Mask-Out": + # Skip the last iteration's Mask-Out image + continue + + img_file = "{}{}.jpg".format(iter_img_path, img_name) + if not os.path.exists(img_file): + print_log("{} does not exist, skipping.".format(img_file), logger="current", level=logging.WARNING) + continue + gif_images.append(img_file) + + if len(gif_images) == 0: + print_log("No images found for GIF creation.", logger="current", level=logging.WARNING) + return + + # Add 'before' and 'after' images + after1_img = os.path.join(dirname, "{}_iter{}_Final_Poses.jpg".format(img_name_wo_ext, bmp_x)) + after2_img = os.path.join(dirname, "{}_iter{}_SAM_Masks.jpg".format(img_name_wo_ext, bmp_x)) + # gif_images.append(os.path.join(dirname, "black_image.jpg")) # Add black image at the end + gif_images.append(after1_img) + gif_images.append(after2_img) + gif_images.append(os.path.join(dirname, "black_image.jpg")) # Add black image at the end + + # Create a GIF from the images + gif_output_path = os.path.join(output_root, "{}_bmp_{}x.gif".format(img_name_wo_ext, bmp_x)) + + # 0. 
Make sure images exist and are divisible by 2 + for img in gif_images: + if not os.path.exists(img): + print_log("Image {} does not exist, skipping GIF creation.".format(img), logger="current", level=logging.WARNING) + return + # Check if image dimensions are divisible by 2 + img_data = cv2.imread(img) + if img_data.shape[1] % 2 != 0 or img_data.shape[0] % 2 != 0: + print_log( + "Image {} dimensions are not divisible by 2, resizing.".format(img), + logger="current", + level=logging.WARNING, + ) + resized_img = cv2.resize(img_data, (img_data.shape[1] // 2 * 2, img_data.shape[0] // 2 * 2)) + cv2.imwrite(img, resized_img) + + # 1. inputs + in_args = [] + for p in gif_images: + in_args += ["-loop", "1", "-t", str(display_dur), "-i", p] + + # 2. build xfade chain + n = len(gif_images) + parts = [] + for i in range(1, n): + # left label: first is input [0:v], then [v1], [v2], … + left = "[{}:v]".format(i - 1) if i == 1 else "[v{}]".format(i - 1) + right = "[{}:v]".format(i) + out = "[v{}]".format(i) + offset = (i - 1) * (display_dur + fade_dur) + display_dur + parts.append( + "{}{}xfade=transition=fade:".format(left, right) + + "duration={}:offset={:.3f}{}".format(fade_dur, offset, out) + ) + filter_complex = ";".join(parts) + + # 3. make MP4 slideshow + mp4 = "slideshow.mp4" + cmd1 = [ + "ffmpeg", + "-loglevel", + "error", + "-v", + "quiet", + "-hide_banner", + "-y", + *in_args, + "-filter_complex", + filter_complex, + "-map", + "[v{}]".format(n - 1), + "-c:v", + "libx264", + "-pix_fmt", + "yuv420p", + mp4, + ] + subprocess.run(cmd1, check=True) + + # 4. palette + palette = "palette.png" + vf = "fps={}".format(fps) + if scale_width: + vf += ",scale={}: -1:flags=lanczos".format(scale_width) + + # 5. generate palette + subprocess.run( + [ + "ffmpeg", + "-loglevel", + "error", + "-v", + "quiet", + "-hide_banner", + "-y", + "-i", + mp4, + "-vf", + vf + ",palettegen", + palette, + ], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + ) + + # 6. build final GIF + subprocess.run( + [ + "ffmpeg", + "-loglevel", + "error", + "-v", + "quiet", + "-hide_banner", + "-y", + "-i", + mp4, + "-i", + palette, + "-lavfi", + vf + "[x];[x][1:v]paletteuse", + gif_output_path, + ], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + ) + + # Clean up temporary files + os.remove(mp4) + os.remove(palette) + os.remove(os.path.join(dirname, "black_image.jpg")) + + print_log(f"GIF saved as '{gif_output_path}'", logger="current") + + +def _update_bbox_by_mask( + bbox: List[int], mask_poly: Optional[List[List[int]]], image_shape: Tuple[int, int, int] +) -> List[int]: + """ + Adjust bounding box to tightly fit mask polygon. + + Args: + bbox (List[int]): Original [x, y, w, h]. + mask_poly (Optional[List[List[int]]]): Polygon coordinates. + image_shape (Tuple[int,int,int]): Image shape (H, W, C). + + Returns: + List[int]: Updated [x, y, w, h] bounding box. + """ + if mask_poly is None or len(mask_poly) == 0: + return bbox + + mask_rle = Mask.frPyObjects(mask_poly, image_shape[0], image_shape[1]) + mask_rle = Mask.merge(mask_rle) + bbox_segm_xywh = Mask.toBbox(mask_rle) + bbox_segm_xyxy = np.array( + [ + bbox_segm_xywh[0], + bbox_segm_xywh[1], + bbox_segm_xywh[0] + bbox_segm_xywh[2], + bbox_segm_xywh[1] + bbox_segm_xywh[3], + ] + ) + + bbox = bbox_segm_xywh + + return bbox.astype(int).tolist() + + +def pose_nms(config: Any, image_kpts: np.ndarray, image_bboxes: np.ndarray, num_valid_kpts: np.ndarray) -> np.ndarray: + """ + Perform OKS-based non-maximum suppression on detected poses. 
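+
+    Illustrative usage (a sketch; `kpts` of shape (N, 17, 3) and `boxes` of
+    shape (N, 4) are assumed to come from the pose estimator, and the threshold
+    values are made up):
+
+        cfg = DotDict({"confidence_thr": 0.35, "oks_thr": 0.8})
+        num_valid = (kpts[:, :, 2] > cfg.confidence_thr).sum(axis=1)
+        keep = pose_nms(cfg, image_kpts=kpts, image_bboxes=boxes, num_valid_kpts=num_valid)
+        kpts, boxes = kpts[keep], boxes[keep]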
+ + Args: + config (Any): Configuration with confidence_thr and oks_thr. + image_kpts (np.ndarray): Detected keypoints of shape (N, K, 3). + image_bboxes (np.ndarray): Corresponding bboxes (N,4). + num_valid_kpts (np.ndarray): Count of valid keypoints per instance. + + Returns: + np.ndarray: Indices of kept instances. + """ + # Sort image kpts by average score - lowest first + # scores = image_kpts[:, :, 2].mean(axis=1) + # sort_idx = np.argsort(scores) + # image_kpts = image_kpts[sort_idx, :, :] + + # Compute OKS between all pairs of poses + oks_matrix = np.zeros((image_kpts.shape[0], image_kpts.shape[0])) + for i in range(image_kpts.shape[0]): + for j in range(image_kpts.shape[0]): + gt_bbox_xywh = image_bboxes[i].copy() + gt_bbox_xyxy = gt_bbox_xywh.copy() + gt_bbox_xyxy[2:] += gt_bbox_xyxy[:2] + gt = { + "keypoints": image_kpts[i].copy(), + "bbox": gt_bbox_xyxy, + "area": gt_bbox_xywh[2] * gt_bbox_xywh[3], + } + dt = {"keypoints": image_kpts[j].copy(), "bbox": gt_bbox_xyxy} + gt["keypoints"][:, 2] = (gt["keypoints"][:, 2] > config.confidence_thr) * 2 + oks = compute_oks(gt, dt) + if oks > 1: + breakpoint() + oks_matrix[i, j] = oks + + np.fill_diagonal(oks_matrix, -1) + is_subset = oks_matrix > config.oks_thr + + remove_instances = [] + while is_subset.any(): + # Find the pair with the highest OKS + i, j = np.unravel_index(np.argmax(oks_matrix), oks_matrix.shape) + + # Keep the one with the highest number of keypoints + if num_valid_kpts[i] > num_valid_kpts[j]: + remove_idx = j + else: + remove_idx = i + + # Remove the column from is_subset + oks_matrix[:, remove_idx] = 0 + oks_matrix[remove_idx, j] = 0 + remove_instances.append(remove_idx) + is_subset = oks_matrix > config.oks_thr + + keep_instances = np.setdiff1d(np.arange(image_kpts.shape[0]), remove_instances) + + return keep_instances + + +def compute_oks(gt: Dict[str, Any], dt: Dict[str, Any], use_area: bool = True, per_kpt: bool = False) -> float: + """ + Compute Object Keypoint Similarity (OKS) between ground-truth and detected poses. + + Args: + gt (Dict): Ground-truth keypoints and bbox info. + dt (Dict): Detected keypoints and bbox info. + use_area (bool): Whether to normalize by GT area. + per_kpt (bool): Whether to return per-keypoint OKS array. + + Returns: + float: OKS score or mean OKS. 
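+
+    Note:
+        This follows the standard COCO definition: for each visible ground-truth
+        keypoint i, e_i = d_i^2 / (2 * s^2 * k_i^2), where d_i is the distance
+        between prediction and ground truth, s^2 is the object area, and
+        k_i = 2 * sigma_i is the per-keypoint constant; the returned OKS is the
+        mean of exp(-e_i) over the visible keypoints.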
+ """ + sigmas = ( + np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89]) + / 10.0 + ) + vars = (sigmas * 2) ** 2 + k = len(sigmas) + visibility_condition = lambda x: x > 0 + g = np.array(gt["keypoints"]).reshape(k, 3) + xg = g[:, 0] + yg = g[:, 1] + vg = g[:, 2] + k1 = np.count_nonzero(visibility_condition(vg)) + bb = gt["bbox"] + x0 = bb[0] - bb[2] + x1 = bb[0] + bb[2] * 2 + y0 = bb[1] - bb[3] + y1 = bb[1] + bb[3] * 2 + + d = np.array(dt["keypoints"]).reshape((k, 3)) + xd = d[:, 0] + yd = d[:, 1] + + if k1 > 0: + # measure the per-keypoint distance if keypoints visible + dx = xd - xg + dy = yd - yg + + else: + # measure minimum distance to keypoints in (x0,y0) & (x1,y1) + z = np.zeros((k)) + dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0) + dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0) + + if use_area: + e = (dx**2 + dy**2) / vars / (gt["area"] + np.spacing(1)) / 2 + else: + tmparea = gt["bbox"][3] * gt["bbox"][2] * 0.53 + e = (dx**2 + dy**2) / vars / (tmparea + np.spacing(1)) / 2 + + if per_kpt: + oks = np.exp(-e) + if k1 > 0: + oks[~visibility_condition(vg)] = 0 + + else: + if k1 > 0: + e = e[visibility_condition(vg)] + oks = np.sum(np.exp(-e)) / e.shape[0] + + return oks diff --git a/demo/mm_utils.py b/demo/mm_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dda709ccc37b64eb8a43ed945c4827a9f1a69e4e --- /dev/null +++ b/demo/mm_utils.py @@ -0,0 +1,106 @@ +""" +This module provides high-level interfaces to run MMDetection and MMPose +models sequentially. Users can call run_MMDetector and run_MMPose from +other scripts (e.g., bmp_demo.py) to perform object detection and +pose estimation in a clean, modular fashion. +""" + +import numpy as np +from mmdet.apis import inference_detector +from mmengine.structures import InstanceData + +from mmpose.apis import inference_topdown +from mmpose.evaluation.functional import nms +from mmpose.structures import merge_data_samples + + +def run_MMDetector(detector, image, det_cat_id: int = 0, bbox_thr: float = 0.3, nms_thr: float = 0.3) -> InstanceData: + """ + Run an MMDetection model to detect bounding boxes (and masks) in an image. + + Args: + detector: An initialized MMDetection detector model. + image: Input image as file path or BGR numpy array. + det_cat_id: Category ID to filter detections (default is 0 for 'person'). + bbox_thr: Minimum bounding box score threshold. + nms_thr: IoU threshold for Non-Maximum Suppression (NMS). + + Returns: + InstanceData: A structure containing filtered bboxes, bbox_scores, and masks (if available). 
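+
+    Illustrative usage (a sketch; the config and checkpoint paths are
+    placeholders, not files shipped with this repository):
+
+        from mmdet.apis import init_detector
+        detector = init_detector("det_config.py", "det_checkpoint.pth", device="cuda:0")
+        detector.cfg = adapt_mmdet_pipeline(detector.cfg)  # as done in bmp_demo.py
+        dets = run_MMDetector(detector, "image.jpg", det_cat_id=0, bbox_thr=0.3, nms_thr=0.3)
+        print(len(dets.bboxes), "person detections")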
+ """ + # Run detection + det_result = inference_detector(detector, image) + pred_instances = det_result.pred_instances.cpu().numpy() + + # Aggregate bboxes and scores into an (N, 5) array + bboxes_all = np.concatenate((pred_instances.bboxes, pred_instances.scores[:, None]), axis=1) + + # Filter by category and score + keep_mask = np.logical_and(pred_instances.labels == det_cat_id, pred_instances.scores > bbox_thr) + if not np.any(keep_mask): + # Return empty structure if nothing passes threshold + return InstanceData(bboxes=np.zeros((0, 4)), bbox_scores=np.zeros((0,)), masks=np.zeros((0, 1, 1))) + + bboxes = bboxes_all[keep_mask] + masks = getattr(pred_instances, "masks", None) + if masks is not None: + masks = masks[keep_mask] + + # Sort detections by descending score + order = np.argsort(bboxes[:, 4])[::-1] + bboxes = bboxes[order] + if masks is not None: + masks = masks[order] + + # Apply Non-Maximum Suppression + keep_indices = nms(bboxes, nms_thr) + bboxes = bboxes[keep_indices] + if masks is not None: + masks = masks[keep_indices] + + # Construct InstanceData to return + det_instances = InstanceData(bboxes=bboxes[:, :4], bbox_scores=bboxes[:, 4], masks=masks) + return det_instances + + +def run_MMPose(pose_estimator, image, detections: InstanceData, kpt_thr: float = 0.3) -> InstanceData: + """ + Run an MMPose top-down model to estimate human pose given detected bounding boxes. + + Args: + pose_estimator: An initialized MMPose model. + image: Input image as file path or RGB/BGR numpy array. + detections: InstanceData from run_MMDetector containing bboxes and masks. + kpt_thr: Minimum keypoint score threshold to filter low-confidence joints. + + Returns: + InstanceData: A structure containing estimated keypoints, keypoint_scores, + original bboxes, and masks (if provided). 
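+
+    Illustrative usage, chained after `run_MMDetector` (a sketch; paths are
+    placeholders):
+
+        from mmpose.apis import init_model
+        pose_model = init_model("pose_config.py", "pose_checkpoint.pth", device="cuda:0")
+        dets = run_MMDetector(detector, img)
+        poses = run_MMPose(pose_model, img, detections=dets, kpt_thr=0.3)
+        # poses.keypoints: (N, K, 2), poses.keypoint_scores: (N, K)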
+ """ + # Extract bounding boxes + bboxes = detections.bboxes + if bboxes.shape[0] == 0: + # No detections => empty pose data + return InstanceData( + keypoints=np.zeros((0, 17, 3)), + keypoint_scores=np.zeros((0, 17)), + bboxes=bboxes, + bbox_scores=detections.bbox_scores, + masks=detections.masks, + ) + + # Run top-down pose estimation + pose_results = inference_topdown(pose_estimator, image, bboxes, masks=detections.masks) + data_samples = merge_data_samples(pose_results) + + # Attach masks back into the data_samples if available + if detections.masks is not None: + data_samples.pred_instances.pred_masks = detections.masks + + # Filter out low-confidence keypoints + kp_scores = data_samples.pred_instances.keypoint_scores + kp_mask = kp_scores >= kpt_thr + # data_samples.pred_instances.keypoints[~kp_mask] = [0, 0, 0] + + # Return final InstanceData for poses + return data_samples.pred_instances diff --git a/demo/posevis_lite.py b/demo/posevis_lite.py new file mode 100644 index 0000000000000000000000000000000000000000..89de044228c0c3e82f36b704dd4b29926cdbd96d --- /dev/null +++ b/demo/posevis_lite.py @@ -0,0 +1,507 @@ +import os +from typing import Any, Dict, List, Optional, Tuple, Union + +import cv2 +import numpy as np + +NEUTRAL_COLOR = (52, 235, 107) + +LEFT_ARM_COLOR = (216, 235, 52) +LEFT_LEG_COLOR = (235, 107, 52) +LEFT_SIDE_COLOR = (245, 188, 113) +LEFT_FACE_COLOR = (235, 52, 107) + +RIGHT_ARM_COLOR = (52, 235, 216) +RIGHT_LEG_COLOR = (52, 107, 235) +RIGHT_SIDE_COLOR = (52, 171, 235) +RIGHT_FACE_COLOR = (107, 52, 235) + +COCO_MARKERS = [ + ["nose", cv2.MARKER_CROSS, NEUTRAL_COLOR], + ["left_eye", cv2.MARKER_SQUARE, LEFT_FACE_COLOR], + ["right_eye", cv2.MARKER_SQUARE, RIGHT_FACE_COLOR], + ["left_ear", cv2.MARKER_CROSS, LEFT_FACE_COLOR], + ["right_ear", cv2.MARKER_CROSS, RIGHT_FACE_COLOR], + ["left_shoulder", cv2.MARKER_TRIANGLE_UP, LEFT_ARM_COLOR], + ["right_shoulder", cv2.MARKER_TRIANGLE_UP, RIGHT_ARM_COLOR], + ["left_elbow", cv2.MARKER_SQUARE, LEFT_ARM_COLOR], + ["right_elbow", cv2.MARKER_SQUARE, RIGHT_ARM_COLOR], + ["left_wrist", cv2.MARKER_CROSS, LEFT_ARM_COLOR], + ["right_wrist", cv2.MARKER_CROSS, RIGHT_ARM_COLOR], + ["left_hip", cv2.MARKER_TRIANGLE_UP, LEFT_LEG_COLOR], + ["right_hip", cv2.MARKER_TRIANGLE_UP, RIGHT_LEG_COLOR], + ["left_knee", cv2.MARKER_SQUARE, LEFT_LEG_COLOR], + ["right_knee", cv2.MARKER_SQUARE, RIGHT_LEG_COLOR], + ["left_ankle", cv2.MARKER_TILTED_CROSS, LEFT_LEG_COLOR], + ["right_ankle", cv2.MARKER_TILTED_CROSS, RIGHT_LEG_COLOR], +] + + +COCO_SKELETON = [ + [[16, 14], LEFT_LEG_COLOR], # Left ankle - Left knee + [[14, 12], LEFT_LEG_COLOR], # Left knee - Left hip + [[17, 15], RIGHT_LEG_COLOR], # Right ankle - Right knee + [[15, 13], RIGHT_LEG_COLOR], # Right knee - Right hip + [[12, 13], NEUTRAL_COLOR], # Left hip - Right hip + [[6, 12], LEFT_SIDE_COLOR], # Left hip - Left shoulder + [[7, 13], RIGHT_SIDE_COLOR], # Right hip - Right shoulder + [[6, 7], NEUTRAL_COLOR], # Left shoulder - Right shoulder + [[6, 8], LEFT_ARM_COLOR], # Left shoulder - Left elbow + [[7, 9], RIGHT_ARM_COLOR], # Right shoulder - Right elbow + [[8, 10], LEFT_ARM_COLOR], # Left elbow - Left wrist + [[9, 11], RIGHT_ARM_COLOR], # Right elbow - Right wrist + [[2, 3], NEUTRAL_COLOR], # Left eye - Right eye + [[1, 2], LEFT_FACE_COLOR], # Nose - Left eye + [[1, 3], RIGHT_FACE_COLOR], # Nose - Right eye + [[2, 4], LEFT_FACE_COLOR], # Left eye - Left ear + [[3, 5], RIGHT_FACE_COLOR], # Right eye - Right ear + [[4, 6], LEFT_FACE_COLOR], # Left ear - Left shoulder + [[5, 7], RIGHT_FACE_COLOR], # 
Right ear - Right shoulder +] + + +def _draw_line( + img: np.ndarray, + start: Tuple[float, float], + stop: Tuple[float, float], + color: Tuple[int, int, int], + line_type: str, + thickness: int = 1, +) -> np.ndarray: + """ + Draw a line segment on an image, supporting solid, dashed, or dotted styles. + + Args: + img (np.ndarray): BGR image of shape (H, W, 3). + start (tuple of float): (x, y) start coordinates. + stop (tuple of float): (x, y) end coordinates. + color (tuple of int): BGR color values. + line_type (str): One of 'solid', 'dashed', or 'doted'. + thickness (int): Line thickness in pixels. + + Returns: + np.ndarray: Image with the line drawn. + """ + start = np.array(start)[:2] + stop = np.array(stop)[:2] + if line_type.lower() == "solid": + img = cv2.line( + img, + (int(start[0]), int(start[1])), + (int(stop[0]), int(stop[1])), + color=(0, 0, 0), + thickness=thickness+1, + lineType=cv2.LINE_AA, + ) + img = cv2.line( + img, + (int(start[0]), int(start[1])), + (int(stop[0]), int(stop[1])), + color=color, + thickness=thickness, + lineType=cv2.LINE_AA, + ) + elif line_type.lower() == "dashed": + delta = stop - start + length = np.linalg.norm(delta) + frac = np.linspace(0, 1, num=int(length / 5), endpoint=True) + for i in range(0, len(frac) - 1, 2): + s = start + frac[i] * delta + e = start + frac[i + 1] * delta + img = cv2.line( + img, + (int(s[0]), int(s[1])), + (int(e[0]), int(e[1])), + color=color, + thickness=thickness, + lineType=cv2.LINE_AA, + ) + elif line_type.lower() == "doted": + delta = stop - start + length = np.linalg.norm(delta) + frac = np.linspace(0, 1, num=int(length / 5), endpoint=True) + for i in range(0, len(frac)): + s = start + frac[i] * delta + img = cv2.circle( + img, + (int(s[0]), int(s[1])), + radius=max(thickness // 2, 1), + color=color, + thickness=-1, + lineType=cv2.LINE_AA, + ) + return img + + +def pose_visualization( + img: Union[str, np.ndarray], + keypoints: Union[Dict[str, Any], np.ndarray], + format: str = "COCO", + greyness: float = 1.0, + show_markers: bool = True, + show_bones: bool = True, + line_type: str = "solid", + width_multiplier: float = 1.0, + bbox_width_multiplier: float = 1.0, + show_bbox: bool = False, + differ_individuals: bool = False, + confidence_thr: float = 0.3, + errors: Optional[np.ndarray] = None, + color: Optional[Tuple[int, int, int]] = None, + keep_image_size: bool = False, + return_padding: bool = False, +) -> Union[np.ndarray, Tuple[np.ndarray, List[int]]]: + """ + Overlay pose keypoints and skeleton on an image. + + Args: + img (str or np.ndarray): Path to image file or BGR image array. + keypoints (dict or np.ndarray): Either a dict with 'bbox' and 'keypoints' or + an array of shape (17, 2 or 3) or multiple poses stacked. + format (str): Keypoint format, currently only 'COCO'. + greyness (float): Factor for bone/marker color intensity (0.0-1.0). + show_markers (bool): Whether to draw keypoint markers. + show_bones (bool): Whether to draw skeleton bones. + line_type (str): One of 'solid', 'dashed', 'doted' for bone style. + width_multiplier (float): Line width scaling factor for bones. + bbox_width_multiplier (float): Line width scaling factor for bounding box. + show_bbox (bool): Whether to draw bounding box around keypoints. + differ_individuals (bool): Use distinct color per individual pose. + confidence_thr (float): Confidence threshold for keypoint visibility. + errors (np.ndarray or None): Optional array of per-kpt errors (17,1). + color (tuple or None): Override color for markers and bones. 
+ keep_image_size (bool): Prevent image padding for out-of-bounds keypoints. + return_padding (bool): If True, also return padding offsets [top,bottom,left,right]. + + Returns: + np.ndarray or (np.ndarray, list of int): Annotated image, and optional + padding offsets if `return_padding` is True. + """ + + bbox = None + if isinstance(keypoints, dict): + try: + bbox = np.array(keypoints["bbox"]).flatten() + except KeyError: + pass + keypoints = np.array(keypoints["keypoints"]) + + # If keypoints is a list of poses, draw them all + if len(keypoints) % 17 != 0 or keypoints.ndim == 3: + + if color is not None: + if not isinstance(color, (list, tuple)): + color = [color for keypoint in keypoints] + else: + color = [None for keypoint in keypoints] + + max_padding = [0, 0, 0, 0] + for keypoint, clr in zip(keypoints, color): + img = pose_visualization( + img, + keypoint, + format=format, + greyness=greyness, + show_markers=show_markers, + show_bones=show_bones, + line_type=line_type, + width_multiplier=width_multiplier, + bbox_width_multiplier=bbox_width_multiplier, + show_bbox=show_bbox, + differ_individuals=differ_individuals, + color=clr, + confidence_thr=confidence_thr, + keep_image_size=keep_image_size, + return_padding=return_padding, + ) + if return_padding: + img, padding = img + max_padding = [max(max_padding[i], int(padding[i])) for i in range(4)] + + if return_padding: + return img, max_padding + else: + return img + + keypoints = np.array(keypoints).reshape(17, -1) + # If keypoint visibility is not provided, assume all keypoints are visible + if keypoints.shape[1] == 2: + keypoints = np.hstack([keypoints, np.ones((17, 1)) * 2]) + + assert keypoints.shape[1] == 3, "Keypoints should be in the format (x, y, visibility)" + assert keypoints.shape[0] == 17, "Keypoints should be in the format (x, y, visibility)" + + if errors is not None: + errors = np.array(errors).reshape(17, -1) + assert errors.shape[1] == 1, "Errors should be in the format (K, r)" + assert errors.shape[0] == 17, "Errors should be in the format (K, r)" + else: + errors = np.ones((17, 1)) * np.nan + + # If keypoint visibility is float between 0 and 1, it is detection + # If conf < confidence_thr: conf = 1 + # If conf >= confidence_thr: conf = 2 + vis_is_float = np.any(np.logical_and(keypoints[:, -1] > 0, keypoints[:, -1] < 1)) + if keypoints.shape[1] == 3 and vis_is_float: + # print("before", keypoints[:, -1]) + lower_idx = keypoints[:, -1] < confidence_thr + keypoints[lower_idx, -1] = 1 + keypoints[~lower_idx, -1] = 2 + # print("after", keypoints[:, -1]) + # print("-"*20) + + # All visibility values should be ints + keypoints[:, -1] = keypoints[:, -1].astype(int) + + if isinstance(img, str): + img = cv2.imread(img) + + if img is None: + if return_padding: + return None, [0, 0, 0, 0] + else: + return None + + if not (keypoints[:, 2] > 0).any(): + if return_padding: + return img, [0, 0, 0, 0] + else: + return img + + valid_kpts = (keypoints[:, 0] > 0) & (keypoints[:, 1] > 0) + num_valid_kpts = np.sum(valid_kpts) + + if num_valid_kpts == 0: + if return_padding: + return img, [0, 0, 0, 0] + else: + return img + + min_x_kpts = np.min(keypoints[keypoints[:, 2] > 0, 0]) + min_y_kpts = np.min(keypoints[keypoints[:, 2] > 0, 1]) + max_x_kpts = np.max(keypoints[keypoints[:, 2] > 0, 0]) + max_y_kpts = np.max(keypoints[keypoints[:, 2] > 0, 1]) + if bbox is None: + min_x = min_x_kpts + min_y = min_y_kpts + max_x = max_x_kpts + max_y = max_y_kpts + else: + min_x = bbox[0] + min_y = bbox[1] + max_x = bbox[2] + max_y = bbox[3] + + max_area 
= (max_x - min_x) * (max_y - min_y) + diagonal = np.sqrt((max_x - min_x) ** 2 + (max_y - min_y) ** 2) + line_width = max(int(np.sqrt(max_area) / 500 * width_multiplier), 1) + bbox_line_width = max(int(np.sqrt(max_area) / 500 * bbox_width_multiplier), 1) + marker_size = max(int(np.sqrt(max_area) / 80), 1) + invisible_marker_size = max(int(np.sqrt(max_area) / 100), 1) + marker_thickness = max(int(np.sqrt(max_area) / 100), 1) + + if differ_individuals: + if color is not None: + instance_color = color + else: + instance_color = np.random.randint(0, 255, size=(3,)).tolist() + instance_color = tuple(instance_color) + + # Pad image with dark gray if keypoints are outside the image + if not keep_image_size: + padding = [ + max(0, -min_y_kpts), + max(0, max_y_kpts - img.shape[0]), + max(0, -min_x_kpts), + max(0, max_x_kpts - img.shape[1]), + ] + padding = [int(p) for p in padding] + img = cv2.copyMakeBorder( + img, + padding[0], + padding[1], + padding[2], + padding[3], + cv2.BORDER_CONSTANT, + value=(80, 80, 80), + ) + + # Add padding to bbox and kpts + value_x_to_add = max(0, -min_x_kpts) + value_y_to_add = max(0, -min_y_kpts) + keypoints[keypoints[:, 2] > 0, 0] += value_x_to_add + keypoints[keypoints[:, 2] > 0, 1] += value_y_to_add + if bbox is not None: + bbox[0] += value_x_to_add + bbox[1] += value_y_to_add + bbox[2] += value_x_to_add + bbox[3] += value_y_to_add + + if show_bbox and not (bbox is None): + pts = [ + (bbox[0], bbox[1]), + (bbox[0], bbox[3]), + (bbox[2], bbox[3]), + (bbox[2], bbox[1]), + (bbox[0], bbox[1]), + ] + for i in range(len(pts) - 1): + if differ_individuals: + img = _draw_line(img, pts[i], pts[i + 1], instance_color, "doted", thickness=bbox_line_width) + else: + img = _draw_line(img, pts[i], pts[i + 1], (0, 255, 0), line_type, thickness=bbox_line_width) + + if show_markers: + for kpt, marker_info, err in zip(keypoints, COCO_MARKERS, errors): + if kpt[0] == 0 and kpt[1] == 0: + continue + + if kpt[2] != 2: + color = (140, 140, 140) + elif differ_individuals: + color = instance_color + else: + color = marker_info[2] + + if kpt[2] == 1: + img_overlay = img.copy() + img_overlay = cv2.drawMarker( + img_overlay, + (int(kpt[0]), int(kpt[1])), + color=color, + markerType=marker_info[1], + markerSize=marker_size, + thickness=marker_thickness, + ) + img = cv2.addWeighted(img_overlay, 0.4, img, 0.6, 0) + + else: + img = cv2.drawMarker( + img, + (int(kpt[0]), int(kpt[1])), + color=color, + markerType=marker_info[1], + markerSize=invisible_marker_size if kpt[2] == 1 else marker_size, + thickness=marker_thickness, + ) + + if not np.isnan(err).any(): + radius = err * diagonal + clr = (0, 0, 255) if "solid" in line_type else (0, 255, 0) + plus = 1 if "solid" in line_type else -1 + img = cv2.circle( + img, + (int(kpt[0]), int(kpt[1])), + radius=int(radius), + color=clr, + thickness=1, + lineType=cv2.LINE_AA, + ) + dx = np.sqrt(radius**2 / 2) + img = cv2.line( + img, + (int(kpt[0]), int(kpt[1])), + (int(kpt[0] + plus * dx), int(kpt[1] - dx)), + color=clr, + thickness=1, + lineType=cv2.LINE_AA, + ) + + if show_bones: + for bone_info in COCO_SKELETON: + kp1 = keypoints[bone_info[0][0] - 1, :] + kp2 = keypoints[bone_info[0][1] - 1, :] + + if (kp1[0] == 0 and kp1[1] == 0) or (kp2[0] == 0 and kp2[1] == 0): + continue + + dashed = kp1[2] == 1 or kp2[2] == 1 + + if differ_individuals: + color = np.array(instance_color) + else: + color = np.array(bone_info[1]) + color = (color * greyness).astype(int).tolist() + + if dashed: + img_overlay = img.copy() + img_overlay = _draw_line(img_overlay, kp1, 
kp2, color, line_type, thickness=line_width) + img = cv2.addWeighted(img_overlay, 0.4, img, 0.6, 0) + + else: + img = _draw_line(img, kp1, kp2, color, line_type, thickness=line_width) + + if return_padding: + return img, padding + else: + return img + + +if __name__ == "__main__": + kpts = np.array( + [ + 344, + 222, + 2, + 356, + 211, + 2, + 330, + 211, + 2, + 372, + 220, + 2, + 309, + 224, + 2, + 413, + 279, + 2, + 274, + 300, + 2, + 444, + 372, + 2, + 261, + 396, + 2, + 398, + 359, + 2, + 316, + 372, + 2, + 407, + 489, + 2, + 185, + 580, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + ) + + kpts = kpts.reshape(-1, 3) + kpts[:, -1] = np.random.randint(1, 3, size=(17,)) + + img = pose_visualization("demo/posevis_test.jpg", kpts, show_markers=True, line_type="solid") + + kpts2 = kpts.copy() + kpts2[kpts2[:, 1] > 0, :2] += 10 + img = pose_visualization(img, kpts2, show_markers=False, line_type="doted") + + os.makedirs("demo/outputs", exist_ok=True) + cv2.imwrite("demo/outputs/posevis_test_out.jpg", img) diff --git a/demo/sam2_utils.py b/demo/sam2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8834c68ccfcca4d020b4254fa40906c300625d --- /dev/null +++ b/demo/sam2_utils.py @@ -0,0 +1,714 @@ +""" +SAM2 utilities for BMP demo: +- Build and prepare SAM model +- Convert poses to segmentation +- Compute mask-pose consistency +""" + +from typing import Any, List, Optional, Tuple + +import numpy as np +import torch +from mmengine.structures import InstanceData +from pycocotools import mask as Mask +from sam2.build_sam import build_sam2 +from sam2.sam2_image_predictor import SAM2ImagePredictor + +# Threshold for keypoint validity in mask-pose consistency +STRICT_KPT_THRESHOLD: float = 0.5 + + +def _validate_sam_args(sam_args): + """ + Validate that all required sam_args attributes are present. + """ + required = [ + "crop", + "use_bbox", + "confidence_thr", + "ignore_small_bboxes", + "num_pos_keypoints", + "num_pos_keypoints_if_crowd", + "crowd_by_max_iou", + "batch", + "exclusive_masks", + "extend_bbox", + "pose_mask_consistency", + "visibility_thr", + ] + for param in required: + if not hasattr(sam_args, param): + raise AttributeError(f"Missing required arg {param} in sam_args") + + +def _get_max_ious(bboxes: List[np.ndarray]) -> np.ndarray: + """ + Compute maximum IoU for each bbox against others. + """ + is_crowd = [0] * len(bboxes) + ious = Mask.iou(bboxes, bboxes, is_crowd) + mat = np.array(ious) + np.fill_diagonal(mat, 0) + return mat.max(axis=1) + + +def _compute_one_mask_pose_consistency( + mask: np.ndarray, pos_keypoints: Optional[np.ndarray] = None, neg_keypoints: Optional[np.ndarray] = None +) -> float: + """ + Compute a consistency score between a mask and given keypoints. + + Args: + mask (np.ndarray): Binary mask of shape (H, W). + pos_keypoints (Optional[np.ndarray]): Positive keypoints array (N, 3). + neg_keypoints (Optional[np.ndarray]): Negative keypoints array (M, 3). + + Returns: + float: Weighted mean of positive and negative keypoint consistency. 
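+
+    Note:
+        The score is 0.5 * (fraction of confident positive keypoints that fall
+        inside the mask) + 0.5 * (fraction of confident negative keypoints that
+        fall outside it), where "confident" means a keypoint score above
+        STRICT_KPT_THRESHOLD.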
+ """ + if mask is None: + return 0.0 + + def _mean_inside(points: np.ndarray) -> float: + if points.size == 0: + return 0.0 + pts_int = np.floor(points[:, :2]).astype(int) + pts_int[:, 0] = np.clip(pts_int[:, 0], 0, mask.shape[1] - 1) + pts_int[:, 1] = np.clip(pts_int[:, 1], 0, mask.shape[0] - 1) + vals = mask[pts_int[:, 1], pts_int[:, 0]] + return vals.mean() if vals.size > 0 else 0.0 + + pos_mean = 0.0 + if pos_keypoints is not None: + valid = pos_keypoints[:, 2] > STRICT_KPT_THRESHOLD + pos_mean = _mean_inside(pos_keypoints[valid]) + + neg_mean = 0.0 + if neg_keypoints is not None: + valid = neg_keypoints[:, 2] > STRICT_KPT_THRESHOLD + pts = neg_keypoints[valid][:, :2] + inside = mask[np.floor(pts[:, 1]).astype(int), np.floor(pts[:, 0]).astype(int)] + neg_mean = (~inside.astype(bool)).mean() if inside.size > 0 else 0.0 + + return 0.5 * pos_mean + 0.5 * neg_mean + + +def _select_keypoints( + args: Any, + kpts: np.ndarray, + num_visible: int, + bbox: Optional[Tuple[float, float, float, float]] = None, + method: Optional[str] = "distance+confidence", +) -> Tuple[np.ndarray, np.ndarray]: + """ + Select and order keypoints for SAM prompting based on specified method. + + Args: + args: Configuration object with selection_method and visibility_thr attributes. + kpts (np.ndarray): Keypoints array of shape (K, 3). + num_visible (int): Number of keypoints above visibility threshold. + bbox (Optional[Tuple]): Optional bbox for distance methods. + method (Optional[str]): Override selection method. + + Returns: + Tuple[np.ndarray, np.ndarray]: Selected keypoint coordinates (N,2) and confidences (N,). + + Raises: + ValueError: If an unknown method is specified. + """ + if num_visible == 0: + return kpts[:, :2], kpts[:, 2] + + methods = ["confidence", "distance", "distance+confidence", "closest"] + sel_method = method or args.selection_method + if sel_method not in methods: + raise ValueError("Unknown method for keypoint selection: {}".format(sel_method)) + + # Select at maximum keypoint from the face + facial_kpts = kpts[:3, :] + facial_conf = kpts[:3, 2] + facial_point = facial_kpts[np.argmax(facial_conf)] + if facial_point[-1] >= args.visibility_thr: + kpts = np.concatenate([facial_point[None, :], kpts[3:]], axis=0) + + conf = kpts[:, 2] + vis_mask = conf >= args.visibility_thr + coords = kpts[vis_mask, :2] + confs = conf[vis_mask] + + if sel_method == "confidence": + order = np.argsort(confs)[::-1] + coords = coords[order] + confs = confs[order] + elif sel_method == "distance": + if bbox is None: + bbox_center = np.array([coords[:, 0].mean(), coords[:, 1].mean()]) + else: + bbox_center = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]) + dists = np.linalg.norm(coords[:, :2] - bbox_center, axis=1) + dist_matrix = np.linalg.norm(coords[:, None, :2] - coords[None, :, :2], axis=2) + np.fill_diagonal(dist_matrix, np.inf) + min_inter_dist = np.min(dist_matrix, axis=1) + order = np.argsort(dists + 3 * min_inter_dist)[::-1] + coords = coords[order, :2] + confs = confs[order] + elif sel_method == "distance+confidence": + order = np.argsort(confs)[::-1] + confidences = kpts[order, 2] + coords = coords[order, :2] + confs = confs[order] + + dist_matrix = np.linalg.norm(coords[:, None, :2] - coords[None, :, :2], axis=2) + + selected_idx = [0] + confidences[0] = -1 + for _ in range(coords.shape[0] - 1): + min_dist = np.min(dist_matrix[:, selected_idx], axis=1) + min_dist[confidences < np.percentile(confidences, 80)] = -1 + + next_idx = np.argmax(min_dist) + selected_idx.append(next_idx) + 
            confidences[next_idx] = -1
+
+        coords = coords[selected_idx]
+        confs = confs[selected_idx]
+    elif sel_method == "closest":
+        coords = coords[confs > STRICT_KPT_THRESHOLD, :]
+        confs = confs[confs > STRICT_KPT_THRESHOLD]
+        if bbox is None:
+            bbox_center = np.array([coords[:, 0].mean(), coords[:, 1].mean()])
+        else:
+            bbox_center = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2])
+        dists = np.linalg.norm(coords[:, :2] - bbox_center, axis=1)
+        order = np.argsort(dists)
+        coords = coords[order, :2]
+        confs = confs[order]
+
+    return coords, confs
+
+
+def prepare_model(model_cfg: Any, model_checkpoint: str) -> SAM2ImagePredictor:
+    """
+    Build and return a SAM2ImagePredictor model on the appropriate device.
+
+    Args:
+        model_cfg: Configuration for SAM2 model.
+        model_checkpoint (str): Path to model checkpoint.
+
+    Returns:
+        SAM2ImagePredictor: Initialized SAM2 image predictor.
+    """
+    if torch.cuda.is_available():
+        device = torch.device("cuda")
+    elif torch.backends.mps.is_available():
+        device = torch.device("mps")
+    else:
+        device = torch.device("cpu")
+
+    sam2 = build_sam2(model_cfg, model_checkpoint, device=device, apply_postprocessing=True)
+    model = SAM2ImagePredictor(
+        sam2,
+        max_hole_area=10.0,
+        max_sprinkle_area=50.0,
+    )
+    return model
+
+
+def _compute_mask_pose_consistency(masks: List[np.ndarray], keypoints_list: List[np.ndarray]) -> np.ndarray:
+    """
+    Compute mask-pose consistency score for each mask-keypoints pair.
+
+    Args:
+        masks (List[np.ndarray]): Binary masks list.
+        keypoints_list (List[np.ndarray]): List of keypoint arrays per instance.
+
+    Returns:
+        np.ndarray: Consistency scores array of shape (N,).
+    """
+    scores: List[float] = []
+    for idx, (mask, kpts) in enumerate(zip(masks, keypoints_list)):
+        # Keypoints of all other instances serve as negative evidence for this mask
+        other_kpts = np.concatenate([keypoints_list[:idx], keypoints_list[idx + 1 :]], axis=0).reshape(-1, 3)
+        score = _compute_one_mask_pose_consistency(mask, kpts, other_kpts)
+        scores.append(score)
+
+    return np.array(scores)
+
+
+def _pose2seg(
+    args: Any,
+    model: SAM2ImagePredictor,
+    bbox_xyxy: Optional[List[float]] = None,
+    pos_kpts: Optional[np.ndarray] = None,
+    neg_kpts: Optional[np.ndarray] = None,
+    image: Optional[np.ndarray] = None,
+    gt_mask: Optional[Any] = None,
+    num_pos_keypoints: Optional[int] = None,
+    gt_mask_is_binary: bool = False,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
+    """
+    Run SAM segmentation conditioned on pose keypoints and optional ground truth mask.
+
+    Args:
+        args: Configuration object with prompting settings.
+        model (SAM2ImagePredictor): Prepared SAM2 model.
+        bbox_xyxy (Optional[List[float]]): Bounding box coordinates in xyxy format.
+        pos_kpts (Optional[np.ndarray]): Positive keypoints array.
+        neg_kpts (Optional[np.ndarray]): Negative keypoints array.
+        image (Optional[np.ndarray]): Input image array.
+        gt_mask (Optional[Any]): Ground truth mask (optional).
+        num_pos_keypoints (Optional[int]): Number of positive keypoints to use.
+        gt_mask_is_binary (bool): Flag indicating if ground truth mask is binary.
+
+    Returns:
+        Tuple of (mask, pos_kpts_backup, neg_kpts_backup, score).
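+
+    Note:
+        The target pose supplies positive point prompts (at most
+        `num_pos_keypoints`, ordered by `_select_keypoints`), keypoints of the
+        other instances supply negative point prompts, and the (optionally
+        extended) box is passed as a box prompt. SAM2 is queried with
+        `multimask_output=False`, so a single mask and its score are returned.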
+ """ + num_pos_keypoints = args.num_pos_keypoints if num_pos_keypoints is None else num_pos_keypoints + + # Filter-out un-annotated and invisible keypoints + if pos_kpts is not None: + pos_kpts = pos_kpts.reshape(-1, 3) + valid_kpts = pos_kpts[:, 2] > args.visibility_thr + + pose_bbox = np.array([pos_kpts[:, 0].min(), pos_kpts[:, 1].min(), pos_kpts[:, 0].max(), pos_kpts[:, 1].max()]) + pos_kpts, conf = _select_keypoints(args, pos_kpts, num_visible=valid_kpts.sum(), bbox=bbox_xyxy) + + pos_kpts_backup = np.concatenate([pos_kpts, conf[:, None]], axis=1) + + if pos_kpts.shape[0] > num_pos_keypoints: + pos_kpts = pos_kpts[:num_pos_keypoints, :] + pos_kpts_backup = pos_kpts_backup[:num_pos_keypoints, :] + + else: + pose_bbox = None + pos_kpts = np.empty((0, 2), dtype=np.float32) + pos_kpts_backup = np.empty((0, 3), dtype=np.float32) + + if neg_kpts is not None: + neg_kpts = neg_kpts.reshape(-1, 3) + valid_kpts = neg_kpts[:, 2] > args.visibility_thr + + neg_kpts, conf = _select_keypoints( + args, neg_kpts, num_visible=valid_kpts.sum(), bbox=bbox_xyxy, method="closest" + ) + selected_neg_kpts = neg_kpts + neg_kpts_backup = np.concatenate([neg_kpts, conf[:, None]], axis=1) + + if neg_kpts.shape[0] > args.num_neg_keypoints: + selected_neg_kpts = neg_kpts[: args.num_neg_keypoints, :] + + else: + selected_neg_kpts = np.empty((0, 2), dtype=np.float32) + neg_kpts_backup = np.empty((0, 3), dtype=np.float32) + + # Concatenate positive and negative keypoints + kpts = np.concatenate([pos_kpts, selected_neg_kpts], axis=0) + kpts_labels = np.concatenate([np.ones(pos_kpts.shape[0]), np.zeros(selected_neg_kpts.shape[0])], axis=0) + + bbox = bbox_xyxy if args.use_bbox else None + + if args.extend_bbox and not bbox is None: + # Expand the bbox such that it contains all positive keypoints + pose_bbox = np.array( + [pos_kpts[:, 0].min() - 2, pos_kpts[:, 1].min() - 2, pos_kpts[:, 0].max() + 2, pos_kpts[:, 1].max() + 2] + ) + expanded_bbox = np.array(bbox) + expanded_bbox[:2] = np.minimum(bbox[:2], pose_bbox[:2]) + expanded_bbox[2:] = np.maximum(bbox[2:], pose_bbox[2:]) + bbox = expanded_bbox + + if args.crop and args.use_bbox and image is not None: + # Crop the image to the 1.5 * bbox size + crop_bbox = np.array(bbox) + bbox_center = np.array([(crop_bbox[0] + crop_bbox[2]) / 2, (crop_bbox[1] + crop_bbox[3]) / 2]) + bbox_size = np.array([crop_bbox[2] - crop_bbox[0], crop_bbox[3] - crop_bbox[1]]) + bbox_size = 1.5 * bbox_size + crop_bbox = np.array( + [ + bbox_center[0] - bbox_size[0] / 2, + bbox_center[1] - bbox_size[1] / 2, + bbox_center[0] + bbox_size[0] / 2, + bbox_center[1] + bbox_size[1] / 2, + ] + ) + crop_bbox = np.round(crop_bbox).astype(int) + crop_bbox = np.clip(crop_bbox, 0, [image.shape[1], image.shape[0], image.shape[1], image.shape[0]]) + original_image_size = image.shape[:2] + image = image[crop_bbox[1] : crop_bbox[3], crop_bbox[0] : crop_bbox[2], :] + + # Update the keypoints + kpts = kpts - crop_bbox[:2] + bbox[:2] = bbox[:2] - crop_bbox[:2] + bbox[2:] = bbox[2:] - crop_bbox[:2] + + model.set_image(image) + + masks, scores, logits = model.predict( + point_coords=kpts, + point_labels=kpts_labels, + box=bbox, + multimask_output=False, + ) + mask = masks[0] + scores = scores[0] + + if args.crop and args.use_bbox and image is not None: + # Pad the mask to the original image size + mask_padded = np.zeros(original_image_size, dtype=np.uint8) + mask_padded[crop_bbox[1] : crop_bbox[3], crop_bbox[0] : crop_bbox[2]] = mask + mask = mask_padded + + bbox[:2] = bbox[:2] + crop_bbox[:2] + bbox[2:] = bbox[2:] + 
crop_bbox[:2] + + if args.pose_mask_consistency: + if gt_mask_is_binary: + gt_mask_binary = gt_mask + else: + gt_mask_binary = Mask.decode(gt_mask).astype(bool) if gt_mask is not None else None + + gt_mask_pose_consistency = _compute_one_mask_pose_consistency(gt_mask_binary, pos_kpts_backup, neg_kpts_backup) + dt_mask_pose_consistency = _compute_one_mask_pose_consistency(mask, pos_kpts_backup, neg_kpts_backup) + + tol = 0.1 + dt_is_same = np.abs(dt_mask_pose_consistency - gt_mask_pose_consistency) < tol + if dt_is_same: + mask = gt_mask_binary if gt_mask_binary.sum() < mask.sum() else mask + else: + mask = gt_mask_binary if gt_mask_pose_consistency > dt_mask_pose_consistency else mask + + return mask, pos_kpts_backup, neg_kpts_backup, scores + + +def process_image_with_SAM( + sam_args: Any, + image: np.ndarray, + model: SAM2ImagePredictor, + new_dets: InstanceData, + old_dets: Optional[InstanceData] = None, +) -> InstanceData: + """ + Wrapper that validates args and routes to single or batch processing. + """ + _validate_sam_args(sam_args) + if sam_args.batch: + return _process_image_batch(sam_args, image, model, new_dets, old_dets) + return _process_image_single(sam_args, image, model, new_dets, old_dets) + + +def _process_image_single( + sam_args: Any, + image: np.ndarray, + model: SAM2ImagePredictor, + new_dets: InstanceData, + old_dets: Optional[InstanceData] = None, +) -> InstanceData: + """ + Refine instance segmentation masks using SAM2 with pose-conditioned prompts. + + Args: + sam_args (Any): DotDict containing required SAM parameters: + crop (bool), use_bbox (bool), confidence_thr (float), + ignore_small_bboxes (bool), num_pos_keypoints (int), + num_pos_keypoints_if_crowd (int), crowd_by_max_iou (Optional[float]), + batch (bool), exclusive_masks (bool), extend_bbox (bool), pose_mask_consistency (bool). + image (np.ndarray): BGR image array of shape (H, W, 3). + model (SAM2ImagePredictor): Initialized SAM2 predictor. + new_dets (InstanceData): New detections with attributes: + bboxes, pred_masks, keypoints, bbox_scores. + old_dets (Optional[InstanceData]): Previous detections for negative prompts. + + Returns: + InstanceData: `new_dets` updated in-place with + `.refined_masks`, `.sam_scores`, and `.sam_kpts`. 
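+
+    Illustrative `sam_args` (a sketch with made-up values; in the demo these
+    fields come from the `sam2.prompting` section of the BMP YAML config and are
+    wrapped in a DotDict by the caller):
+
+        sam_args = DotDict({
+            "crop": False, "use_bbox": True, "extend_bbox": True,
+            "confidence_thr": 0.3, "visibility_thr": 0.3,
+            "ignore_small_bboxes": False,
+            "num_pos_keypoints": 6, "num_pos_keypoints_if_crowd": 4,
+            "num_neg_keypoints": 10,  # used by _pose2seg; not in the _validate_sam_args list
+            "crowd_by_max_iou": None, "batch": False,
+            "exclusive_masks": True, "pose_mask_consistency": False,
+        })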
+ """ + _validate_sam_args(sam_args) + + if not (sam_args.crop and sam_args.use_bbox): + model.set_image(image) + + # Ignore all keypoints with confidence below the threshold + new_keypoints = new_dets.keypoints.copy() + for kpts in new_keypoints: + conf_mask = kpts[:, 2] < sam_args.confidence_thr + kpts[conf_mask, :] = 0 + n_new_dets = len(new_dets.bboxes) + n_old_dets = 0 + if old_dets is not None: + n_old_dets = len(old_dets.bboxes) + old_keypoints = old_dets.keypoints.copy() + for kpts in old_keypoints: + conf_mask = kpts[:, 2] < sam_args.confidence_thr + kpts[conf_mask, :] = 0 + + all_bboxes = new_dets.bboxes.copy() + if old_dets is not None: + all_bboxes = np.concatenate([all_bboxes, old_dets.bboxes], axis=0) + + max_ious = _get_max_ious(all_bboxes) + + gt_bboxes = [] + new_dets.refined_masks = np.zeros((n_new_dets, image.shape[0], image.shape[1]), dtype=np.uint8) + new_dets.sam_scores = np.zeros_like(new_dets.bbox_scores) + new_dets.sam_kpts = np.zeros((len(new_dets.bboxes), sam_args.num_pos_keypoints, 3), dtype=np.float32) + for instance_idx in range(len(new_dets.bboxes)): + bbox_xywh = new_dets.bboxes[instance_idx] + bbox_area = bbox_xywh[2] * bbox_xywh[3] + + if sam_args.ignore_small_bboxes and bbox_area < 100 * 100: + continue + dt_mask = new_dets.pred_masks[instance_idx] if new_dets.pred_masks is not None else None + + bbox_xyxy = [bbox_xywh[0], bbox_xywh[1], bbox_xywh[0] + bbox_xywh[2], bbox_xywh[1] + bbox_xywh[3]] + gt_bboxes.append(bbox_xyxy) + this_kpts = new_keypoints[instance_idx].reshape(1, -1, 3) + other_kpts = None + if old_dets is not None: + other_kpts = old_keypoints.copy().reshape(n_old_dets, -1, 3) + if len(new_keypoints) > 1: + other_new_kpts = np.concatenate([new_keypoints[:instance_idx], new_keypoints[instance_idx + 1 :]], axis=0) + other_kpts = ( + np.concatenate([other_kpts, other_new_kpts], axis=0) if other_kpts is not None else other_new_kpts + ) + + num_pos_keypoints = sam_args.num_pos_keypoints + if sam_args.crowd_by_max_iou is not None and max_ious[instance_idx] > sam_args.crowd_by_max_iou: + bbox_xyxy = None + num_pos_keypoints = sam_args.num_pos_keypoints_if_crowd + + dt_mask, pos_kpts, neg_kpts, scores = _pose2seg( + sam_args, + model, + bbox_xyxy, + pos_kpts=this_kpts, + neg_kpts=other_kpts, + image=image if (sam_args.crop and sam_args.use_bbox) else None, + gt_mask=dt_mask, + num_pos_keypoints=num_pos_keypoints, + gt_mask_is_binary=True, + ) + + new_dets.refined_masks[instance_idx] = dt_mask + new_dets.sam_scores[instance_idx] = scores + + # If the number of positive keypoints is less than the required number, fill the rest with zeros + if len(pos_kpts) != sam_args.num_pos_keypoints: + pos_kpts = np.concatenate( + [pos_kpts, np.zeros((sam_args.num_pos_keypoints - len(pos_kpts), 3), dtype=np.float32)], axis=0 + ) + new_dets.sam_kpts[instance_idx] = pos_kpts + + n_masks = len(new_dets.refined_masks) + (len(old_dets.refined_masks) if old_dets is not None else 0) + + if sam_args.exclusive_masks and n_masks > 1: + all_masks = ( + np.concatenate([new_dets.refined_masks, old_dets.refined_masks], axis=0) + if old_dets is not None + else new_dets.refined_masks + ) + all_scores = ( + np.concatenate([new_dets.sam_scores, old_dets.sam_scores], axis=0) + if old_dets is not None + else new_dets.sam_scores + ) + refined_masks = _apply_exclusive_masks(all_masks, all_scores) + new_dets.refined_masks = refined_masks[: len(new_dets.refined_masks)] + + return new_dets + + +def _process_image_batch( + sam_args: Any, + image: np.ndarray, + model: SAM2ImagePredictor, + 
new_dets: InstanceData, + old_dets: Optional[InstanceData] = None, +) -> InstanceData: + """ + Batch process multiple detection instances with SAM2 refinement. + + Args: + sam_args (Any): DotDict of SAM parameters (same as `process_image_with_SAM`). + image (np.ndarray): Input BGR image. + model (SAM2ImagePredictor): Prepared SAM2 predictor. + new_dets (InstanceData): New detection instances. + old_dets (Optional[InstanceData]): Previous detections for negative prompts. + + Returns: + InstanceData: `new_dets` updated as in `process_image_with_SAM`. + """ + n_new_dets = len(new_dets.bboxes) + + model.set_image(image) + + image_kpts = [] + image_bboxes = [] + num_valid_kpts = [] + for instance_idx in range(len(new_dets.bboxes)): + + bbox_xywh = new_dets.bboxes[instance_idx].copy() + bbox_area = bbox_xywh[2] * bbox_xywh[3] + if sam_args.ignore_small_bboxes and bbox_area < 100 * 100: + continue + + this_kpts = new_dets.keypoints[instance_idx].copy().reshape(-1, 3) + kpts_vis = np.array(this_kpts[:, 2]) + visible_kpts = (kpts_vis > sam_args.visibility_thr) & (this_kpts[:, 2] > sam_args.confidence_thr) + num_visible = (visible_kpts).sum() + if num_visible <= 0: + continue + num_valid_kpts.append(num_visible) + image_bboxes.append(np.array(bbox_xywh)) + this_kpts[~visible_kpts, :2] = 0 + this_kpts[:, 2] = visible_kpts + image_kpts.append(this_kpts) + if old_dets is not None: + for instance_idx in range(len(old_dets.bboxes)): + bbox_xywh = old_dets.bboxes[instance_idx].copy() + bbox_area = bbox_xywh[2] * bbox_xywh[3] + if sam_args.ignore_small_bboxes and bbox_area < 100 * 100: + continue + this_kpts = old_dets.keypoints[instance_idx].reshape(-1, 3) + kpts_vis = np.array(this_kpts[:, 2]) + visible_kpts = (kpts_vis > sam_args.visibility_thr) & (this_kpts[:, 2] > sam_args.confidence_thr) + num_visible = (visible_kpts).sum() + if num_visible <= 0: + continue + num_valid_kpts.append(num_visible) + image_bboxes.append(np.array(bbox_xywh)) + this_kpts[~visible_kpts, :2] = 0 + this_kpts[:, 2] = visible_kpts + image_kpts.append(this_kpts) + + image_kpts = np.array(image_kpts) + image_bboxes = np.array(image_bboxes) + num_valid_kpts = np.array(num_valid_kpts) + + image_kpts_backup = image_kpts.copy() + + # Prepare keypoints such that all instances have the same number of keypoints + # First sort keypoints by their distance to the center of the bounding box + # If some are missing, duplicate the last one + prepared_kpts = [] + prepared_kpts_backup = [] + for bbox, kpts, num_visible in zip(image_bboxes, image_kpts, num_valid_kpts): + + this_kpts, this_conf = _select_keypoints(sam_args, kpts, num_visible, bbox) + + # Duplicate the last keypoint if some are missing + if this_kpts.shape[0] < num_valid_kpts.max(): + this_kpts = np.concatenate( + [this_kpts, np.tile(this_kpts[-1], (num_valid_kpts.max() - this_kpts.shape[0], 1))], axis=0 + ) + this_conf = np.concatenate( + [this_conf, np.tile(this_conf[-1], (num_valid_kpts.max() - this_conf.shape[0],))], axis=0 + ) + + prepared_kpts.append(this_kpts) + prepared_kpts_backup.append(np.concatenate([this_kpts, this_conf[:, None]], axis=1)) + image_kpts = np.array(prepared_kpts) + image_kpts_backup = np.array(prepared_kpts_backup) + kpts_labels = np.ones(image_kpts.shape[:2]) + + # Compute IoUs between all bounding boxes + max_ious = _get_max_ious(image_bboxes) + num_pos_keypoints = sam_args.num_pos_keypoints + use_bbox = sam_args.use_bbox + if sam_args.crowd_by_max_iou is not None and max_ious[instance_idx] > sam_args.crowd_by_max_iou: + use_bbox = False + 
num_pos_keypoints = sam_args.num_pos_keypoints_if_crowd + + # Threshold the number of positive keypoints + if num_pos_keypoints > 0 and num_pos_keypoints < image_kpts.shape[1]: + image_kpts = image_kpts[:, :num_pos_keypoints, :] + kpts_labels = kpts_labels[:, :num_pos_keypoints] + image_kpts_backup = image_kpts_backup[:, :num_pos_keypoints, :] + + elif num_pos_keypoints == 0: + image_kpts = None + kpts_labels = None + image_kpts_backup = np.empty((0, 3), dtype=np.float32) + + image_bboxes_xyxy = None + if use_bbox: + image_bboxes_xyxy = np.array(image_bboxes) + image_bboxes_xyxy[:, 2:] += image_bboxes_xyxy[:, :2] + + # Expand the bbox to include the positive keypoints + if sam_args.extend_bbox: + pose_bbox = np.stack( + [ + np.min(image_kpts[:, :, 0], axis=1) - 2, + np.min(image_kpts[:, :, 1], axis=1) - 2, + np.max(image_kpts[:, :, 0], axis=1) + 2, + np.max(image_kpts[:, :, 1], axis=1) + 2, + ], + axis=1, + ) + expanded_bbox = np.array(image_bboxes_xyxy) + expanded_bbox[:, :2] = np.minimum(expanded_bbox[:, :2], pose_bbox[:, :2]) + expanded_bbox[:, 2:] = np.maximum(expanded_bbox[:, 2:], pose_bbox[:, 2:]) + # bbox_expanded = (np.abs(expanded_bbox - image_bboxes_xyxy) > 1e-4).any(axis=1) + image_bboxes_xyxy = expanded_bbox + + # Process even old detections to get their 'negative' keypoints + masks, scores, logits = model.predict( + point_coords=image_kpts, + point_labels=kpts_labels, + box=image_bboxes_xyxy, + multimask_output=False, + ) + + # Reshape the masks to (N, C, H, W). If the model outputs (C, H, W), add a number of masks dimension + if len(masks.shape) == 3: + masks = masks[None, :, :, :] + masks = masks[:, 0, :, :] + N = masks.shape[0] + scores = scores.reshape(N) + + if sam_args.exclusive_masks and N > 1: + # Make sure the masks are non-overlapping + # If two masks overlap, set the pixel to the one with the highest score + masks = _apply_exclusive_masks(masks, scores) + + gt_masks = new_dets.pred_masks.copy() if new_dets.pred_masks is not None else None + if sam_args.pose_mask_consistency and gt_masks is not None: + # Measure 'mask-pose_conistency' by computing number of keypoints inside the mask + # Compute for both gt (if available) and predicted masks and then choose the one with higher consistency + dt_mask_pose_consistency = _compute_mask_pose_consistency(masks, image_kpts_backup) + gt_mask_pose_consistency = _compute_mask_pose_consistency(gt_masks, image_kpts_backup) + + dt_masks_area = np.array([m.sum() for m in masks]) + gt_masks_area = np.array([m.sum() for m in gt_masks]) if gt_masks is not None else np.zeros_like(dt_masks_area) + + # If PM-c is approx the same, prefer the smaller mask + tol = 0.1 + pmc_is_equal = np.isclose(dt_mask_pose_consistency, gt_mask_pose_consistency, atol=tol) + dt_is_worse = (dt_mask_pose_consistency < (gt_mask_pose_consistency - tol)) | pmc_is_equal & ( + dt_masks_area > gt_masks_area + ) + + new_masks = [] + for dt_mask, gt_mask, dt_worse in zip(masks, gt_masks, dt_is_worse): + if dt_worse: + new_masks.append(gt_mask) + else: + new_masks.append(dt_mask) + masks = np.array(new_masks) + + new_dets.refined_masks = masks[:n_new_dets] + new_dets.sam_scores = scores[:n_new_dets] + new_dets.sam_kpts = image_kpts_backup[:n_new_dets] + + return new_dets + + +def _apply_exclusive_masks(masks: np.ndarray, scores: np.ndarray) -> np.ndarray: + """ + Ensure masks are non-overlapping by keeping at each pixel the mask with the highest score. 
+ """ + no_mask = masks.sum(axis=0) == 0 + masked_scores = masks * scores[:, None, None] + argmax_masks = np.argmax(masked_scores, axis=0) + new_masks = argmax_masks[None, :, :] == (np.arange(masks.shape[0])[:, None, None]) + new_masks[:, no_mask] = 0 + return new_masks diff --git a/mmpose/__init__.py b/mmpose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dda49513faf22bf632da4f03ce57f175c3d7f853 --- /dev/null +++ b/mmpose/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import mmengine +from mmengine.utils import digit_version + +from .version import __version__, short_version + +mmcv_minimum_version = '2.0.0rc4' +mmcv_maximum_version = '2.3.0' +mmcv_version = digit_version(mmcv.__version__) + +mmengine_minimum_version = '0.6.0' +mmengine_maximum_version = '1.0.0' +mmengine_version = digit_version(mmengine.__version__) + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +assert (mmengine_version >= digit_version(mmengine_minimum_version) + and mmengine_version <= digit_version(mmengine_maximum_version)), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_minimum_version}, ' \ + f'<={mmengine_maximum_version}.' + +__all__ = ['__version__', 'short_version'] diff --git a/mmpose/apis/__init__.py b/mmpose/apis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..322ee9cf73d9fe7c796d5f47093f4c0a94b623fd --- /dev/null +++ b/mmpose/apis/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .inference import (collect_multi_frames, inference_bottomup, + inference_topdown, init_model) +from .inference_3d import (collate_pose_sequence, convert_keypoint_definition, + extract_pose_sequence, inference_pose_lifter_model) +from .inference_tracking import _compute_iou, _track_by_iou, _track_by_oks +from .inferencers import MMPoseInferencer, Pose2DInferencer +from .visualization import visualize + +__all__ = [ + 'init_model', 'inference_topdown', 'inference_bottomup', + 'collect_multi_frames', 'Pose2DInferencer', 'MMPoseInferencer', + '_track_by_iou', '_track_by_oks', '_compute_iou', + 'inference_pose_lifter_model', 'extract_pose_sequence', + 'convert_keypoint_definition', 'collate_pose_sequence', 'visualize' +] diff --git a/mmpose/apis/inference.py b/mmpose/apis/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..e88ea6dfb3fccd2a6d4faf7be424be2556c353f0 --- /dev/null +++ b/mmpose/apis/inference.py @@ -0,0 +1,280 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
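+# Illustrative sketch of how `init_model` and `inference_topdown` (defined
+# below) are typically combined for single-image, top-down inference. The
+# helper name, the config/checkpoint paths, the device and the example bbox
+# are placeholders, not fixed API choices.
+def _example_topdown_inference(config_path, checkpoint_path, image_path):
+    """Run top-down pose estimation on one image with a single bbox (sketch)."""
+    import cv2
+    import numpy as np
+
+    model = init_model(config_path, checkpoint_path, device='cpu')
+    img = cv2.imread(image_path)  # BGR ndarray, as expected by the pipeline
+    # One xyxy bbox roughly covering a person. Instance masks are optional;
+    # when provided, they are rasterized into polygon annotations below.
+    bboxes = np.array([[50, 50, 250, 400]], dtype=np.float32)
+    results = inference_topdown(model, img, bboxes=bboxes, bbox_format='xyxy')
+    return results[0].pred_instances.keypoints if results else None
+
+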
+import warnings +from pathlib import Path +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +from mmengine.config import Config +from mmengine.dataset import Compose, pseudo_collate +from mmengine.model.utils import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.runner import load_checkpoint +from PIL import Image + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.models.builder import build_pose_estimator +from mmpose.structures import PoseDataSample +from mmpose.structures.bbox import bbox_xywh2xyxy + +import cv2 + +def dataset_meta_from_config(config: Config, + dataset_mode: str = 'train') -> Optional[dict]: + """Get dataset metainfo from the model config. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. + dataset_mode (str): Specify the dataset of which to get the metainfo. + Options are ``'train'``, ``'val'`` and ``'test'``. Defaults to + ``'train'`` + + Returns: + dict, optional: The dataset metainfo. See + ``mmpose.datasets.datasets.utils.parse_pose_metainfo`` for details. + Return ``None`` if failing to get dataset metainfo from the config. + """ + try: + if dataset_mode == 'train': + dataset_cfg = config.train_dataloader.dataset + elif dataset_mode == 'val': + dataset_cfg = config.val_dataloader.dataset + elif dataset_mode == 'test': + dataset_cfg = config.test_dataloader.dataset + else: + raise ValueError( + f'Invalid dataset {dataset_mode} to get metainfo. ' + 'Should be one of "train", "val", or "test".') + + if 'metainfo' in dataset_cfg: + metainfo = dataset_cfg.metainfo + else: + import mmpose.datasets.datasets # noqa: F401, F403 + from mmpose.registry import DATASETS + + dataset_class = dataset_cfg.type if isinstance( + dataset_cfg.type, type) else DATASETS.get(dataset_cfg.type) + metainfo = dataset_class.METAINFO + + metainfo = parse_pose_metainfo(metainfo) + + except AttributeError: + metainfo = None + + return metainfo + + +def init_model(config: Union[str, Path, Config], + checkpoint: Optional[str] = None, + device: str = 'cuda:0', + cfg_options: Optional[dict] = None) -> nn.Module: + """Initialize a pose estimator from a config file. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. Defaults to ``None`` + device (str): The device where the anchors will be put on. + Defaults to ``'cuda:0'``. + cfg_options (dict, optional): Options to override some settings in + the used config. Defaults to ``None`` + + Returns: + nn.Module: The constructed pose estimator. 
+ """ + + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if cfg_options is not None: + config.merge_from_dict(cfg_options) + elif 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + config.model.train_cfg = None + + # register all modules in mmpose into the registries + scope = config.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + + model = build_pose_estimator(config.model) + model = revert_sync_batchnorm(model) + # get dataset_meta in this priority: checkpoint > config > default (COCO) + dataset_meta = None + + if checkpoint is not None: + ckpt = load_checkpoint(model, checkpoint, map_location='cpu') + + if 'dataset_meta' in ckpt.get('meta', {}): + # checkpoint from mmpose 1.x + dataset_meta = ckpt['meta']['dataset_meta'] + + if dataset_meta is None: + dataset_meta = dataset_meta_from_config(config, dataset_mode='train') + + if dataset_meta is None: + warnings.simplefilter('once') + warnings.warn('Can not load dataset_meta from the checkpoint or the ' + 'model config. Use COCO metainfo by default.') + dataset_meta = parse_pose_metainfo( + dict(from_file='configs/_base_/datasets/coco.py')) + + model.dataset_meta = dataset_meta + + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +def inference_topdown(model: nn.Module, + img: Union[np.ndarray, str], + bboxes: Optional[Union[List, np.ndarray]] = None, + masks: Optional[Union[List, np.ndarray]] = None, + bbox_format: str = 'xyxy') -> List[PoseDataSample]: + """Inference image with a top-down pose estimator. + + Args: + model (nn.Module): The top-down pose estimator + img (np.ndarray | str): The loaded image or image file to inference + bboxes (np.ndarray, optional): The bboxes in shape (N, 4), each row + represents a bbox. If not given, the entire image will be regarded + as a single bbox area. Defaults to ``None`` + bbox_format (str): The bbox format indicator. Options are ``'xywh'`` + and ``'xyxy'``. Defaults to ``'xyxy'`` + + Returns: + List[:obj:`PoseDataSample`]: The inference results. Specifically, the + predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints`` and + ``data_sample.pred_instances.keypoint_scores``. + """ + scope = model.cfg.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + if bboxes is None or len(bboxes) == 0: + # get bbox from the image size + if isinstance(img, str): + w, h = Image.open(img).size + else: + h, w = img.shape[:2] + + bboxes = np.array([[0, 0, w, h]], dtype=np.float32) + else: + if isinstance(bboxes, list): + bboxes = np.array(bboxes) + + assert bbox_format in {'xyxy', 'xywh'}, \ + f'Invalid bbox_format "{bbox_format}".' 
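+    # From this point on, boxes are normalized to xyxy and any provided masks
+    # are converted into COCO-style polygon lists (one entry per box) before
+    # being packed into the per-instance data samples.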
+ + if bbox_format == 'xywh': + bboxes = bbox_xywh2xyxy(bboxes) + + if masks is None or len(masks) == 0: + masks = np.zeros((bboxes.shape[0], img.shape[0], img.shape[1]), + dtype=np.uint8) + + # Masks are expected in polygon format + poly_masks = [] + for mask in masks: + if np.sum(mask) == 0: + poly_masks.append(None) + else: + contours, _ = cv2.findContours((mask*255).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + polygons = [contour.flatten() for contour in contours if len(contour) > 3] + poly_masks.append(polygons if polygons else None) + + # construct batch data samples + data_list = [] + for bbox, pmask in zip(bboxes, poly_masks): + if isinstance(img, str): + data_info = dict(img_path=img) + else: + data_info = dict(img=img) + data_info['bbox'] = bbox[None] # shape (1, 4) + data_info['segmentation'] = pmask + data_info['bbox_score'] = np.ones(1, dtype=np.float32) # shape (1,) + data_info.update(model.dataset_meta) + data_list.append(pipeline(data_info)) + + if data_list: + # collate data list into a batch, which is a dict with following keys: + # batch['inputs']: a list of input images + # batch['data_samples']: a list of :obj:`PoseDataSample` + batch = pseudo_collate(data_list) + with torch.no_grad(): + results = model.test_step(batch) + else: + results = [] + + return results + + +def inference_bottomup(model: nn.Module, img: Union[np.ndarray, str]): + """Inference image with a bottom-up pose estimator. + + Args: + model (nn.Module): The bottom-up pose estimator + img (np.ndarray | str): The loaded image or image file to inference + + Returns: + List[:obj:`PoseDataSample`]: The inference results. Specifically, the + predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints`` and + ``data_sample.pred_instances.keypoint_scores``. + """ + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + # prepare data batch + if isinstance(img, str): + data_info = dict(img_path=img) + else: + data_info = dict(img=img) + data_info.update(model.dataset_meta) + data = pipeline(data_info) + batch = pseudo_collate([data]) + + with torch.no_grad(): + results = model.test_step(batch) + + return results + + +def collect_multi_frames(video, frame_id, indices, online=False): + """Collect multi frames from the video. + + Args: + video (mmcv.VideoReader): A VideoReader of the input video file. + frame_id (int): index of the current frame + indices (list(int)): index offsets of the frames to collect + online (bool): inference mode, if set to True, can not use future + frame information. + + Returns: + list(ndarray): multi frames collected from the input video file. + """ + num_frames = len(video) + frames = [] + # put the current frame at first + frames.append(video[frame_id]) + # use multi frames for inference + for idx in indices: + # skip current frame + if idx == 0: + continue + support_idx = frame_id + idx + # online mode, can not use future frame information + if online: + support_idx = np.clip(support_idx, 0, frame_id) + else: + support_idx = np.clip(support_idx, 0, num_frames - 1) + frames.append(video[support_idx]) + + return frames diff --git a/mmpose/apis/inference_3d.py b/mmpose/apis/inference_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..b4151e804a593da7cb5355ece804924ccbd7f0b0 --- /dev/null +++ b/mmpose/apis/inference_3d.py @@ -0,0 +1,360 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
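+# Illustrative sketch of how the utilities in this file are typically chained
+# for 2D-to-3D lifting. Assumptions: `lifter` was built with `init_model`,
+# and `pose_results_list` holds per-frame 2D results
+# (List[List[PoseDataSample]]) in which every sample carries a `track_id`.
+# The helper name and defaults are placeholders.
+def _example_lift_one_frame(lifter, pose_results_list, frame_idx):
+    """Lift the 2D pose sequence around `frame_idx` to 3D (sketch)."""
+    dataset_cfg = lifter.cfg.test_dataloader.dataset
+    pose_seq_2d = extract_pose_sequence(
+        pose_results_list,
+        frame_idx=frame_idx,
+        causal=dataset_cfg.get('causal', False),
+        seq_len=dataset_cfg.get('seq_len', 1),
+        step=1)
+    # Returns PoseDataSamples whose `pred_instances.keypoints_3d` hold the
+    # lifted poses.
+    return inference_pose_lifter_model(
+        lifter, pose_seq_2d, with_track_id=True, image_size=None,
+        norm_pose_2d=False)
+
+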
+import numpy as np +import torch +from mmengine.dataset import Compose, pseudo_collate +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.structures import PoseDataSample + + +def convert_keypoint_definition(keypoints, pose_det_dataset, + pose_lift_dataset): + """Convert pose det dataset keypoints definition to pose lifter dataset + keypoints definition, so that they are compatible with the definitions + required for 3D pose lifting. + + Args: + keypoints (ndarray[N, K, 2 or 3]): 2D keypoints to be transformed. + pose_det_dataset, (str): Name of the dataset for 2D pose detector. + pose_lift_dataset (str): Name of the dataset for pose lifter model. + + Returns: + ndarray[K, 2 or 3]: the transformed 2D keypoints. + """ + assert pose_lift_dataset in [ + 'h36m', 'h3wb'], '`pose_lift_dataset` should be ' \ + f'`h36m`, but got {pose_lift_dataset}.' + + keypoints_new = np.zeros((keypoints.shape[0], 17, keypoints.shape[2]), + dtype=keypoints.dtype) + if pose_lift_dataset in ['h36m', 'h3wb']: + if pose_det_dataset in ['h36m', 'coco_wholebody']: + keypoints_new = keypoints + elif pose_det_dataset in ['coco', 'posetrack18']: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 11] + keypoints[:, 12]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 5] + keypoints[:, 6]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # in COCO, head is in the middle of l_eye and r_eye + # in PoseTrack18, head is in the middle of head_bottom and head_top + keypoints_new[:, 10] = (keypoints[:, 1] + keypoints[:, 2]) / 2 + # rearrange other keypoints + keypoints_new[:, [1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]] + elif pose_det_dataset in ['aic']: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 9] + keypoints[:, 6]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 3] + keypoints[:, 0]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # neck base (top end of neck) is 1/4 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 + # head (spherical centre of head) is 7/12 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 10] = (5 * keypoints[:, 13] + + 7 * keypoints[:, 12]) / 12 + + keypoints_new[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [6, 7, 8, 9, 10, 11, 3, 4, 5, 0, 1, 2]] + elif pose_det_dataset in ['crowdpose']: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 6] + keypoints[:, 7]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 0] + keypoints[:, 1]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # neck base (top end of neck) is 1/4 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 + # head (spherical centre of head) is 7/12 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 10] = (5 * keypoints[:, 13] + + 7 * keypoints[:, 12]) / 12 + + keypoints_new[:, [1, 2, 3, 4, 5, 6, 
11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [7, 9, 11, 6, 8, 10, 0, 2, 4, 1, 3, 5]] + else: + raise NotImplementedError( + f'unsupported conversion between {pose_lift_dataset} and ' + f'{pose_det_dataset}') + + return keypoints_new + + +def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1): + """Extract the target frame from 2D pose results, and pad the sequence to a + fixed length. + + Args: + pose_results (List[List[:obj:`PoseDataSample`]]): Multi-frame pose + detection results stored in a list. + frame_idx (int): The index of the frame in the original video. + causal (bool): If True, the target frame is the last frame in + a sequence. Otherwise, the target frame is in the middle of + a sequence. + seq_len (int): The number of frames in the input sequence. + step (int): Step size to extract frames from the video. + + Returns: + List[List[:obj:`PoseDataSample`]]: Multi-frame pose detection results + stored in a nested list with a length of seq_len. + """ + if causal: + frames_left = seq_len - 1 + frames_right = 0 + else: + frames_left = (seq_len - 1) // 2 + frames_right = frames_left + num_frames = len(pose_results) + + # get the padded sequence + pad_left = max(0, frames_left - frame_idx // step) + pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step) + start = max(frame_idx % step, frame_idx - frames_left * step) + end = min(num_frames - (num_frames - 1 - frame_idx) % step, + frame_idx + frames_right * step + 1) + pose_results_seq = [pose_results[0]] * pad_left + \ + pose_results[start:end:step] + [pose_results[-1]] * pad_right + return pose_results_seq + + +def collate_pose_sequence(pose_results_2d, + with_track_id=True, + target_frame=-1): + """Reorganize multi-frame pose detection results into individual pose + sequences. + + Note: + - The temporal length of the pose detection results: T + - The number of the person instances: N + - The number of the keypoints: K + - The channel number of each keypoint: C + + Args: + pose_results_2d (List[List[:obj:`PoseDataSample`]]): Multi-frame pose + detection results stored in a nested list. Each element of the + outer list is the pose detection results of a single frame, and + each element of the inner list is the pose information of one + person, which contains: + + - keypoints (ndarray[K, 2 or 3]): x, y, [score] + - track_id (int): unique id of each person, required when + ``with_track_id==True``` + + with_track_id (bool): If True, the element in pose_results is expected + to contain "track_id", which will be used to gather the pose + sequence of a person from multiple frames. Otherwise, the pose + results in each frame are expected to have a consistent number and + order of identities. Default is True. + target_frame (int): The index of the target frame. Default: -1. + + Returns: + List[:obj:`PoseDataSample`]: Indivisual pose sequence in with length N. 
+ """ + T = len(pose_results_2d) + assert T > 0 + + target_frame = (T + target_frame) % T # convert negative index to positive + + N = len( + pose_results_2d[target_frame]) # use identities in the target frame + if N == 0: + return [] + + B, K, C = pose_results_2d[target_frame][0].pred_instances.keypoints.shape + + track_ids = None + if with_track_id: + track_ids = [res.track_id for res in pose_results_2d[target_frame]] + + pose_sequences = [] + for idx in range(N): + pose_seq = PoseDataSample() + pred_instances = InstanceData() + + gt_instances = pose_results_2d[target_frame][idx].gt_instances.clone() + pred_instances = pose_results_2d[target_frame][ + idx].pred_instances.clone() + pose_seq.pred_instances = pred_instances + pose_seq.gt_instances = gt_instances + + if not with_track_id: + pose_seq.pred_instances.keypoints = np.stack([ + frame[idx].pred_instances.keypoints + for frame in pose_results_2d + ], + axis=1) + else: + keypoints = np.zeros((B, T, K, C), dtype=np.float32) + keypoints[:, target_frame] = pose_results_2d[target_frame][ + idx].pred_instances.keypoints + # find the left most frame containing track_ids[idx] + for frame_idx in range(target_frame - 1, -1, -1): + contains_idx = False + for res in pose_results_2d[frame_idx]: + if res.track_id == track_ids[idx]: + keypoints[:, frame_idx] = res.pred_instances.keypoints + contains_idx = True + break + if not contains_idx: + # replicate the left most frame + keypoints[:, :frame_idx + 1] = keypoints[:, frame_idx + 1] + break + # find the right most frame containing track_idx[idx] + for frame_idx in range(target_frame + 1, T): + contains_idx = False + for res in pose_results_2d[frame_idx]: + if res.track_id == track_ids[idx]: + keypoints[:, frame_idx] = res.pred_instances.keypoints + contains_idx = True + break + if not contains_idx: + # replicate the right most frame + keypoints[:, frame_idx + 1:] = keypoints[:, frame_idx] + break + pose_seq.pred_instances.set_field(keypoints, 'keypoints') + pose_sequences.append(pose_seq) + + return pose_sequences + + +def inference_pose_lifter_model(model, + pose_results_2d, + with_track_id=True, + image_size=None, + norm_pose_2d=False): + """Inference 3D pose from 2D pose sequences using a pose lifter model. + + Args: + model (nn.Module): The loaded pose lifter model + pose_results_2d (List[List[:obj:`PoseDataSample`]]): The 2D pose + sequences stored in a nested list. + with_track_id: If True, the element in pose_results_2d is expected to + contain "track_id", which will be used to gather the pose sequence + of a person from multiple frames. Otherwise, the pose results in + each frame are expected to have a consistent number and order of + identities. Default is True. + image_size (tuple|list): image width, image height. If None, image size + will not be contained in dict ``data``. + norm_pose_2d (bool): If True, scale the bbox (along with the 2D + pose) to the average bbox scale of the dataset, and move the bbox + (along with the 2D pose) to the average bbox center of the dataset. + + Returns: + List[:obj:`PoseDataSample`]: 3D pose inference results. Specifically, + the predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints_3d``. 
+ """ + init_default_scope(model.cfg.get('default_scope', 'mmpose')) + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + causal = model.cfg.test_dataloader.dataset.get('causal', False) + target_idx = -1 if causal else len(pose_results_2d) // 2 + + dataset_info = model.dataset_meta + if dataset_info is not None: + if 'stats_info' in dataset_info: + bbox_center = dataset_info['stats_info']['bbox_center'] + bbox_scale = dataset_info['stats_info']['bbox_scale'] + else: + if norm_pose_2d: + # compute the average bbox center and scale from the + # datasamples in pose_results_2d + bbox_center = np.zeros((1, 2), dtype=np.float32) + bbox_scale = 0 + num_bbox = 0 + for pose_res in pose_results_2d: + for data_sample in pose_res: + for bbox in data_sample.pred_instances.bboxes: + bbox_center += np.array([[(bbox[0] + bbox[2]) / 2, + (bbox[1] + bbox[3]) / 2] + ]) + bbox_scale += max(bbox[2] - bbox[0], + bbox[3] - bbox[1]) + num_bbox += 1 + bbox_center /= num_bbox + bbox_scale /= num_bbox + else: + bbox_center = None + bbox_scale = None + + pose_results_2d_copy = [] + for i, pose_res in enumerate(pose_results_2d): + pose_res_copy = [] + for j, data_sample in enumerate(pose_res): + data_sample_copy = PoseDataSample() + data_sample_copy.gt_instances = data_sample.gt_instances.clone() + data_sample_copy.pred_instances = data_sample.pred_instances.clone( + ) + data_sample_copy.track_id = data_sample.track_id + kpts = data_sample.pred_instances.keypoints + bboxes = data_sample.pred_instances.bboxes + keypoints = [] + for k in range(len(kpts)): + kpt = kpts[k] + if norm_pose_2d: + bbox = bboxes[k] + center = np.array([[(bbox[0] + bbox[2]) / 2, + (bbox[1] + bbox[3]) / 2]]) + scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) + keypoints.append((kpt[:, :2] - center) / scale * + bbox_scale + bbox_center) + else: + keypoints.append(kpt[:, :2]) + data_sample_copy.pred_instances.set_field( + np.array(keypoints), 'keypoints') + pose_res_copy.append(data_sample_copy) + pose_results_2d_copy.append(pose_res_copy) + + pose_sequences_2d = collate_pose_sequence(pose_results_2d_copy, + with_track_id, target_idx) + + if not pose_sequences_2d: + return [] + + data_list = [] + for i, pose_seq in enumerate(pose_sequences_2d): + data_info = dict() + + keypoints_2d = pose_seq.pred_instances.keypoints + keypoints_2d = np.squeeze( + keypoints_2d, axis=0) if keypoints_2d.ndim == 4 else keypoints_2d + + T, K, C = keypoints_2d.shape + + data_info['keypoints'] = keypoints_2d + data_info['keypoints_visible'] = np.ones(( + T, + K, + ), dtype=np.float32) + data_info['lifting_target'] = np.zeros((1, K, 3), dtype=np.float32) + data_info['factor'] = np.zeros((T, ), dtype=np.float32) + data_info['lifting_target_visible'] = np.ones((1, K, 1), + dtype=np.float32) + + if image_size is not None: + assert len(image_size) == 2 + data_info['camera_param'] = dict(w=image_size[0], h=image_size[1]) + + data_info.update(model.dataset_meta) + data_list.append(pipeline(data_info)) + + if data_list: + # collate data list into a batch, which is a dict with following keys: + # batch['inputs']: a list of input images + # batch['data_samples']: a list of :obj:`PoseDataSample` + batch = pseudo_collate(data_list) + with torch.no_grad(): + results = model.test_step(batch) + else: + results = [] + + return results diff --git a/mmpose/apis/inference_tracking.py b/mmpose/apis/inference_tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..c823adcfc7107e1e63ba0a62ad48148d7fc354c9 --- /dev/null +++ 
b/mmpose/apis/inference_tracking.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np + +from mmpose.evaluation.functional.nms import oks_iou + + +def _compute_iou(bboxA, bboxB): + """Compute the Intersection over Union (IoU) between two boxes . + + Args: + bboxA (list): The first bbox info (left, top, right, bottom, score). + bboxB (list): The second bbox info (left, top, right, bottom, score). + + Returns: + float: The IoU value. + """ + + x1 = max(bboxA[0], bboxB[0]) + y1 = max(bboxA[1], bboxB[1]) + x2 = min(bboxA[2], bboxB[2]) + y2 = min(bboxA[3], bboxB[3]) + + inter_area = max(0, x2 - x1) * max(0, y2 - y1) + + bboxA_area = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1]) + bboxB_area = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1]) + union_area = float(bboxA_area + bboxB_area - inter_area) + if union_area == 0: + union_area = 1e-5 + warnings.warn('union_area=0 is unexpected') + + iou = inter_area / union_area + + return iou + + +def _track_by_iou(res, results_last, thr): + """Get track id using IoU tracking greedily.""" + + bbox = list(np.squeeze(res.pred_instances.bboxes, axis=0)) + + max_iou_score = -1 + max_index = -1 + match_result = {} + for index, res_last in enumerate(results_last): + bbox_last = list(np.squeeze(res_last.pred_instances.bboxes, axis=0)) + + iou_score = _compute_iou(bbox, bbox_last) + if iou_score > max_iou_score: + max_iou_score = iou_score + max_index = index + + if max_iou_score > thr: + track_id = results_last[max_index].track_id + match_result = results_last[max_index] + del results_last[max_index] + else: + track_id = -1 + + return track_id, results_last, match_result + + +def _track_by_oks(res, results_last, thr, sigmas=None): + """Get track id using OKS tracking greedily.""" + keypoint = np.concatenate((res.pred_instances.keypoints, + res.pred_instances.keypoint_scores[:, :, None]), + axis=2) + keypoint = np.squeeze(keypoint, axis=0).reshape((-1)) + area = np.squeeze(res.pred_instances.areas, axis=0) + max_index = -1 + match_result = {} + + if len(results_last) == 0: + return -1, results_last, match_result + + keypoints_last = np.array([ + np.squeeze( + np.concatenate( + (res_last.pred_instances.keypoints, + res_last.pred_instances.keypoint_scores[:, :, None]), + axis=2), + axis=0).reshape((-1)) for res_last in results_last + ]) + area_last = np.array([ + np.squeeze(res_last.pred_instances.areas, axis=0) + for res_last in results_last + ]) + + oks_score = oks_iou( + keypoint, keypoints_last, area, area_last, sigmas=sigmas) + + max_index = np.argmax(oks_score) + + if oks_score[max_index] > thr: + track_id = results_last[max_index].track_id + match_result = results_last[max_index] + del results_last[max_index] + else: + track_id = -1 + + return track_id, results_last, match_result diff --git a/mmpose/apis/inferencers/__init__.py b/mmpose/apis/inferencers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e2b5c8293f261ef5651f2d379e35c484ae53e40 --- /dev/null +++ b/mmpose/apis/inferencers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
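+# Illustrative usage sketch for the inferencers exported below (the 'human'
+# alias and the image path are placeholders; see `get_model_aliases()` for the
+# aliases available in a given install):
+#
+#     from mmpose.apis.inferencers import MMPoseInferencer
+#
+#     inferencer = MMPoseInferencer('human')           # 2D body-pose alias
+#     result_generator = inferencer('demo.jpg', show=False)
+#     result = next(result_generator)                  # one dict per input
+#     keypoints = result['predictions'][0][0]['keypoints']
+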
+from .hand3d_inferencer import Hand3DInferencer +from .mmpose_inferencer import MMPoseInferencer +from .pose2d_inferencer import Pose2DInferencer +from .pose3d_inferencer import Pose3DInferencer +from .utils import get_model_aliases + +__all__ = [ + 'Pose2DInferencer', 'MMPoseInferencer', 'get_model_aliases', + 'Pose3DInferencer', 'Hand3DInferencer' +] diff --git a/mmpose/apis/inferencers/base_mmpose_inferencer.py b/mmpose/apis/inferencers/base_mmpose_inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..574063e824198bb535d3737df472300a78229c3f --- /dev/null +++ b/mmpose/apis/inferencers/base_mmpose_inferencer.py @@ -0,0 +1,691 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import logging +import mimetypes +import os +from collections import defaultdict +from typing import (Callable, Dict, Generator, Iterable, List, Optional, + Sequence, Tuple, Union) + +import cv2 +import mmcv +import mmengine +import numpy as np +import torch.nn as nn +from mmengine.config import Config, ConfigDict +from mmengine.dataset import Compose +from mmengine.fileio import (get_file_backend, isdir, join_path, + list_dir_or_file) +from mmengine.infer.infer import BaseInferencer, ModelType +from mmengine.logging import print_log +from mmengine.registry import init_default_scope +from mmengine.runner.checkpoint import _load_checkpoint_to_model +from mmengine.structures import InstanceData +from mmengine.utils import mkdir_or_exist +from rich.progress import track + +from mmpose.apis.inference import dataset_meta_from_config +from mmpose.registry import DATASETS +from mmpose.structures import PoseDataSample, split_instances +from .utils import default_det_models + +try: + from mmdet.apis.det_inferencer import DetInferencer + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +class BaseMMPoseInferencer(BaseInferencer): + """The base class for MMPose inferencers.""" + + preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', + 'kpt_thr', 'vis_out_dir', 'black_background' + } + postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'} + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = None, + show_progress: bool = False) -> None: + super().__init__( + model, weights, device, scope, show_progress=show_progress) + + def _init_detector( + self, + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None, + device: Optional[str] = None, + ): + object_type = DATASETS.get(self.cfg.dataset_type).__module__.split( + 'datasets.')[-1].split('.')[0].lower() + + if det_model in ('whole_image', 'whole-image') or \ + (det_model is None and + object_type not in default_det_models): + self.detector = None + + else: + det_scope = 'mmdet' + if det_model is None: + det_info = default_det_models[object_type] + det_model, det_weights, det_cat_ids = det_info[ + 'model'], det_info['weights'], det_info['cat_ids'] 
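+            # If `det_model` points to an existing config file, read its
+            # `default_scope` so the detector is built from the matching
+            # registry (e.g. a custom scope instead of plain 'mmdet').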
+ elif os.path.exists(det_model): + det_cfg = Config.fromfile(det_model) + det_scope = det_cfg.default_scope + + if has_mmdet: + det_kwargs = dict( + model=det_model, + weights=det_weights, + device=device, + scope=det_scope, + ) + # for compatibility with low version of mmdet + if 'show_progress' in inspect.signature( + DetInferencer).parameters: + det_kwargs['show_progress'] = False + + self.detector = DetInferencer(**det_kwargs) + else: + raise RuntimeError( + 'MMDetection (v3.0.0 or above) is required to build ' + 'inferencers for top-down pose estimation models.') + + if isinstance(det_cat_ids, (tuple, list)): + self.det_cat_ids = det_cat_ids + else: + self.det_cat_ids = (det_cat_ids, ) + + def _load_weights_to_model(self, model: nn.Module, + checkpoint: Optional[dict], + cfg: Optional[ConfigType]) -> None: + """Loading model weights and meta information from cfg and checkpoint. + + Subclasses could override this method to load extra meta information + from ``checkpoint`` and ``cfg`` to model. + + Args: + model (nn.Module): Model to load weights and meta information. + checkpoint (dict, optional): The loaded checkpoint. + cfg (Config or ConfigDict, optional): The loaded config. + """ + if checkpoint is not None: + _load_checkpoint_to_model(model, checkpoint) + checkpoint_meta = checkpoint.get('meta', {}) + # save the dataset_meta in the model for convenience + if 'dataset_meta' in checkpoint_meta: + # mmpose 1.x + model.dataset_meta = checkpoint_meta['dataset_meta'] + else: + print_log( + 'dataset_meta are not saved in the checkpoint\'s ' + 'meta data, load via config.', + logger='current', + level=logging.WARNING) + model.dataset_meta = dataset_meta_from_config( + cfg, dataset_mode='train') + else: + print_log( + 'Checkpoint is not loaded, and the inference ' + 'result is calculated by the randomly initialized ' + 'model!', + logger='current', + level=logging.WARNING) + model.dataset_meta = dataset_meta_from_config( + cfg, dataset_mode='train') + + def _inputs_to_list(self, inputs: InputsType) -> Iterable: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - str: + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string + according to the task. + + Args: + inputs (InputsType): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. 
+ """ + self._video_input = False + + if isinstance(inputs, str): + backend = get_file_backend(inputs) + if hasattr(backend, 'isdir') and isdir(inputs): + # Backends like HttpsBackend do not implement `isdir`, so only + # those backends that implement `isdir` could accept the + # inputs as a directory + filepath_list = [ + join_path(inputs, fname) + for fname in list_dir_or_file(inputs, list_dir=False) + ] + inputs = [] + for filepath in filepath_list: + input_type = mimetypes.guess_type(filepath)[0].split( + '/')[0] + if input_type == 'image': + inputs.append(filepath) + inputs.sort() + else: + # if inputs is a path to a video file, it will be converted + # to a list containing separated frame filenames + input_type = mimetypes.guess_type(inputs)[0].split('/')[0] + if input_type == 'video': + self._video_input = True + video = mmcv.VideoReader(inputs) + self.video_info = dict( + fps=video.fps, + name=os.path.basename(inputs), + writer=None, + width=video.width, + height=video.height, + predictions=[]) + inputs = video + elif input_type == 'image': + inputs = [inputs] + else: + raise ValueError(f'Expected input to be an image, video, ' + f'or folder, but received {inputs} of ' + f'type {input_type}.') + + elif isinstance(inputs, np.ndarray): + inputs = [inputs] + + return inputs + + def _get_webcam_inputs(self, inputs: str) -> Generator: + """Sets up and returns a generator function that reads frames from a + webcam input. The generator function returns a new frame each time it + is iterated over. + + Args: + inputs (str): A string describing the webcam input, in the format + "webcam:id". + + Returns: + A generator function that yields frames from the webcam input. + + Raises: + ValueError: If the inputs string is not in the expected format. + """ + + # Ensure the inputs string is in the expected format. + inputs = inputs.lower() + assert inputs.startswith('webcam'), f'Expected input to start with ' \ + f'"webcam", but got "{inputs}"' + + # Parse the camera ID from the inputs string. + inputs_ = inputs.split(':') + if len(inputs_) == 1: + camera_id = 0 + elif len(inputs_) == 2 and str.isdigit(inputs_[1]): + camera_id = int(inputs_[1]) + else: + raise ValueError( + f'Expected webcam input to have format "webcam:id", ' + f'but got "{inputs}"') + + # Attempt to open the video capture object. + vcap = cv2.VideoCapture(camera_id) + if not vcap.isOpened(): + print_log( + f'Cannot open camera (ID={camera_id})', + logger='current', + level=logging.WARNING) + return [] + + # Set video input flag and metadata. + self._video_input = True + (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') + if int(major_ver) < 3: + fps = vcap.get(cv2.cv.CV_CAP_PROP_FPS) + width = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH) + height = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT) + else: + fps = vcap.get(cv2.CAP_PROP_FPS) + width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH) + height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT) + self.video_info = dict( + fps=fps, + name='webcam.mp4', + writer=None, + width=width, + height=height, + predictions=[]) + + def _webcam_reader() -> Generator: + while True: + if cv2.waitKey(5) & 0xFF == 27: + vcap.release() + break + + ret_val, frame = vcap.read() + if not ret_val: + break + + yield frame + + return _webcam_reader() + + def _init_pipeline(self, cfg: ConfigType) -> Callable: + """Initialize the test pipeline. + + Args: + cfg (ConfigType): model config path or dict + + Returns: + A pipeline to handle various input data, such as ``str``, + ``np.ndarray``. 
The returned pipeline will be used to process + a single data. + """ + scope = cfg.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + return Compose(cfg.test_dataloader.dataset.pipeline) + + def update_model_visualizer_settings(self, **kwargs): + """Update the settings of models and visualizer according to inference + arguments.""" + + pass + + def preprocess(self, + inputs: InputsType, + batch_size: int = 1, + bboxes: Optional[List] = None, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + **kwargs): + """Process the inputs into a model-feedable format. + + Args: + inputs (InputsType): Inputs given by user. + batch_size (int): batch size. Defaults to 1. + bbox_thr (float): threshold for bounding box detection. + Defaults to 0.3. + nms_thr (float): IoU threshold for bounding box NMS. + Defaults to 0.3. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + List[str or np.ndarray]: List of original inputs in the batch + """ + + # One-stage pose estimators perform prediction filtering within the + # head's `predict` method. Here, we set the arguments for filtering + if self.cfg.model.type == 'BottomupPoseEstimator': + # 1. init with default arguments + test_cfg = self.model.head.test_cfg.copy() + # 2. update the score_thr and nms_thr in the test_cfg of the head + if 'score_thr' in test_cfg: + test_cfg['score_thr'] = bbox_thr + if 'nms_thr' in test_cfg: + test_cfg['nms_thr'] = nms_thr + self.model.test_cfg = test_cfg + + for i, input in enumerate(inputs): + bbox = bboxes[i] if bboxes else [] + data_infos = self.preprocess_single( + input, + index=i, + bboxes=bbox, + bbox_thr=bbox_thr, + nms_thr=nms_thr, + **kwargs) + # only supports inference with batch size 1 + yield self.collate_fn(data_infos), [input] + + def __call__( + self, + inputs: InputsType, + return_datasamples: bool = False, + batch_size: int = 1, + out_dir: Optional[str] = None, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasamples (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + out_dir (str, optional): directory to save visualization + results and predictions. Will be overoden if vis_out_dir or + pred_out_dir are given. Defaults to None + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, + ``visualize_kwargs`` and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + if out_dir is not None: + if 'vis_out_dir' not in kwargs: + kwargs['vis_out_dir'] = f'{out_dir}/visualizations' + if 'pred_out_dir' not in kwargs: + kwargs['pred_out_dir'] = f'{out_dir}/predictions' + + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + self.update_model_visualizer_settings(**kwargs) + + # preprocessing + if isinstance(inputs, str) and inputs.startswith('webcam'): + inputs = self._get_webcam_inputs(inputs) + batch_size = 1 + if not visualize_kwargs.get('show', False): + print_log( + 'The display mode is closed when using webcam ' + 'input. 
It will be turned on automatically.', + logger='current', + level=logging.WARNING) + visualize_kwargs['show'] = True + else: + inputs = self._inputs_to_list(inputs) + + # check the compatibility between inputs/outputs + if not self._video_input and len(inputs) > 0: + vis_out_dir = visualize_kwargs.get('vis_out_dir', None) + if vis_out_dir is not None: + _, file_extension = os.path.splitext(vis_out_dir) + assert not file_extension, f'the argument `vis_out_dir` ' \ + f'should be a folder while the input contains multiple ' \ + f'images, but got {vis_out_dir}' + + if 'bbox_thr' in self.forward_kwargs: + forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) + inputs = self.preprocess( + inputs, batch_size=batch_size, **preprocess_kwargs) + + preds = [] + + for proc_inputs, ori_inputs in (track(inputs, description='Inference') + if self.show_progress else inputs): + preds = self.forward(proc_inputs, **forward_kwargs) + + visualization = self.visualize(ori_inputs, preds, + **visualize_kwargs) + results = self.postprocess( + preds, + visualization, + return_datasamples=return_datasamples, + **postprocess_kwargs) + yield results + + if self._video_input: + self._finalize_video_processing( + postprocess_kwargs.get('pred_out_dir', '')) + + # In 3D Inferencers, some intermediate results (e.g. 2d keypoints) + # will be temporarily stored in `self._buffer`. It's essential to + # clear this information to prevent any interference with subsequent + # inferences. + if hasattr(self, '_buffer'): + self._buffer.clear() + + def visualize(self, + inputs: list, + preds: List[PoseDataSample], + return_vis: bool = False, + show: bool = False, + draw_bbox: bool = False, + wait_time: float = 0, + radius: int = 3, + thickness: int = 1, + kpt_thr: float = 0.3, + vis_out_dir: str = '', + window_name: str = '', + black_background: bool = False, + **kwargs) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (ms). Defaults to 0 + draw_bbox (bool): Whether to draw the bounding boxes. + Defaults to False + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): Directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + window_name (str, optional): Title of display window. + black_background (bool, optional): Whether to plot keypoints on a + black image instead of the input image. Defaults to False. + + Returns: + List[np.ndarray]: Visualization results. 
+ """ + if (not return_vis) and (not show) and (not vis_out_dir): + return + + if getattr(self, 'visualizer', None) is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + self.visualizer.radius = radius + self.visualizer.line_width = thickness + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input, str): + img = mmcv.imread(single_input, channel_order='rgb') + elif isinstance(single_input, np.ndarray): + img = mmcv.bgr2rgb(single_input) + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + if black_background: + img = img * 0 + + img_name = os.path.basename(pred.metainfo['img_path']) + window_name = window_name if window_name else img_name + + # since visualization and inference utilize the same process, + # the wait time is reduced when a video input is utilized, + # thereby eliminating the issue of inference getting stuck. + wait_time = 1e-5 if self._video_input else wait_time + + visualization = self.visualizer.add_datasample( + window_name, + img, + pred, + draw_gt=False, + draw_bbox=draw_bbox, + show=show, + wait_time=wait_time, + kpt_thr=kpt_thr, + **kwargs) + results.append(visualization) + + if vis_out_dir: + self.save_visualization( + visualization, + vis_out_dir, + img_name=img_name, + ) + + if return_vis: + return results + else: + return [] + + def save_visualization(self, visualization, vis_out_dir, img_name=None): + out_img = mmcv.rgb2bgr(visualization) + _, file_extension = os.path.splitext(vis_out_dir) + if file_extension: + dir_name = os.path.dirname(vis_out_dir) + file_name = os.path.basename(vis_out_dir) + else: + dir_name = vis_out_dir + file_name = None + mkdir_or_exist(dir_name) + + if self._video_input: + + if self.video_info['writer'] is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + if file_name is None: + file_name = os.path.basename(self.video_info['name']) + out_file = join_path(dir_name, file_name) + self.video_info['output_file'] = out_file + self.video_info['writer'] = cv2.VideoWriter( + out_file, fourcc, self.video_info['fps'], + (visualization.shape[1], visualization.shape[0])) + self.video_info['writer'].write(out_img) + + else: + if file_name is None: + file_name = img_name if img_name else 'visualization.jpg' + + out_file = join_path(dir_name, file_name) + mmcv.imwrite(out_img, out_file) + print_log( + f'the output image has been saved at {out_file}', + logger='current', + level=logging.INFO) + + def postprocess( + self, + preds: List[PoseDataSample], + visualization: List[np.ndarray], + return_datasample=None, + return_datasamples=False, + pred_out_dir: str = '', + ) -> dict: + """Process the predictions and visualization results from ``forward`` + and ``visualize``. + + This method should be responsible for the following tasks: + + 1. Convert datasamples into a json-serializable dict if needed. + 2. Pack the predictions and visualization results and return them. + 3. Dump or log the predictions. + + Args: + preds (List[Dict]): Predictions of the model. + visualization (np.ndarray): Visualized predictions. + return_datasamples (bool): Whether to return results as + datasamples. Defaults to False. + pred_out_dir (str): Directory to save the inference results w/o + visualization. If left as empty, no file will be saved. + Defaults to ''. 
+ + Returns: + dict: Inference and visualization results with key ``predictions`` + and ``visualization`` + + - ``visualization (Any)``: Returned by :meth:`visualize` + - ``predictions`` (dict or DataSample): Returned by + :meth:`forward` and processed in :meth:`postprocess`. + If ``return_datasamples=False``, it usually should be a + json-serializable dict containing only basic data elements such + as strings and numbers. + """ + if return_datasample is not None: + print_log( + 'The `return_datasample` argument is deprecated ' + 'and will be removed in future versions. Please ' + 'use `return_datasamples`.', + logger='current', + level=logging.WARNING) + return_datasamples = return_datasample + + result_dict = defaultdict(list) + + result_dict['visualization'] = visualization + for pred in preds: + if not return_datasamples: + # convert datasamples to list of instance predictions + pred = split_instances(pred.pred_instances) + result_dict['predictions'].append(pred) + + if pred_out_dir != '': + for pred, data_sample in zip(result_dict['predictions'], preds): + if self._video_input: + # For video or webcam input, predictions for each frame + # are gathered in the 'predictions' key of 'video_info' + # dictionary. All frame predictions are then stored into + # a single file after processing all frames. + self.video_info['predictions'].append(pred) + else: + # For non-video inputs, predictions are stored in separate + # JSON files. The filename is determined by the basename + # of the input image path with a '.json' extension. The + # predictions are then dumped into this file. + fname = os.path.splitext( + os.path.basename( + data_sample.metainfo['img_path']))[0] + '.json' + mmengine.dump( + pred, join_path(pred_out_dir, fname), indent=' ') + + return result_dict + + def _finalize_video_processing( + self, + pred_out_dir: str = '', + ): + """Finalize video processing by releasing the video writer and saving + predictions to a file. + + This method should be called after completing the video processing. It + releases the video writer, if it exists, and saves the predictions to a + JSON file if a prediction output directory is provided. + """ + + # Release the video writer if it exists + if self.video_info['writer'] is not None: + out_file = self.video_info['output_file'] + print_log( + f'the output video has been saved at {out_file}', + logger='current', + level=logging.INFO) + self.video_info['writer'].release() + + # Save predictions + if pred_out_dir: + fname = os.path.splitext( + os.path.basename(self.video_info['name']))[0] + '.json' + predictions = [ + dict(frame_id=i, instances=pred) + for i, pred in enumerate(self.video_info['predictions']) + ] + + mmengine.dump( + predictions, join_path(pred_out_dir, fname), indent=' ') diff --git a/mmpose/apis/inferencers/hand3d_inferencer.py b/mmpose/apis/inferencers/hand3d_inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..a7db53cb84bf0fc8abc0903a9d311da42502f097 --- /dev/null +++ b/mmpose/apis/inferencers/hand3d_inferencer.py @@ -0,0 +1,344 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
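+# Illustrative usage sketch for the inferencer defined in this file (the model
+# alias below is a placeholder; a config path plus `weights=` can be passed
+# instead if no 3D-hand alias is registered in your install):
+#
+#     from mmpose.apis.inferencers import Hand3DInferencer
+#
+#     inferencer = Hand3DInferencer(model='hand3d')
+#     result_generator = inferencer('demo_hand.jpg', show=False)
+#     result = next(result_generator)  # 'predictions' hold 3D hand keypoints
+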
+import logging +import os +from collections import defaultdict +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.logging import print_log +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.evaluation.functional import nms +from mmpose.registry import INFERENCERS +from mmpose.structures import PoseDataSample, merge_data_samples +from .base_mmpose_inferencer import BaseMMPoseInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module() +class Hand3DInferencer(BaseMMPoseInferencer): + """The inferencer for 3D hand pose estimation. + + Args: + model (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + weights (str, optional): Path to the checkpoint. If it is not + specified and "model" is a model name of metafile, the weights + will be loaded from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model (str, optional): Config path or alias of detection model. + Defaults to None. + det_weights (str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids (int or list[int], optional): Category id for + detection model. Defaults to None. 
+ """ + + preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} + forward_kwargs: set = {'disable_rebase_keypoint'} + visualize_kwargs: set = { + 'return_vis', + 'show', + 'wait_time', + 'draw_bbox', + 'radius', + 'thickness', + 'kpt_thr', + 'vis_out_dir', + 'num_instances', + } + postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'} + + def __init__(self, + model: Union[ModelType, str], + weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None, + show_progress: bool = False) -> None: + + init_default_scope(scope) + super().__init__( + model=model, + weights=weights, + device=device, + scope=scope, + show_progress=show_progress) + self.model = revert_sync_batchnorm(self.model) + + # assign dataset metainfo to self.visualizer + self.visualizer.set_dataset_meta(self.model.dataset_meta) + + # initialize hand detector + self._init_detector( + det_model=det_model, + det_weights=det_weights, + det_cat_ids=det_cat_ids, + device=device, + ) + + self._video_input = False + self._buffer = defaultdict(list) + + def preprocess_single(self, + input: InputType, + index: int, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + bboxes: Union[List[List], List[np.ndarray], + np.ndarray] = []): + """Process a single input into a model-feedable format. + + Args: + input (InputType): Input given by user. + index (int): index of the input + bbox_thr (float): threshold for bounding box detection. + Defaults to 0.3. + nms_thr (float): IoU threshold for bounding box NMS. + Defaults to 0.3. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + """ + + if isinstance(input, str): + data_info = dict(img_path=input) + else: + data_info = dict(img=input, img_path=f'{index}.jpg'.rjust(10, '0')) + data_info.update(self.model.dataset_meta) + + if self.detector is not None: + try: + det_results = self.detector( + input, return_datasamples=True)['predictions'] + except ValueError: + print_log( + 'Support for mmpose and mmdet versions up to 3.1.0 ' + 'will be discontinued in upcoming releases. To ' + 'ensure ongoing compatibility, please upgrade to ' + 'mmdet version 3.2.0 or later.', + logger='current', + level=logging.WARNING) + det_results = self.detector( + input, return_datasample=True)['predictions'] + pred_instance = det_results[0].pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) + + label_mask = np.zeros(len(bboxes), dtype=np.uint8) + for cat_id in self.det_cat_ids: + label_mask = np.logical_or(label_mask, + pred_instance.labels == cat_id) + + bboxes = bboxes[np.logical_and(label_mask, + pred_instance.scores > bbox_thr)] + bboxes = bboxes[nms(bboxes, nms_thr)] + + data_infos = [] + if len(bboxes) > 0: + for bbox in bboxes: + inst = data_info.copy() + inst['bbox'] = bbox[None, :4] + inst['bbox_score'] = bbox[4:5] + data_infos.append(self.pipeline(inst)) + else: + inst = data_info.copy() + + # get bbox from the image size + if isinstance(input, str): + input = mmcv.imread(input) + h, w = input.shape[:2] + + inst['bbox'] = np.array([[0, 0, w, h]], dtype=np.float32) + inst['bbox_score'] = np.ones(1, dtype=np.float32) + data_infos.append(self.pipeline(inst)) + + return data_infos + + @torch.no_grad() + def forward(self, + inputs: Union[dict, tuple], + disable_rebase_keypoint: bool = False): + """Performs a forward pass through the model. 
+ + Args: + inputs (Union[dict, tuple]): The input data to be processed. Can + be either a dictionary or a tuple. + disable_rebase_keypoint (bool, optional): Flag to disable rebasing + the height of the keypoints. Defaults to False. + + Returns: + A list of data samples with prediction instances. + """ + data_samples = self.model.test_step(inputs) + data_samples_2d = [] + + for idx, res in enumerate(data_samples): + pred_instances = res.pred_instances + keypoints = pred_instances.keypoints + rel_root_depth = pred_instances.rel_root_depth + scores = pred_instances.keypoint_scores + hand_type = pred_instances.hand_type + + res_2d = PoseDataSample() + gt_instances = res.gt_instances.clone() + pred_instances = pred_instances.clone() + res_2d.gt_instances = gt_instances + res_2d.pred_instances = pred_instances + + # add relative root depth to left hand joints + keypoints[:, 21:, 2] += rel_root_depth + + # set joint scores according to hand type + scores[:, :21] *= hand_type[:, [0]] + scores[:, 21:] *= hand_type[:, [1]] + # normalize kpt score + if scores.max() > 1: + scores /= 255 + + res_2d.pred_instances.set_field(keypoints[..., :2].copy(), + 'keypoints') + + # rotate the keypoint to make z-axis correspondent to height + # for better visualization + vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + keypoints[..., :3] = keypoints[..., :3] @ vis_R + + # rebase height (z-axis) + if not disable_rebase_keypoint: + valid = scores > 0 + keypoints[..., 2] -= np.min( + keypoints[valid, 2], axis=-1, keepdims=True) + + data_samples[idx].pred_instances.keypoints = keypoints + data_samples[idx].pred_instances.keypoint_scores = scores + data_samples_2d.append(res_2d) + + data_samples = [merge_data_samples(data_samples)] + data_samples_2d = merge_data_samples(data_samples_2d) + + self._buffer['pose2d_results'] = data_samples_2d + + return data_samples + + def visualize( + self, + inputs: list, + preds: List[PoseDataSample], + return_vis: bool = False, + show: bool = False, + draw_bbox: bool = False, + wait_time: float = 0, + radius: int = 3, + thickness: int = 1, + kpt_thr: float = 0.3, + num_instances: int = 1, + vis_out_dir: str = '', + window_name: str = '', + ) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (ms). Defaults to 0 + draw_bbox (bool): Whether to draw the bounding boxes. + Defaults to False + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): Directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + window_name (str, optional): Title of display window. + window_close_event_handler (callable, optional): + + Returns: + List[np.ndarray]: Visualization results. 
+ """ + if (not return_vis) and (not show) and (not vis_out_dir): + return + + if getattr(self, 'visualizer', None) is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + self.visualizer.radius = radius + self.visualizer.line_width = thickness + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input, str): + img = mmcv.imread(single_input, channel_order='rgb') + elif isinstance(single_input, np.ndarray): + img = mmcv.bgr2rgb(single_input) + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + img_name = os.path.basename(pred.metainfo['img_path']) + + # since visualization and inference utilize the same process, + # the wait time is reduced when a video input is utilized, + # thereby eliminating the issue of inference getting stuck. + wait_time = 1e-5 if self._video_input else wait_time + + if num_instances < 0: + num_instances = len(pred.pred_instances) + + visualization = self.visualizer.add_datasample( + window_name, + img, + data_sample=pred, + det_data_sample=self._buffer['pose2d_results'], + draw_gt=False, + draw_bbox=draw_bbox, + show=show, + wait_time=wait_time, + convert_keypoint=False, + axis_azimuth=-115, + axis_limit=200, + axis_elev=15, + kpt_thr=kpt_thr, + num_instances=num_instances) + results.append(visualization) + + if vis_out_dir: + self.save_visualization( + visualization, + vis_out_dir, + img_name=img_name, + ) + + if return_vis: + return results + else: + return [] diff --git a/mmpose/apis/inferencers/mmpose_inferencer.py b/mmpose/apis/inferencers/mmpose_inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..4ade56cb04cf7a5b18758fd90430ae894d34983f --- /dev/null +++ b/mmpose/apis/inferencers/mmpose_inferencer.py @@ -0,0 +1,250 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict, List, Optional, Sequence, Union + +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData +from rich.progress import track + +from .base_mmpose_inferencer import BaseMMPoseInferencer +from .hand3d_inferencer import Hand3DInferencer +from .pose2d_inferencer import Pose2DInferencer +from .pose3d_inferencer import Pose3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +class MMPoseInferencer(BaseMMPoseInferencer): + """MMPose Inferencer. It's a unified inferencer interface for pose + estimation task, currently including: Pose2D. and it can be used to perform + 2D keypoint detection. + + Args: + pose2d (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + pose2d_weights (str, optional): Path to the custom checkpoint file of + the selected pose2d model. If it is not specified and "pose2d" is + a model name of metafile, the weights will be loaded from + metafile. Defaults to None. + device (str, optional): Device to run inference. 
If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model(str, optional): Config path or alias of detection model. + Defaults to None. + det_weights(str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids(int or list[int], optional): Category id for + detection model. Defaults to None. + output_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If set to None, the default setting from the model + config will be used. Default is None. + """ + + preprocess_kwargs: set = { + 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', + 'disable_norm_pose_2d' + } + forward_kwargs: set = { + 'merge_results', 'disable_rebase_keypoint', 'pose_based_nms' + } + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', + 'kpt_thr', 'vis_out_dir', 'skeleton_style', 'draw_heatmap', + 'black_background', 'num_instances' + } + postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'} + + def __init__(self, + pose2d: Optional[str] = None, + pose2d_weights: Optional[str] = None, + pose3d: Optional[str] = None, + pose3d_weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, List]] = None, + show_progress: bool = False) -> None: + + self.visualizer = None + self.show_progress = show_progress + if pose3d is not None: + if 'hand3d' in pose3d: + self.inferencer = Hand3DInferencer(pose3d, pose3d_weights, + device, scope, det_model, + det_weights, det_cat_ids, + show_progress) + else: + self.inferencer = Pose3DInferencer(pose3d, pose3d_weights, + pose2d, pose2d_weights, + device, scope, det_model, + det_weights, det_cat_ids, + show_progress) + elif pose2d is not None: + self.inferencer = Pose2DInferencer(pose2d, pose2d_weights, device, + scope, det_model, det_weights, + det_cat_ids, show_progress) + else: + raise ValueError('Either 2d or 3d pose estimation algorithm ' + 'should be provided.') + + def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs): + """Process the inputs into a model-feedable format. + + Args: + inputs (InputsType): Inputs given by user. + batch_size (int): batch size. Defaults to 1. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + List[str or np.ndarray]: List of original inputs in the batch + """ + for data in self.inferencer.preprocess(inputs, batch_size, **kwargs): + yield data + + @torch.no_grad() + def forward(self, inputs: InputType, **forward_kwargs) -> PredType: + """Forward the inputs to the model. + + Args: + inputs (InputsType): The inputs to be forwarded. + + Returns: + Dict: The prediction results. Possibly with keys "pose2d". + """ + return self.inferencer.forward(inputs, **forward_kwargs) + + def __call__( + self, + inputs: InputsType, + return_datasamples: bool = False, + batch_size: int = 1, + out_dir: Optional[str] = None, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasamples (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + out_dir (str, optional): directory to save visualization + results and predictions. Will be overoden if vis_out_dir or + pred_out_dir are given. 
Defaults to None + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, + ``visualize_kwargs`` and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + if out_dir is not None: + if 'vis_out_dir' not in kwargs: + kwargs['vis_out_dir'] = f'{out_dir}/visualizations' + if 'pred_out_dir' not in kwargs: + kwargs['pred_out_dir'] = f'{out_dir}/predictions' + + kwargs = { + key: value + for key, value in kwargs.items() + if key in set.union(self.inferencer.preprocess_kwargs, + self.inferencer.forward_kwargs, + self.inferencer.visualize_kwargs, + self.inferencer.postprocess_kwargs) + } + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + self.inferencer.update_model_visualizer_settings(**kwargs) + + # preprocessing + if isinstance(inputs, str) and inputs.startswith('webcam'): + inputs = self.inferencer._get_webcam_inputs(inputs) + batch_size = 1 + if not visualize_kwargs.get('show', False): + warnings.warn('The display mode is closed when using webcam ' + 'input. It will be turned on automatically.') + visualize_kwargs['show'] = True + else: + inputs = self.inferencer._inputs_to_list(inputs) + self._video_input = self.inferencer._video_input + if self._video_input: + self.video_info = self.inferencer.video_info + + inputs = self.preprocess( + inputs, batch_size=batch_size, **preprocess_kwargs) + + # forward + if 'bbox_thr' in self.inferencer.forward_kwargs: + forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) + + preds = [] + + for proc_inputs, ori_inputs in (track(inputs, description='Inference') + if self.show_progress else inputs): + preds = self.forward(proc_inputs, **forward_kwargs) + + visualization = self.visualize(ori_inputs, preds, + **visualize_kwargs) + results = self.postprocess( + preds, + visualization, + return_datasamples=return_datasamples, + **postprocess_kwargs) + yield results + + if self._video_input: + self._finalize_video_processing( + postprocess_kwargs.get('pred_out_dir', '')) + + def visualize(self, inputs: InputsType, preds: PredType, + **kwargs) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + show_interval (int): The interval of show (s). Defaults to 0 + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + + Returns: + List[np.ndarray]: Visualization results. 
+ """ + window_name = '' + if self.inferencer._video_input: + window_name = self.inferencer.video_info['name'] + + return self.inferencer.visualize( + inputs, preds, window_name=window_name, **kwargs) diff --git a/mmpose/apis/inferencers/pose2d_inferencer.py b/mmpose/apis/inferencers/pose2d_inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..8b6a2c3e96f9d537bccab05eed01de4a951377ca --- /dev/null +++ b/mmpose/apis/inferencers/pose2d_inferencer.py @@ -0,0 +1,262 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.logging import print_log +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.evaluation.functional import nearby_joints_nms, nms +from mmpose.registry import INFERENCERS +from mmpose.structures import merge_data_samples +from .base_mmpose_inferencer import BaseMMPoseInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='pose-estimation') +@INFERENCERS.register_module() +class Pose2DInferencer(BaseMMPoseInferencer): + """The inferencer for 2D pose estimation. + + Args: + model (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + weights (str, optional): Path to the checkpoint. If it is not + specified and "model" is a model name of metafile, the weights + will be loaded from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model (str, optional): Config path or alias of detection model. + Defaults to None. + det_weights (str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids (int or list[int], optional): Category id for + detection model. Defaults to None. 
+ """ + + preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} + forward_kwargs: set = {'merge_results', 'pose_based_nms'} + visualize_kwargs: set = { + 'return_vis', + 'show', + 'wait_time', + 'draw_bbox', + 'radius', + 'thickness', + 'kpt_thr', + 'vis_out_dir', + 'skeleton_style', + 'draw_heatmap', + 'black_background', + } + postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'} + + def __init__(self, + model: Union[ModelType, str], + weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None, + show_progress: bool = False) -> None: + + init_default_scope(scope) + super().__init__( + model=model, + weights=weights, + device=device, + scope=scope, + show_progress=show_progress) + self.model = revert_sync_batchnorm(self.model) + + # assign dataset metainfo to self.visualizer + self.visualizer.set_dataset_meta(self.model.dataset_meta) + + # initialize detector for top-down models + if self.cfg.data_mode == 'topdown': + self._init_detector( + det_model=det_model, + det_weights=det_weights, + det_cat_ids=det_cat_ids, + device=device, + ) + + self._video_input = False + + def update_model_visualizer_settings(self, + draw_heatmap: bool = False, + skeleton_style: str = 'mmpose', + **kwargs) -> None: + """Update the settings of models and visualizer according to inference + arguments. + + Args: + draw_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If not provided, it defaults to False. + skeleton_style (str, optional): Skeleton style selection. Valid + options are 'mmpose' and 'openpose'. Defaults to 'mmpose'. + """ + self.model.test_cfg['output_heatmaps'] = draw_heatmap + + if skeleton_style not in ['mmpose', 'openpose']: + raise ValueError('`skeleton_style` must be either \'mmpose\' ' + 'or \'openpose\'') + + if skeleton_style == 'openpose': + self.visualizer.set_dataset_meta(self.model.dataset_meta, + skeleton_style) + + def preprocess_single(self, + input: InputType, + index: int, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + bboxes: Union[List[List], List[np.ndarray], + np.ndarray] = []): + """Process a single input into a model-feedable format. + + Args: + input (InputType): Input given by user. + index (int): index of the input + bbox_thr (float): threshold for bounding box detection. + Defaults to 0.3. + nms_thr (float): IoU threshold for bounding box NMS. + Defaults to 0.3. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + """ + + if isinstance(input, str): + data_info = dict(img_path=input) + else: + data_info = dict(img=input, img_path=f'{index}.jpg'.rjust(10, '0')) + data_info.update(self.model.dataset_meta) + + if self.cfg.data_mode == 'topdown': + bboxes = [] + if self.detector is not None: + try: + det_results = self.detector( + input, return_datasamples=True)['predictions'] + except ValueError: + print_log( + 'Support for mmpose and mmdet versions up to 3.1.0 ' + 'will be discontinued in upcoming releases. 
To ' + 'ensure ongoing compatibility, please upgrade to ' + 'mmdet version 3.2.0 or later.', + logger='current', + level=logging.WARNING) + det_results = self.detector( + input, return_datasample=True)['predictions'] + pred_instance = det_results[0].pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), + axis=1) + + label_mask = np.zeros(len(bboxes), dtype=np.uint8) + for cat_id in self.det_cat_ids: + label_mask = np.logical_or(label_mask, + pred_instance.labels == cat_id) + + bboxes = bboxes[np.logical_and( + label_mask, pred_instance.scores > bbox_thr)] + bboxes = bboxes[nms(bboxes, nms_thr)] + + data_infos = [] + if len(bboxes) > 0: + for bbox in bboxes: + inst = data_info.copy() + inst['bbox'] = bbox[None, :4] + inst['bbox_score'] = bbox[4:5] + data_infos.append(self.pipeline(inst)) + else: + inst = data_info.copy() + + # get bbox from the image size + if isinstance(input, str): + input = mmcv.imread(input) + h, w = input.shape[:2] + + inst['bbox'] = np.array([[0, 0, w, h]], dtype=np.float32) + inst['bbox_score'] = np.ones(1, dtype=np.float32) + data_infos.append(self.pipeline(inst)) + + else: # bottom-up + data_infos = [self.pipeline(data_info)] + + return data_infos + + @torch.no_grad() + def forward(self, + inputs: Union[dict, tuple], + merge_results: bool = True, + bbox_thr: float = -1, + pose_based_nms: bool = False): + """Performs a forward pass through the model. + + Args: + inputs (Union[dict, tuple]): The input data to be processed. Can + be either a dictionary or a tuple. + merge_results (bool, optional): Whether to merge data samples, + default to True. This is only applicable when the data_mode + is 'topdown'. + bbox_thr (float, optional): A threshold for the bounding box + scores. Bounding boxes with scores greater than this value + will be retained. Default value is -1 which retains all + bounding boxes. + + Returns: + A list of data samples with prediction instances. + """ + data_samples = self.model.test_step(inputs) + if self.cfg.data_mode == 'topdown' and merge_results: + data_samples = [merge_data_samples(data_samples)] + + if bbox_thr > 0: + for ds in data_samples: + if 'bbox_scores' in ds.pred_instances: + ds.pred_instances = ds.pred_instances[ + ds.pred_instances.bbox_scores > bbox_thr] + + if pose_based_nms: + for ds in data_samples: + if len(ds.pred_instances) == 0: + continue + + kpts = ds.pred_instances.keypoints + scores = ds.pred_instances.bbox_scores + num_keypoints = kpts.shape[-2] + + kept_indices = nearby_joints_nms( + [ + dict(keypoints=kpts[i], score=scores[i]) + for i in range(len(kpts)) + ], + num_nearby_joints_thr=num_keypoints // 3, + ) + ds.pred_instances = ds.pred_instances[kept_indices] + + return data_samples diff --git a/mmpose/apis/inferencers/pose3d_inferencer.py b/mmpose/apis/inferencers/pose3d_inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..f372438298c8ac8c4a6aaa9e171b9c799a9450b1 --- /dev/null +++ b/mmpose/apis/inferencers/pose3d_inferencer.py @@ -0,0 +1,457 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
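Taken together, `preprocess_single` (person detection plus per-box packing) and `forward` (test step, optional box-score filtering and pose-based NMS) give the 2D inferencer above its top-down behaviour. A hedged usage sketch follows; the `'human'` alias and `'demo.jpg'` are placeholders for an alias known to the installed metafiles and an image on disk, and the result layout mirrors the `postprocess` output described earlier:

```python
# Sketch under assumptions: mmpose (and mmdet for the person detector) are
# installed, and 'human' resolves to a top-down config via the metafile.
from mmpose.apis.inferencers import Pose2DInferencer

inferencer = Pose2DInferencer(model='human')
for result in inferencer('demo.jpg',
                         bbox_thr=0.3,
                         pred_out_dir='outputs/predictions',
                         vis_out_dir='outputs/visualizations'):
    # One entry per image in the batch; each entry is the instance list
    # produced by split_instances() in postprocess().
    for image_preds in result['predictions']:
        for inst in image_preds:
            print(len(inst['keypoints']), 'keypoints, bbox score',
                  inst.get('bbox_score'))
```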
+import os +from collections import defaultdict +from functools import partial +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.apis import (_track_by_iou, _track_by_oks, collate_pose_sequence, + convert_keypoint_definition, extract_pose_sequence) +from mmpose.registry import INFERENCERS +from mmpose.structures import PoseDataSample, merge_data_samples +from .base_mmpose_inferencer import BaseMMPoseInferencer +from .pose2d_inferencer import Pose2DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='pose-estimation-3d') +@INFERENCERS.register_module() +class Pose3DInferencer(BaseMMPoseInferencer): + """The inferencer for 3D pose estimation. + + Args: + model (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + weights (str, optional): Path to the checkpoint. If it is not + specified and "model" is a model name of metafile, the weights + will be loaded from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model (str, optional): Config path or alias of detection model. + Defaults to None. + det_weights (str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids (int or list[int], optional): Category id for + detection model. Defaults to None. + output_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If set to None, the default setting from the model + config will be used. Default is None. 
+ """ + + preprocess_kwargs: set = { + 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', + 'disable_norm_pose_2d' + } + forward_kwargs: set = {'disable_rebase_keypoint'} + visualize_kwargs: set = { + 'return_vis', + 'show', + 'wait_time', + 'draw_bbox', + 'radius', + 'thickness', + 'num_instances', + 'kpt_thr', + 'vis_out_dir', + } + postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'} + + def __init__(self, + model: Union[ModelType, str], + weights: Optional[str] = None, + pose2d_model: Optional[Union[ModelType, str]] = None, + pose2d_weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None, + show_progress: bool = False) -> None: + + init_default_scope(scope) + super().__init__( + model=model, + weights=weights, + device=device, + scope=scope, + show_progress=show_progress) + self.model = revert_sync_batchnorm(self.model) + + # assign dataset metainfo to self.visualizer + self.visualizer.set_dataset_meta(self.model.dataset_meta) + + # initialize 2d pose estimator + self.pose2d_model = Pose2DInferencer( + pose2d_model if pose2d_model else 'human', pose2d_weights, device, + scope, det_model, det_weights, det_cat_ids) + + # helper functions + self._keypoint_converter = partial( + convert_keypoint_definition, + pose_det_dataset=self.pose2d_model.model. + dataset_meta['dataset_name'], + pose_lift_dataset=self.model.dataset_meta['dataset_name'], + ) + + self._pose_seq_extractor = partial( + extract_pose_sequence, + causal=self.cfg.test_dataloader.dataset.get('causal', False), + seq_len=self.cfg.test_dataloader.dataset.get('seq_len', 1), + step=self.cfg.test_dataloader.dataset.get('seq_step', 1)) + + self._video_input = False + self._buffer = defaultdict(list) + + def preprocess_single(self, + input: InputType, + index: int, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + bboxes: Union[List[List], List[np.ndarray], + np.ndarray] = [], + use_oks_tracking: bool = False, + tracking_thr: float = 0.3, + disable_norm_pose_2d: bool = False): + """Process a single input into a model-feedable format. + + Args: + input (InputType): The input provided by the user. + index (int): The index of the input. + bbox_thr (float, optional): The threshold for bounding box + detection. Defaults to 0.3. + nms_thr (float, optional): The Intersection over Union (IoU) + threshold for bounding box Non-Maximum Suppression (NMS). + Defaults to 0.3. + bboxes (Union[List[List], List[np.ndarray], np.ndarray]): + The bounding boxes to use. Defaults to []. + use_oks_tracking (bool, optional): A flag that indicates + whether OKS-based tracking should be used. Defaults to False. + tracking_thr (float, optional): The threshold for tracking. + Defaults to 0.3. + disable_norm_pose_2d (bool, optional): A flag that indicates + whether 2D pose normalization should be used. + Defaults to False. + + Yields: + Any: The data processed by the pipeline and collate_fn. + + This method first calculates 2D keypoints using the provided + pose2d_model. The method also performs instance matching, which + can use either OKS-based tracking or IOU-based tracking. 
+ """ + + # calculate 2d keypoints + results_pose2d = next( + self.pose2d_model( + input, + bbox_thr=bbox_thr, + nms_thr=nms_thr, + bboxes=bboxes, + merge_results=False, + return_datasamples=True))['predictions'] + + for ds in results_pose2d: + ds.pred_instances.set_field( + (ds.pred_instances.bboxes[..., 2:] - + ds.pred_instances.bboxes[..., :2]).prod(-1), 'areas') + + if not self._video_input: + height, width = results_pose2d[0].metainfo['ori_shape'] + + # Clear the buffer if inputs are individual images to prevent + # carryover effects from previous images + self._buffer.clear() + + else: + height = self.video_info['height'] + width = self.video_info['width'] + img_path = results_pose2d[0].metainfo['img_path'] + + # instance matching + if use_oks_tracking: + _track = partial(_track_by_oks) + else: + _track = _track_by_iou + + for result in results_pose2d: + track_id, self._buffer['results_pose2d_last'], _ = _track( + result, self._buffer['results_pose2d_last'], tracking_thr) + if track_id == -1: + pred_instances = result.pred_instances.cpu().numpy() + keypoints = pred_instances.keypoints + if np.count_nonzero(keypoints[:, :, 1]) >= 3: + next_id = self._buffer.get('next_id', 0) + result.set_field(next_id, 'track_id') + self._buffer['next_id'] = next_id + 1 + else: + # If the number of keypoints detected is small, + # delete that person instance. + result.pred_instances.keypoints[..., 1] = -10 + result.pred_instances.bboxes *= 0 + result.set_field(-1, 'track_id') + else: + result.set_field(track_id, 'track_id') + self._buffer['pose2d_results'] = merge_data_samples(results_pose2d) + + # convert keypoints + results_pose2d_converted = [ds.cpu().numpy() for ds in results_pose2d] + for ds in results_pose2d_converted: + ds.pred_instances.keypoints = self._keypoint_converter( + ds.pred_instances.keypoints) + self._buffer['pose_est_results_list'].append(results_pose2d_converted) + + # extract and pad input pose2d sequence + pose_results_2d = self._pose_seq_extractor( + self._buffer['pose_est_results_list'], + frame_idx=index if self._video_input else 0) + causal = self.cfg.test_dataloader.dataset.get('causal', False) + target_idx = -1 if causal else len(pose_results_2d) // 2 + + stats_info = self.model.dataset_meta.get('stats_info', {}) + bbox_center = stats_info.get('bbox_center', None) + bbox_scale = stats_info.get('bbox_scale', None) + + pose_results_2d_copy = [] + for pose_res in pose_results_2d: + pose_res_copy = [] + for data_sample in pose_res: + + data_sample_copy = PoseDataSample() + data_sample_copy.gt_instances = \ + data_sample.gt_instances.clone() + data_sample_copy.pred_instances = \ + data_sample.pred_instances.clone() + data_sample_copy.track_id = data_sample.track_id + + kpts = data_sample.pred_instances.keypoints + bboxes = data_sample.pred_instances.bboxes + keypoints = [] + for k in range(len(kpts)): + kpt = kpts[k] + if not disable_norm_pose_2d: + bbox = bboxes[k] + center = np.array([[(bbox[0] + bbox[2]) / 2, + (bbox[1] + bbox[3]) / 2]]) + scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) + keypoints.append((kpt[:, :2] - center) / scale * + bbox_scale + bbox_center) + else: + keypoints.append(kpt[:, :2]) + data_sample_copy.pred_instances.set_field( + np.array(keypoints), 'keypoints') + pose_res_copy.append(data_sample_copy) + + pose_results_2d_copy.append(pose_res_copy) + pose_sequences_2d = collate_pose_sequence(pose_results_2d_copy, True, + target_idx) + if not pose_sequences_2d: + return [] + + data_list = [] + for i, pose_seq in enumerate(pose_sequences_2d): + data_info 
= dict() + + keypoints_2d = pose_seq.pred_instances.keypoints + keypoints_2d = np.squeeze( + keypoints_2d, + axis=0) if keypoints_2d.ndim == 4 else keypoints_2d + + T, K, C = keypoints_2d.shape + + data_info['keypoints'] = keypoints_2d + data_info['keypoints_visible'] = np.ones(( + T, + K, + ), + dtype=np.float32) + data_info['lifting_target'] = np.zeros((1, K, 3), dtype=np.float32) + data_info['factor'] = np.zeros((T, ), dtype=np.float32) + data_info['lifting_target_visible'] = np.ones((1, K, 1), + dtype=np.float32) + data_info['camera_param'] = dict(w=width, h=height) + + data_info.update(self.model.dataset_meta) + data_info = self.pipeline(data_info) + data_info['data_samples'].set_field( + img_path, 'img_path', field_type='metainfo') + data_list.append(data_info) + + return data_list + + @torch.no_grad() + def forward(self, + inputs: Union[dict, tuple], + disable_rebase_keypoint: bool = False): + """Perform forward pass through the model and process the results. + + Args: + inputs (Union[dict, tuple]): The inputs for the model. + disable_rebase_keypoint (bool, optional): Flag to disable rebasing + the height of the keypoints. Defaults to False. + + Returns: + list: A list of data samples, each containing the model's output + results. + """ + pose_lift_results = self.model.test_step(inputs) + + # Post-processing of pose estimation results + pose_est_results_converted = self._buffer['pose_est_results_list'][-1] + for idx, pose_lift_res in enumerate(pose_lift_results): + # Update track_id from the pose estimation results + pose_lift_res.track_id = pose_est_results_converted[idx].get( + 'track_id', 1e4) + + # align the shape of output keypoints coordinates and scores + keypoints = pose_lift_res.pred_instances.keypoints + keypoint_scores = pose_lift_res.pred_instances.keypoint_scores + if keypoint_scores.ndim == 3: + pose_lift_results[idx].pred_instances.keypoint_scores = \ + np.squeeze(keypoint_scores, axis=1) + if keypoints.ndim == 4: + keypoints = np.squeeze(keypoints, axis=1) + + # Invert x and z values of the keypoints + keypoints = keypoints[..., [0, 2, 1]] + keypoints[..., 0] = -keypoints[..., 0] + keypoints[..., 2] = -keypoints[..., 2] + + # If rebase_keypoint_height is True, adjust z-axis values + if not disable_rebase_keypoint: + keypoints[..., 2] -= np.min( + keypoints[..., 2], axis=-1, keepdims=True) + + pose_lift_results[idx].pred_instances.keypoints = keypoints + + pose_lift_results = sorted( + pose_lift_results, key=lambda x: x.get('track_id', 1e4)) + + data_samples = [merge_data_samples(pose_lift_results)] + return data_samples + + def visualize(self, + inputs: list, + preds: List[PoseDataSample], + return_vis: bool = False, + show: bool = False, + draw_bbox: bool = False, + wait_time: float = 0, + radius: int = 3, + thickness: int = 1, + kpt_thr: float = 0.3, + num_instances: int = 1, + vis_out_dir: str = '', + window_name: str = '', + window_close_event_handler: Optional[Callable] = None + ) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (ms). Defaults to 0 + draw_bbox (bool): Whether to draw the bounding boxes. + Defaults to False + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. 
Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): Directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + window_name (str, optional): Title of display window. + window_close_event_handler (callable, optional): + + Returns: + List[np.ndarray]: Visualization results. + """ + if (not return_vis) and (not show) and (not vis_out_dir): + return + + if getattr(self, 'visualizer', None) is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + self.visualizer.radius = radius + self.visualizer.line_width = thickness + det_kpt_color = self.pose2d_model.visualizer.kpt_color + det_dataset_skeleton = self.pose2d_model.visualizer.skeleton + det_dataset_link_color = self.pose2d_model.visualizer.link_color + self.visualizer.det_kpt_color = det_kpt_color + self.visualizer.det_dataset_skeleton = det_dataset_skeleton + self.visualizer.det_dataset_link_color = det_dataset_link_color + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input, str): + img = mmcv.imread(single_input, channel_order='rgb') + elif isinstance(single_input, np.ndarray): + img = mmcv.bgr2rgb(single_input) + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + + # since visualization and inference utilize the same process, + # the wait time is reduced when a video input is utilized, + # thereby eliminating the issue of inference getting stuck. + wait_time = 1e-5 if self._video_input else wait_time + + if num_instances < 0: + num_instances = len(pred.pred_instances) + + visualization = self.visualizer.add_datasample( + window_name, + img, + data_sample=pred, + det_data_sample=self._buffer['pose2d_results'], + draw_gt=False, + draw_bbox=draw_bbox, + show=show, + wait_time=wait_time, + dataset_2d=self.pose2d_model.model. + dataset_meta['dataset_name'], + dataset_3d=self.model.dataset_meta['dataset_name'], + kpt_thr=kpt_thr, + num_instances=num_instances) + results.append(visualization) + + if vis_out_dir: + img_name = os.path.basename(pred.metainfo['img_path']) \ + if 'img_path' in pred.metainfo else None + self.save_visualization( + visualization, + vis_out_dir, + img_name=img_name, + ) + + if return_vis: + return results + else: + return [] diff --git a/mmpose/apis/inferencers/utils/__init__.py b/mmpose/apis/inferencers/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5cc40535b0d42a3b2ff41e97e26dcc30c440622b --- /dev/null +++ b/mmpose/apis/inferencers/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .default_det_models import default_det_models +from .get_model_alias import get_model_aliases + +__all__ = ['default_det_models', 'get_model_aliases'] diff --git a/mmpose/apis/inferencers/utils/default_det_models.py b/mmpose/apis/inferencers/utils/default_det_models.py new file mode 100644 index 0000000000000000000000000000000000000000..a2deca961b00c75fe05d09b30b05394c175acf5b --- /dev/null +++ b/mmpose/apis/inferencers/utils/default_det_models.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
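The 3D inferencer above is a two-stage pipeline: `preprocess_single` runs the 2D inferencer, tracks instances across frames (IoU- or OKS-based), converts keypoint definitions and collates a 2D pose sequence, after which `forward` lifts that sequence to 3D and re-orients and rebases the result for visualization. A hedged end-to-end sketch; `'human3d'` and `'video.mp4'` are placeholders rather than names defined in this diff:

```python
# Sketch under assumptions: a pose-lifting alias/config named 'human3d' is
# available on this install and 'video.mp4' exists; substitute your own.
from mmpose.apis.inferencers import Pose3DInferencer

inferencer = Pose3DInferencer(
    model='human3d',       # pose-lifting model (alias or config path)
    pose2d_model='human',  # 2D estimator feeding the lifter; 'human' is the default
)
for result in inferencer('video.mp4',
                         use_oks_tracking=True,
                         vis_out_dir='outputs/pose3d'):
    # Each yielded dict corresponds to one frame; its predictions carry the
    # lifted 3D keypoints, z-rebased unless disable_rebase_keypoint is set.
    pass
```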
+import os.path as osp + +from mmengine.config.utils import MODULE2PACKAGE +from mmengine.utils import get_installed_path + +mmpose_path = get_installed_path(MODULE2PACKAGE['mmpose']) + +default_det_models = dict( + human=dict( + model=osp.join( + mmpose_path, '.mim', 'demo/mmdetection_cfg/' + 'rtmdet_m_640-8xb32_coco-person.py'), + weights='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth', + cat_ids=(0, )), + face=dict( + model=osp.join(mmpose_path, '.mim', + 'demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py'), + weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/' + 'yolo-x_8xb8-300e_coco-face_13274d7c.pth', + cat_ids=(0, )), + hand=dict( + model=osp.join(mmpose_path, '.mim', 'demo/mmdetection_cfg/' + 'rtmdet_nano_320-8xb32_hand.py'), + weights='https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/' + 'rtmdet_nano_8xb32-300e_hand-267f9c8f.pth', + cat_ids=(0, )), + animal=dict( + model='rtmdet-m', + weights=None, + cat_ids=(15, 16, 17, 18, 19, 20, 21, 22, 23)), +) + +default_det_models['body'] = default_det_models['human'] +default_det_models['wholebody'] = default_det_models['human'] diff --git a/mmpose/apis/inferencers/utils/get_model_alias.py b/mmpose/apis/inferencers/utils/get_model_alias.py new file mode 100644 index 0000000000000000000000000000000000000000..49de6528d6ea0df58cf7ae987176defbd4953739 --- /dev/null +++ b/mmpose/apis/inferencers/utils/get_model_alias.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict + +from mmengine.infer import BaseInferencer + + +def get_model_aliases(scope: str = 'mmpose') -> Dict[str, str]: + """Retrieve model aliases and their corresponding configuration names. + + Args: + scope (str, optional): The scope for the model aliases. Defaults + to 'mmpose'. + + Returns: + Dict[str, str]: A dictionary containing model aliases as keys and + their corresponding configuration names as values. + """ + + # Get a list of model configurations from the metafile + repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope) + model_cfgs = BaseInferencer._get_models_from_metafile(repo_or_mim_dir) + + model_alias_dict = dict() + for model_cfg in model_cfgs: + if 'Alias' in model_cfg: + if isinstance(model_cfg['Alias'], str): + model_alias_dict[model_cfg['Alias']] = model_cfg['Name'] + elif isinstance(model_cfg['Alias'], list): + for alias in model_cfg['Alias']: + model_alias_dict[alias] = model_cfg['Name'] + else: + raise ValueError( + 'encounter an unexpected alias type. Please raise an ' + 'issue at https://github.com/open-mmlab/mmpose/issues ' + 'to announce us') + + return model_alias_dict diff --git a/mmpose/apis/visualization.py b/mmpose/apis/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..ffc951ea427c363285b4b0daa5e48bab7716a5a0 --- /dev/null +++ b/mmpose/apis/visualization.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
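The two helpers above are plain lookup utilities: `default_det_models` maps coarse targets such as `'human'`, `'face'`, `'hand'` and `'animal'` (with `'body'` and `'wholebody'` reusing the `'human'` entry) to a detector config, checkpoint URL and category ids, while `get_model_aliases` collects alias-to-config-name mappings from the installed metafiles. A short inspection sketch; the printed contents depend entirely on the local mmpose installation:

```python
# Requires an mmpose install whose .mim metafiles are present, since both
# helpers read from the installed package.
from mmpose.apis.inferencers.utils import default_det_models, get_model_aliases

hand_det = default_det_models['hand']
print('hand detector config :', hand_det['model'])
print('hand detector cat ids:', hand_det['cat_ids'])

for alias, config_name in sorted(get_model_aliases('mmpose').items()):
    print(f'{alias:>12s} -> {config_name}')
```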
+from copy import deepcopy +from typing import Union + +import mmcv +import numpy as np +from mmengine.structures import InstanceData + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.structures import PoseDataSample +from mmpose.visualization import PoseLocalVisualizer + +# from posevis import pose_visualization + +# def visualize( +# img: Union[np.ndarray, str], +# keypoints: np.ndarray, +# keypoint_score: np.ndarray = None, +# metainfo: Union[str, dict] = None, +# visualizer: PoseLocalVisualizer = None, +# show_kpt_idx: bool = False, +# skeleton_style: str = 'mmpose', +# show: bool = False, +# kpt_thr: float = 0.3, +# ): +# """Visualize 2d keypoints on an image. + +# Args: +# img (str | np.ndarray): The image to be displayed. +# keypoints (np.ndarray): The keypoint to be displayed. +# keypoint_score (np.ndarray): The score of each keypoint. +# metainfo (str | dict): The metainfo of dataset. +# visualizer (PoseLocalVisualizer): The visualizer. +# show_kpt_idx (bool): Whether to show the index of keypoints. +# skeleton_style (str): Skeleton style. Options are 'mmpose' and +# 'openpose'. +# show (bool): Whether to show the image. +# wait_time (int): Value of waitKey param. +# kpt_thr (float): Keypoint threshold. +# """ +# kpts = keypoints.reshape(-1, 2) +# kpts = np.concatenate([kpts, keypoint_score[:, None]], axis=1) +# kpts[kpts[:, 2] < kpt_thr, :] = 0 +# pose_results = [{ +# 'keypoints': kpts, +# }] + +# img = pose_visualization( +# img, +# pose_results, +# format="COCO", +# greyness=1.0, +# show_markers=True, +# show_bones=True, +# line_type="solid", +# width_multiplier=1.0, +# bbox_width_multiplier=1.0, +# show_bbox=False, +# differ_individuals=False, +# ) +# return img + + +def visualize( + img: Union[np.ndarray, str], + keypoints: np.ndarray, + keypoint_score: np.ndarray = None, + metainfo: Union[str, dict] = None, + visualizer: PoseLocalVisualizer = None, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose', + show: bool = False, + kpt_thr: float = 0.3, +): + """Visualize 2d keypoints on an image. + + Args: + img (str | np.ndarray): The image to be displayed. + keypoints (np.ndarray): The keypoint to be displayed. + keypoint_score (np.ndarray): The score of each keypoint. + metainfo (str | dict): The metainfo of dataset. + visualizer (PoseLocalVisualizer): The visualizer. + show_kpt_idx (bool): Whether to show the index of keypoints. + skeleton_style (str): Skeleton style. Options are 'mmpose' and + 'openpose'. + show (bool): Whether to show the image. + wait_time (int): Value of waitKey param. + kpt_thr (float): Keypoint threshold. 
+ """ + assert skeleton_style in [ + 'mmpose', 'openpose' + ], (f'Only support skeleton style in {["mmpose", "openpose"]}, ') + + if visualizer is None: + visualizer = PoseLocalVisualizer() + else: + visualizer = deepcopy(visualizer) + + if isinstance(metainfo, str): + metainfo = parse_pose_metainfo(dict(from_file=metainfo)) + elif isinstance(metainfo, dict): + metainfo = parse_pose_metainfo(metainfo) + + if metainfo is not None: + visualizer.set_dataset_meta(metainfo, skeleton_style=skeleton_style) + + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if keypoint_score is None: + keypoint_score = np.ones(keypoints.shape[0]) + + tmp_instances = InstanceData() + tmp_instances.keypoints = keypoints + tmp_instances.keypoint_score = keypoint_score + + tmp_datasample = PoseDataSample() + tmp_datasample.pred_instances = tmp_instances + + visualizer.add_datasample( + 'visualization', + img, + tmp_datasample, + show_kpt_idx=show_kpt_idx, + skeleton_style=skeleton_style, + show=show, + wait_time=0, + kpt_thr=kpt_thr) + + return visualizer.get_image() diff --git a/mmpose/codecs/__init__.py b/mmpose/codecs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..31bc874a13d5db1d8d42093359940bda24db814f --- /dev/null +++ b/mmpose/codecs/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .annotation_processors import YOLOXPoseAnnotationProcessor +from .associative_embedding import AssociativeEmbedding +from .decoupled_heatmap import DecoupledHeatmap +from .edpose_label import EDPoseLabel +from .hand_3d_heatmap import Hand3DHeatmap +from .image_pose_lifting import ImagePoseLifting +from .integral_regression_label import IntegralRegressionLabel +from .megvii_heatmap import MegviiHeatmap +from .motionbert_label import MotionBERTLabel +from .msra_heatmap import MSRAHeatmap +from .regression_label import RegressionLabel +from .simcc_label import SimCCLabel +from .spr import SPR +from .udp_heatmap import UDPHeatmap +from .video_pose_lifting import VideoPoseLifting +from .onehot_heatmap import OneHotHeatmap + +__all__ = [ + 'MSRAHeatmap', 'MegviiHeatmap', 'UDPHeatmap', 'RegressionLabel', + 'SimCCLabel', 'IntegralRegressionLabel', 'AssociativeEmbedding', 'SPR', + 'DecoupledHeatmap', 'VideoPoseLifting', 'ImagePoseLifting', + 'MotionBERTLabel', 'YOLOXPoseAnnotationProcessor', 'EDPoseLabel', + 'Hand3DHeatmap', 'OneHotHeatmap' +] diff --git a/mmpose/codecs/annotation_processors.py b/mmpose/codecs/annotation_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..72a578df7000707ceb122469a4fe9ab85959625f --- /dev/null +++ b/mmpose/codecs/annotation_processors.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + +INF = 1e6 +NEG_INF = -1e6 + + +class BaseAnnotationProcessor(BaseKeypointCodec): + """Base class for annotation processors.""" + + def decode(self, *args, **kwargs): + pass + + +@KEYPOINT_CODECS.register_module() +class YOLOXPoseAnnotationProcessor(BaseAnnotationProcessor): + """Convert dataset annotations to the input format of YOLOX-Pose. + + This processor expands bounding boxes and converts category IDs to labels. + + Args: + extend_bbox (bool, optional): Whether to expand the bounding box + to include all keypoints. Defaults to False. 
+ input_size (tuple, optional): The size of the input image for the + model, formatted as (h, w). This argument is necessary for the + codec in deployment but is not used indeed. + """ + + auxiliary_encode_keys = {'category_id', 'bbox'} + label_mapping_table = dict( + bbox='bboxes', + bbox_labels='labels', + keypoints='keypoints', + keypoints_visible='keypoints_visible', + area='areas', + ) + instance_mapping_table = dict( + bbox='bboxes', + bbox_score='bbox_scores', + keypoints='keypoints', + keypoints_visible='keypoints_visible', + # remove 'bbox_scales' in default instance_mapping_table to avoid + # length mismatch during training with multiple datasets + ) + + def __init__(self, + extend_bbox: bool = False, + input_size: Optional[Tuple] = None): + super().__init__() + self.extend_bbox = extend_bbox + + def encode(self, + keypoints: Optional[np.ndarray] = None, + keypoints_visible: Optional[np.ndarray] = None, + bbox: Optional[np.ndarray] = None, + category_id: Optional[List[int]] = None + ) -> Dict[str, np.ndarray]: + """Encode keypoints, bounding boxes, and category IDs. + + Args: + keypoints (np.ndarray, optional): Keypoints array. Defaults + to None. + keypoints_visible (np.ndarray, optional): Visibility array for + keypoints. Defaults to None. + bbox (np.ndarray, optional): Bounding box array. Defaults to None. + category_id (List[int], optional): List of category IDs. Defaults + to None. + + Returns: + Dict[str, np.ndarray]: Encoded annotations. + """ + results = {} + + if self.extend_bbox and bbox is not None: + # Handle keypoints visibility + if keypoints_visible.ndim == 3: + keypoints_visible = keypoints_visible[..., 0] + + # Expand bounding box to include keypoints + kpts_min = keypoints.copy() + kpts_min[keypoints_visible == 0] = INF + bbox[..., :2] = np.minimum(bbox[..., :2], kpts_min.min(axis=1)) + + kpts_max = keypoints.copy() + kpts_max[keypoints_visible == 0] = NEG_INF + bbox[..., 2:] = np.maximum(bbox[..., 2:], kpts_max.max(axis=1)) + + results['bbox'] = bbox + + if category_id is not None: + # Convert category IDs to labels + bbox_labels = np.array(category_id).astype(np.int8) - 1 + results['bbox_labels'] = bbox_labels + + return results diff --git a/mmpose/codecs/associative_embedding.py b/mmpose/codecs/associative_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..def9bfd89ed9157ca45b60d5dcd33861e7eac9ec --- /dev/null +++ b/mmpose/codecs/associative_embedding.py @@ -0,0 +1,522 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Any, List, Optional, Tuple + +import numpy as np +import torch +from munkres import Munkres +from torch import Tensor + +from mmpose.registry import KEYPOINT_CODECS +from mmpose.utils.tensor_utils import to_numpy +from .base import BaseKeypointCodec +from .utils import (batch_heatmap_nms, generate_gaussian_heatmaps, + generate_udp_gaussian_heatmaps, refine_keypoints, + refine_keypoints_dark_udp) + + +def _py_max_match(scores): + """Apply munkres algorithm to get the best match. + + Args: + scores(np.ndarray): cost matrix. + + Returns: + np.ndarray: best match. + """ + m = Munkres() + tmp = m.compute(scores) + tmp = np.array(tmp).astype(int) + return tmp + + +def _group_keypoints_by_tags(vals: np.ndarray, + tags: np.ndarray, + locs: np.ndarray, + keypoint_order: List[int], + val_thr: float, + tag_thr: float = 1.0, + max_groups: Optional[int] = None) -> np.ndarray: + """Group the keypoints by tags using Munkres algorithm. 
+ + Note: + + - keypoint number: K + - candidate number: M + - tag dimenssion: L + - coordinate dimension: D + - group number: G + + Args: + vals (np.ndarray): The heatmap response values of keypoints in shape + (K, M) + tags (np.ndarray): The tags of the keypoint candidates in shape + (K, M, L) + locs (np.ndarray): The locations of the keypoint candidates in shape + (K, M, D) + keypoint_order (List[int]): The grouping order of the keypoints. + The groupping usually starts from a keypoints around the head and + torso, and gruadually moves out to the limbs + val_thr (float): The threshold of the keypoint response value + tag_thr (float): The maximum allowed tag distance when matching a + keypoint to a group. A keypoint with larger tag distance to any + of the existing groups will initializes a new group + max_groups (int, optional): The maximum group number. ``None`` means + no limitation. Defaults to ``None`` + + Returns: + np.ndarray: grouped keypoints in shape (G, K, D+1), where the last + dimenssion is the concatenated keypoint coordinates and scores. + """ + + tag_k, loc_k, val_k = tags, locs, vals + K, M, D = locs.shape + assert vals.shape == tags.shape[:2] == (K, M) + assert len(keypoint_order) == K + + default_ = np.zeros((K, 3 + tag_k.shape[2]), dtype=np.float32) + + joint_dict = {} + tag_dict = {} + for i in range(K): + idx = keypoint_order[i] + + tags = tag_k[idx] + joints = np.concatenate((loc_k[idx], val_k[idx, :, None], tags), 1) + mask = joints[:, 2] > val_thr + tags = tags[mask] # shape: [M, L] + joints = joints[mask] # shape: [M, 3 + L], 3: x, y, val + + if joints.shape[0] == 0: + continue + + if i == 0 or len(joint_dict) == 0: + for tag, joint in zip(tags, joints): + key = tag[0] + joint_dict.setdefault(key, np.copy(default_))[idx] = joint + tag_dict[key] = [tag] + else: + # shape: [M] + grouped_keys = list(joint_dict.keys()) + # shape: [M, L] + grouped_tags = [np.mean(tag_dict[i], axis=0) for i in grouped_keys] + + # shape: [M, M, L] + diff = joints[:, None, 3:] - np.array(grouped_tags)[None, :, :] + # shape: [M, M] + diff_normed = np.linalg.norm(diff, ord=2, axis=2) + diff_saved = np.copy(diff_normed) + diff_normed = np.round(diff_normed) * 100 - joints[:, 2:3] + + num_added = diff.shape[0] + num_grouped = diff.shape[1] + + if num_added > num_grouped: + diff_normed = np.concatenate( + (diff_normed, + np.zeros((num_added, num_added - num_grouped), + dtype=np.float32) + 1e10), + axis=1) + + pairs = _py_max_match(diff_normed) + for row, col in pairs: + if (row < num_added and col < num_grouped + and diff_saved[row][col] < tag_thr): + key = grouped_keys[col] + joint_dict[key][idx] = joints[row] + tag_dict[key].append(tags[row]) + else: + key = tags[row][0] + joint_dict.setdefault(key, np.copy(default_))[idx] = \ + joints[row] + tag_dict[key] = [tags[row]] + + joint_dict_keys = list(joint_dict.keys())[:max_groups] + + if joint_dict_keys: + results = np.array([joint_dict[i] + for i in joint_dict_keys]).astype(np.float32) + results = results[..., :D + 1] + else: + results = np.empty((0, K, D + 1), dtype=np.float32) + return results + + +@KEYPOINT_CODECS.register_module() +class AssociativeEmbedding(BaseKeypointCodec): + """Encode/decode keypoints with the method introduced in "Associative + Embedding". This is an asymmetric codec, where the keypoints are + represented as gaussian heatmaps and position indices during encoding, and + restored from predicted heatmaps and group tags. 
+ + See the paper `Associative Embedding: End-to-End Learning for Joint + Detection and Grouping`_ by Newell et al (2017) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - embedding tag dimension: L + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_indices (np.ndarray): The keypoint position indices in shape + (N, K, 2). Each keypoint's index is [i, v], where i is the position + index in the heatmap (:math:`i=y*w+x`) and v is the visibility + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + decode_keypoint_order (List[int]): The grouping order of the + keypoint indices. The groupping usually starts from a keypoints + around the head and torso, and gruadually moves out to the limbs + decode_keypoint_thr (float): The threshold of keypoint response value + in heatmaps. Defaults to 0.1 + decode_tag_thr (float): The maximum allowed tag distance when matching + a keypoint to a group. A keypoint with larger tag distance to any + of the existing groups will initializes a new group. Defaults to + 1.0 + decode_nms_kernel (int): The kernel size of the NMS during decoding, + which should be an odd integer. Defaults to 5 + decode_gaussian_kernel (int): The kernel size of the Gaussian blur + during decoding, which should be an odd integer. It is only used + when ``self.use_udp==True``. Defaults to 3 + decode_topk (int): The number top-k candidates of each keypoints that + will be retrieved from the heatmaps during dedocding. Defaults to + 20 + decode_max_instances (int, optional): The maximum number of instances + to decode. ``None`` means no limitation to the instance number. + Defaults to ``None`` + + .. _`Associative Embedding: End-to-End Learning for Joint Detection and + Grouping`: https://arxiv.org/abs/1611.05424 + .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: Optional[float] = None, + use_udp: bool = False, + decode_keypoint_order: List[int] = [], + decode_nms_kernel: int = 5, + decode_gaussian_kernel: int = 3, + decode_keypoint_thr: float = 0.1, + decode_tag_thr: float = 1.0, + decode_topk: int = 30, + decode_center_shift=0.0, + decode_max_instances: Optional[int] = None, + ) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.use_udp = use_udp + self.decode_nms_kernel = decode_nms_kernel + self.decode_gaussian_kernel = decode_gaussian_kernel + self.decode_keypoint_thr = decode_keypoint_thr + self.decode_tag_thr = decode_tag_thr + self.decode_topk = decode_topk + self.decode_center_shift = decode_center_shift + self.decode_max_instances = decode_max_instances + self.decode_keypoint_order = decode_keypoint_order.copy() + + if self.use_udp: + self.scale_factor = ((np.array(input_size) - 1) / + (np.array(heatmap_size) - 1)).astype( + np.float32) + else: + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + if sigma is None: + sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 64 + self.sigma = sigma + + def encode( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encode keypoints into heatmaps and position indices. Note that the + original keypoint coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_indices (np.ndarray): The keypoint position indices + in shape (N, K, 2). 
Each keypoint's index is [i, v], where i + is the position index in the heatmap (:math:`i=y*w+x`) and v + is the visibility + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + + if self.use_udp: + heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + else: + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + + keypoint_indices = self._encode_keypoint_indices( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible) + + encoded = dict( + heatmaps=heatmaps, + keypoint_indices=keypoint_indices, + keypoint_weights=keypoint_weights) + + return encoded + + def _encode_keypoint_indices(self, heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray) -> np.ndarray: + w, h = heatmap_size + N, K, _ = keypoints.shape + keypoint_indices = np.zeros((N, K, 2), dtype=np.int64) + + for n, k in product(range(N), range(K)): + x, y = (keypoints[n, k] + 0.5).astype(np.int64) + index = y * w + x + vis = (keypoints_visible[n, k] > 0.5 and 0 <= x < w and 0 <= y < h) + keypoint_indices[n, k] = [index, vis] + + return keypoint_indices + + def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: + raise NotImplementedError() + + def _get_batch_topk(self, batch_heatmaps: Tensor, batch_tags: Tensor, + k: int): + """Get top-k response values from the heatmaps and corresponding tag + values from the tagging heatmaps. + + Args: + batch_heatmaps (Tensor): Keypoint detection heatmaps in shape + (B, K, H, W) + batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where + the tag dim C is 2*K when using flip testing, or K otherwise + k (int): The number of top responses to get + + Returns: + tuple: + - topk_vals (Tensor): Top-k response values of each heatmap in + shape (B, K, Topk) + - topk_tags (Tensor): The corresponding embedding tags of the + top-k responses, in shape (B, K, Topk, L) + - topk_locs (Tensor): The location of the top-k responses in each + heatmap, in shape (B, K, Topk, 2) where last dimension + represents x and y coordinates + """ + B, K, H, W = batch_heatmaps.shape + L = batch_tags.shape[1] // K + + # shape of topk_val, top_indices: (B, K, TopK) + topk_vals, topk_indices = batch_heatmaps.flatten(-2, -1).topk( + k, dim=-1) + + topk_tags_per_kpts = [ + torch.gather(_tag, dim=2, index=topk_indices) + for _tag in torch.unbind(batch_tags.view(B, L, K, H * W), dim=1) + ] + + topk_tags = torch.stack(topk_tags_per_kpts, dim=-1) # (B, K, TopK, L) + topk_locs = torch.stack([topk_indices % W, topk_indices // W], + dim=-1) # (B, K, TopK, 2) + + return topk_vals, topk_tags, topk_locs + + def _group_keypoints(self, batch_vals: np.ndarray, batch_tags: np.ndarray, + batch_locs: np.ndarray): + """Group keypoints into groups (each represents an instance) by tags. 
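+
+        Each sample in the batch is grouped independently with
+        :func:`_group_keypoints_by_tags`.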
+ + Args: + batch_vals (Tensor): Heatmap response values of keypoint + candidates in shape (B, K, Topk) + batch_tags (Tensor): Tags of keypoint candidates in shape + (B, K, Topk, L) + batch_locs (Tensor): Locations of keypoint candidates in shape + (B, K, Topk, 2) + + Returns: + List[np.ndarray]: Grouping results of a batch, each element is a + np.ndarray (in shape [N, K, D+1]) that contains the groups + detected in an image, including both keypoint coordinates and + scores. + """ + + def _group_func(inputs: Tuple): + vals, tags, locs = inputs + return _group_keypoints_by_tags( + vals, + tags, + locs, + keypoint_order=self.decode_keypoint_order, + val_thr=self.decode_keypoint_thr, + tag_thr=self.decode_tag_thr, + max_groups=self.decode_max_instances) + + _results = map(_group_func, zip(batch_vals, batch_tags, batch_locs)) + results = list(_results) + return results + + def _fill_missing_keypoints(self, keypoints: np.ndarray, + keypoint_scores: np.ndarray, + heatmaps: np.ndarray, tags: np.ndarray): + """Fill the missing keypoints in the initial predictions. + + Args: + keypoints (np.ndarray): Keypoint predictions in shape (N, K, D) + keypoint_scores (np.ndarray): Keypint score predictions in shape + (N, K), in which 0 means the corresponding keypoint is + missing in the initial prediction + heatmaps (np.ndarry): Heatmaps in shape (K, H, W) + tags (np.ndarray): Tagging heatmaps in shape (C, H, W) where + C=L*K + + Returns: + tuple: + - keypoints (np.ndarray): Keypoint predictions with missing + ones filled + - keypoint_scores (np.ndarray): Keypoint score predictions with + missing ones filled + """ + + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + L = tags.shape[0] // K + keypoint_tags = [tags[k::K] for k in range(K)] + + for n in range(N): + # Calculate the instance tag (mean tag of detected keypoints) + _tag = [] + for k in range(K): + if keypoint_scores[n, k] > 0: + x, y = keypoints[n, k, :2].astype(np.int64) + x = np.clip(x, 0, W - 1) + y = np.clip(y, 0, H - 1) + _tag.append(keypoint_tags[k][:, y, x]) + + tag = np.mean(_tag, axis=0) + tag = tag.reshape(L, 1, 1) + # Search maximum response of the missing keypoints + for k in range(K): + if keypoint_scores[n, k] > 0: + continue + dist_map = np.linalg.norm( + keypoint_tags[k] - tag, ord=2, axis=0) + cost_map = np.round(dist_map) * 100 - heatmaps[k] # H, W + y, x = np.unravel_index(np.argmin(cost_map), shape=(H, W)) + keypoints[n, k] = [x, y] + keypoint_scores[n, k] = heatmaps[k, y, x] + + return keypoints, keypoint_scores + + def batch_decode(self, batch_heatmaps: Tensor, batch_tags: Tensor + ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Decode the keypoint coordinates from a batch of heatmaps and tagging + heatmaps. The decoded keypoint coordinates are in the input image + space. + + Args: + batch_heatmaps (Tensor): Keypoint detection heatmaps in shape + (B, K, H, W) + batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where + :math:`C=L*K` + + Returns: + tuple: + - batch_keypoints (List[np.ndarray]): Decoded keypoint coordinates + of the batch, each is in shape (N, K, D) + - batch_scores (List[np.ndarray]): Decoded keypoint scores of the + batch, each is in shape (N, K). 
It usually represents the + confidience of the keypoint prediction + """ + B, _, H, W = batch_heatmaps.shape + assert batch_tags.shape[0] == B and batch_tags.shape[2:4] == (H, W), ( + f'Mismatched shapes of heatmap ({batch_heatmaps.shape}) and ' + f'tagging map ({batch_tags.shape})') + + # Heatmap NMS + batch_heatmaps_peak = batch_heatmap_nms(batch_heatmaps, + self.decode_nms_kernel) + + # Get top-k in each heatmap and and convert to numpy + batch_topk_vals, batch_topk_tags, batch_topk_locs = to_numpy( + self._get_batch_topk( + batch_heatmaps_peak, batch_tags, k=self.decode_topk)) + + # Group keypoint candidates into groups (instances) + batch_groups = self._group_keypoints(batch_topk_vals, batch_topk_tags, + batch_topk_locs) + + # Convert to numpy + batch_heatmaps_np = to_numpy(batch_heatmaps) + batch_tags_np = to_numpy(batch_tags) + + # Refine the keypoint prediction + batch_keypoints = [] + batch_keypoint_scores = [] + batch_instance_scores = [] + for i, (groups, heatmaps, tags) in enumerate( + zip(batch_groups, batch_heatmaps_np, batch_tags_np)): + + keypoints, scores = groups[..., :-1], groups[..., -1] + instance_scores = scores.mean(axis=-1) + + if keypoints.size > 0: + # refine keypoint coordinates according to heatmap distribution + if self.use_udp: + keypoints = refine_keypoints_dark_udp( + keypoints, + heatmaps, + blur_kernel_size=self.decode_gaussian_kernel) + else: + keypoints = refine_keypoints(keypoints, heatmaps) + keypoints += self.decode_center_shift * \ + (scores > 0).astype(keypoints.dtype)[..., None] + + # identify missing keypoints + keypoints, scores = self._fill_missing_keypoints( + keypoints, scores, heatmaps, tags) + + batch_keypoints.append(keypoints) + batch_keypoint_scores.append(scores) + batch_instance_scores.append(instance_scores) + + # restore keypoint scale + batch_keypoints = [ + kpts * self.scale_factor for kpts in batch_keypoints + ] + + return batch_keypoints, batch_keypoint_scores, batch_instance_scores diff --git a/mmpose/codecs/base.py b/mmpose/codecs/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b01e8c4b2c974a3cf115a005400baad8e0bf9cd6 --- /dev/null +++ b/mmpose/codecs/base.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Any, List, Optional, Tuple + +import numpy as np +from mmengine.utils import is_method_overridden + + +class BaseKeypointCodec(metaclass=ABCMeta): + """The base class of the keypoint codec. + + A keypoint codec is a module to encode keypoint coordinates to specific + representation (e.g. heatmap) and vice versa. A subclass should implement + the methods :meth:`encode` and :meth:`decode`. + """ + + # pass additional encoding arguments to the `encode` method, beyond the + # mandatory `keypoints` and `keypoints_visible` arguments. + auxiliary_encode_keys = set() + + field_mapping_table = dict() + instance_mapping_table = dict() + label_mapping_table = dict() + + @abstractmethod + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + + Returns: + dict: Encoded items. + """ + + @abstractmethod + def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoints. 
+ + Args: + encoded (any): Encoded keypoint representation using the codec + + Returns: + tuple: + - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + - keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + """ + + def batch_decode(self, batch_encoded: Any + ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Decode keypoints. + + Args: + batch_encoded (any): A batch of encoded keypoint + representations + + Returns: + tuple: + - batch_keypoints (List[np.ndarray]): Each element is keypoint + coordinates in shape (N, K, D) + - batch_keypoints (List[np.ndarray]): Each element is keypoint + visibility in shape (N, K) + """ + raise NotImplementedError() + + @property + def support_batch_decoding(self) -> bool: + """Return whether the codec support decoding from batch data.""" + return is_method_overridden('batch_decode', BaseKeypointCodec, + self.__class__) diff --git a/mmpose/codecs/decoupled_heatmap.py b/mmpose/codecs/decoupled_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..b5929e3dcf3f24092d6aa2887e4a2ff7e4903b9b --- /dev/null +++ b/mmpose/codecs/decoupled_heatmap.py @@ -0,0 +1,274 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (generate_gaussian_heatmaps, get_diagonal_lengths, + get_instance_bbox, get_instance_root) +from .utils.post_processing import get_heatmap_maximum +from .utils.refinement import refine_keypoints + + +@KEYPOINT_CODECS.register_module() +class DecoupledHeatmap(BaseKeypointCodec): + """Encode/decode keypoints with the method introduced in the paper CID. + + See the paper Contextual Instance Decoupling for Robust Multi-Person + Pose Estimation`_ by Wang et al (2022) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + - heatmaps (np.ndarray): The coupled heatmap in shape + (1+K, H, W) where [W, H] is the `heatmap_size`. + - instance_heatmaps (np.ndarray): The decoupled heatmap in shape + (M*K, H, W) where M is the number of instances. + - keypoint_weights (np.ndarray): The weight for heatmaps in shape + (M*K). + - instance_coords (np.ndarray): The coordinates of instance roots + in shape (M, 2) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + root_type (str): The method to generate the instance root. Options + are: + + - ``'kpt_center'``: Average coordinate of all visible keypoints. + - ``'bbox_center'``: Center point of bounding boxes outlined by + all visible keypoints. + + Defaults to ``'kpt_center'`` + + heatmap_min_overlap (float): Minimum overlap rate among instances. + Used when calculating sigmas for instances. Defaults to 0.7 + background_weight (float): Loss weight of background pixels. + Defaults to 0.1 + encode_max_instances (int): The maximum number of instances + to encode for each sample. Defaults to 30 + + .. 
_`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ + Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ + CVPR_2022_paper.html + """ + + # DecoupledHeatmap requires bounding boxes to determine the size of each + # instance, so that it can assign varying sigmas based on their size + auxiliary_encode_keys = {'bbox'} + + label_mapping_table = dict( + keypoint_weights='keypoint_weights', + instance_coords='instance_coords', + ) + field_mapping_table = dict( + heatmaps='heatmaps', + instance_heatmaps='instance_heatmaps', + ) + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + root_type: str = 'kpt_center', + heatmap_min_overlap: float = 0.7, + encode_max_instances: int = 30, + ): + super().__init__() + + self.input_size = input_size + self.heatmap_size = heatmap_size + self.root_type = root_type + self.encode_max_instances = encode_max_instances + self.heatmap_min_overlap = heatmap_min_overlap + + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def _get_instance_wise_sigmas( + self, + bbox: np.ndarray, + ) -> np.ndarray: + """Get sigma values for each instance according to their size. + + Args: + bbox (np.ndarray): Bounding box in shape (N, 4, 2) + + Returns: + np.ndarray: Array containing the sigma values for each instance. + """ + sigmas = np.zeros((bbox.shape[0], ), dtype=np.float32) + + heights = np.sqrt(np.power(bbox[:, 0] - bbox[:, 1], 2).sum(axis=-1)) + widths = np.sqrt(np.power(bbox[:, 0] - bbox[:, 2], 2).sum(axis=-1)) + + for i in range(bbox.shape[0]): + h, w = heights[i], widths[i] + + # compute sigma for each instance + # condition 1 + a1, b1 = 1, h + w + c1 = w * h * (1 - self.heatmap_min_overlap) / ( + 1 + self.heatmap_min_overlap) + sq1 = np.sqrt(b1**2 - 4 * a1 * c1) + r1 = (b1 + sq1) / 2 + + # condition 2 + a2 = 4 + b2 = 2 * (h + w) + c2 = (1 - self.heatmap_min_overlap) * w * h + sq2 = np.sqrt(b2**2 - 4 * a2 * c2) + r2 = (b2 + sq2) / 2 + + # condition 3 + a3 = 4 * self.heatmap_min_overlap + b3 = -2 * self.heatmap_min_overlap * (h + w) + c3 = (self.heatmap_min_overlap - 1) * w * h + sq3 = np.sqrt(b3**2 - 4 * a3 * c3) + r3 = (b3 + sq3) / 2 + + sigmas[i] = min(r1, r2, r3) / 3 + + return sigmas + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + bbox: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + bbox (np.ndarray): Bounding box in shape (N, 8) which includes + coordinates of 4 corners. + + Returns: + dict: + - heatmaps (np.ndarray): The coupled heatmap in shape + (1+K, H, W) where [W, H] is the `heatmap_size`. + - instance_heatmaps (np.ndarray): The decoupled heatmap in shape + (N*K, H, W) where M is the number of instances. + - keypoint_weights (np.ndarray): The weight for heatmaps in shape + (N*K). 
+ - instance_coords (np.ndarray): The coordinates of instance roots + in shape (N, 2) + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + if bbox is None: + # generate pseudo bbox via visible keypoints + bbox = get_instance_bbox(keypoints, keypoints_visible) + bbox = np.tile(bbox, 2).reshape(-1, 4, 2) + # corner order: left_top, left_bottom, right_top, right_bottom + bbox[:, 1:3, 0] = bbox[:, 0:2, 0] + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + _bbox = bbox.reshape(-1, 4, 2) / self.scale_factor + + # compute the root and scale of each instance + roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, + self.root_type) + + sigmas = self._get_instance_wise_sigmas(_bbox) + + # generate global heatmaps + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=np.concatenate((_keypoints, roots[:, None]), axis=1), + keypoints_visible=np.concatenate( + (keypoints_visible, roots_visible[:, None]), axis=1), + sigma=sigmas) + roots_visible = keypoint_weights[:, -1] + + # select instances + inst_roots, inst_indices = [], [] + diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) + for i in np.argsort(diagonal_lengths): + if roots_visible[i] < 1: + continue + # rand root point in 3x3 grid + x, y = roots[i] + np.random.randint(-1, 2, (2, )) + x = max(0, min(x, self.heatmap_size[0] - 1)) + y = max(0, min(y, self.heatmap_size[1] - 1)) + if (x, y) not in inst_roots: + inst_roots.append((x, y)) + inst_indices.append(i) + if len(inst_indices) > self.encode_max_instances: + rand_indices = random.sample( + range(len(inst_indices)), self.encode_max_instances) + inst_roots = [inst_roots[i] for i in rand_indices] + inst_indices = [inst_indices[i] for i in rand_indices] + + # generate instance-wise heatmaps + inst_heatmaps, inst_heatmap_weights = [], [] + for i in inst_indices: + inst_heatmap, inst_heatmap_weight = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints[i:i + 1], + keypoints_visible=keypoints_visible[i:i + 1], + sigma=sigmas[i].item()) + inst_heatmaps.append(inst_heatmap) + inst_heatmap_weights.append(inst_heatmap_weight) + + if len(inst_indices) > 0: + inst_heatmaps = np.concatenate(inst_heatmaps) + inst_heatmap_weights = np.concatenate(inst_heatmap_weights) + inst_roots = np.array(inst_roots, dtype=np.int32) + else: + inst_heatmaps = np.empty((0, *self.heatmap_size[::-1])) + inst_heatmap_weights = np.empty((0, )) + inst_roots = np.empty((0, 2), dtype=np.int32) + + encoded = dict( + heatmaps=heatmaps, + instance_heatmaps=inst_heatmaps, + keypoint_weights=inst_heatmap_weights, + instance_coords=inst_roots) + + return encoded + + def decode(self, instance_heatmaps: np.ndarray, + instance_scores: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from decoupled heatmaps. The decoded + keypoint coordinates are in the input image space. + + Args: + instance_heatmaps (np.ndarray): Heatmaps in shape (N, K, H, W) + instance_scores (np.ndarray): Confidence of instance roots + prediction in shape (N, 1) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
It + usually represents the confidence of the keypoint prediction + """ + keypoints, keypoint_scores = [], [] + + for i in range(instance_heatmaps.shape[0]): + heatmaps = instance_heatmaps[i].copy() + kpts, scores = get_heatmap_maximum(heatmaps) + keypoints.append(refine_keypoints(kpts[None], heatmaps)) + keypoint_scores.append(scores[None]) + + keypoints = np.concatenate(keypoints) + # Restore the keypoint scale + keypoints = keypoints * self.scale_factor + + keypoint_scores = np.concatenate(keypoint_scores) + keypoint_scores *= instance_scores + + return keypoints, keypoint_scores diff --git a/mmpose/codecs/edpose_label.py b/mmpose/codecs/edpose_label.py new file mode 100644 index 0000000000000000000000000000000000000000..0433784886bbd28a38832d0b5ac614e75d446869 --- /dev/null +++ b/mmpose/codecs/edpose_label.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from mmpose.structures import bbox_cs2xyxy, bbox_xyxy2cs +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class EDPoseLabel(BaseKeypointCodec): + r"""Generate keypoint and label coordinates for `ED-Pose`_ by + Yang J. et al (2023). + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + - keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + - area (np.ndarray): Area in shape (N) + - bbox (np.ndarray): Bbox in shape (N, 4) + + Args: + num_select (int): The number of candidate instances + num_keypoints (int): The Number of keypoints + """ + + auxiliary_encode_keys = {'area', 'bboxes', 'img_shape'} + instance_mapping_table = dict( + bbox='bboxes', + keypoints='keypoints', + keypoints_visible='keypoints_visible', + area='areas', + ) + + def __init__(self, num_select: int = 100, num_keypoints: int = 17): + super().__init__() + + self.num_select = num_select + self.num_keypoints = num_keypoints + + def encode( + self, + img_shape, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + area: Optional[np.ndarray] = None, + bboxes: Optional[np.ndarray] = None, + ) -> dict: + """Encoding keypoints, area and bbox from input image space to + normalized space. + + Args: + - img_shape (Sequence[int]): The shape of image in the format + of (width, height). + - keypoints (np.ndarray): Keypoint coordinates in + shape (N, K, D). + - keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K) + - area (np.ndarray): + - bboxes (np.ndarray): + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape like (N, K, D). + - keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + - area_labels (np.ndarray): The processed target + area in shape (N). + - bboxes_labels: The processed target bbox in + shape (N, 4). 
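+
+        Example (an illustrative sketch, assuming the codec is exported from
+        ``mmpose.codecs``; the image size and random keypoints below are
+        arbitrary placeholders)::
+
+            >>> import numpy as np
+            >>> from mmpose.codecs import EDPoseLabel
+            >>> codec = EDPoseLabel(num_select=100, num_keypoints=17)
+            >>> keypoints = np.random.rand(1, 17, 2) * [640, 480]
+            >>> encoded = codec.encode(
+            ...     img_shape=(640, 480), keypoints=keypoints)
+            >>> encoded['keypoints'].shape
+            (1, 17, 2)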
+ """ + w, h = img_shape + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if bboxes is not None: + bboxes = np.concatenate(bbox_xyxy2cs(bboxes), axis=-1) + bboxes = bboxes / np.array([w, h, w, h], dtype=np.float32) + + if area is not None: + area = area / float(w * h) + + if keypoints is not None: + keypoints = keypoints / np.array([w, h], dtype=np.float32) + + encoded = dict( + keypoints=keypoints, + area=area, + bbox=bboxes, + keypoints_visible=keypoints_visible) + + return encoded + + def decode(self, input_shapes: np.ndarray, pred_logits: np.ndarray, + pred_boxes: np.ndarray, pred_keypoints: np.ndarray): + """Select the final top-k keypoints, and decode the results from + normalize size to origin input size. + + Args: + input_shapes (Tensor): The size of input image resize. + test_cfg (ConfigType): Config of testing. + pred_logits (Tensor): The result of score. + pred_boxes (Tensor): The result of bbox. + pred_keypoints (Tensor): The result of keypoints. + + Returns: + tuple: Decoded boxes, keypoints, and keypoint scores. + """ + + # Initialization + num_keypoints = self.num_keypoints + prob = pred_logits.reshape(-1) + + # Select top-k instances based on prediction scores + topk_indexes = np.argsort(-prob)[:self.num_select] + topk_values = np.take_along_axis(prob, topk_indexes, axis=0) + scores = np.tile(topk_values[:, np.newaxis], [1, num_keypoints]) + + # Decode bounding boxes + topk_boxes = topk_indexes // pred_logits.shape[1] + boxes = bbox_cs2xyxy(*np.split(pred_boxes, [2], axis=-1)) + boxes = np.take_along_axis( + boxes, np.tile(topk_boxes[:, np.newaxis], [1, 4]), axis=0) + + # Convert from relative to absolute coordinates + img_h, img_w = np.split(input_shapes, 2, axis=0) + scale_fct = np.hstack([img_w, img_h, img_w, img_h]) + boxes = boxes * scale_fct[np.newaxis, :] + + # Decode keypoints + topk_keypoints = topk_indexes // pred_logits.shape[1] + keypoints = np.take_along_axis( + pred_keypoints, + np.tile(topk_keypoints[:, np.newaxis], [1, num_keypoints * 3]), + axis=0) + keypoints = keypoints[:, :(num_keypoints * 2)] + keypoints = keypoints * np.tile( + np.hstack([img_w, img_h]), [num_keypoints])[np.newaxis, :] + keypoints = keypoints.reshape(-1, num_keypoints, 2) + + return boxes, keypoints, scores diff --git a/mmpose/codecs/hand_3d_heatmap.py b/mmpose/codecs/hand_3d_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..b088e0d7faa27e0775152dd0579c2933ec481860 --- /dev/null +++ b/mmpose/codecs/hand_3d_heatmap.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils.gaussian_heatmap import generate_3d_gaussian_heatmaps +from .utils.post_processing import get_heatmap_3d_maximum + + +@KEYPOINT_CODECS.register_module() +class Hand3DHeatmap(BaseKeypointCodec): + r"""Generate target 3d heatmap and relative root depth for hand datasets. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + + Args: + image_size (tuple): Size of image. Default: ``[256, 256]``. + root_heatmap_size (int): Size of heatmap of root head. + Default: 64. + heatmap_size (tuple): Size of heatmap. Default: ``[64, 64, 64]``. + heatmap3d_depth_bound (float): Boundary for 3d heatmap depth. + Default: 400.0. + heatmap_size_root (int): Size of 3d heatmap root. Default: 64. 
+ depth_size (int): Number of depth discretization size, used for + decoding. Defaults to 64. + root_depth_bound (float): Boundary for 3d heatmap root depth. + Default: 400.0. + use_different_joint_weights (bool): Whether to use different joint + weights. Default: ``False``. + sigma (int): Sigma of heatmap gaussian. Default: 2. + joint_indices (list, optional): Indices of joints used for heatmap + generation. If None (default) is given, all joints will be used. + Default: ``None``. + max_bound (float): The maximal value of heatmap. Default: 1.0. + """ + + auxiliary_encode_keys = { + 'dataset_keypoint_weights', 'rel_root_depth', 'rel_root_valid', + 'hand_type', 'hand_type_valid', 'focal', 'principal_pt' + } + + instance_mapping_table = { + 'keypoints': 'keypoints', + 'keypoints_visible': 'keypoints_visible', + 'keypoints_cam': 'keypoints_cam', + } + + label_mapping_table = { + 'keypoint_weights': 'keypoint_weights', + 'root_depth_weight': 'root_depth_weight', + 'type_weight': 'type_weight', + 'root_depth': 'root_depth', + 'type': 'type' + } + + def __init__(self, + image_size: Tuple[int, int] = [256, 256], + root_heatmap_size: int = 64, + heatmap_size: Tuple[int, int, int] = [64, 64, 64], + heatmap3d_depth_bound: float = 400.0, + heatmap_size_root: int = 64, + root_depth_bound: float = 400.0, + depth_size: int = 64, + use_different_joint_weights: bool = False, + sigma: int = 2, + joint_indices: Optional[list] = None, + max_bound: float = 1.0): + super().__init__() + + self.image_size = np.array(image_size) + self.root_heatmap_size = root_heatmap_size + self.heatmap_size = np.array(heatmap_size) + self.heatmap3d_depth_bound = heatmap3d_depth_bound + self.heatmap_size_root = heatmap_size_root + self.root_depth_bound = root_depth_bound + self.depth_size = depth_size + self.use_different_joint_weights = use_different_joint_weights + + self.sigma = sigma + self.joint_indices = joint_indices + self.max_bound = max_bound + self.scale_factor = (np.array(image_size) / + heatmap_size[:-1]).astype(np.float32) + + def encode( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray], + dataset_keypoint_weights: Optional[np.ndarray], + rel_root_depth: np.float32, + rel_root_valid: np.float32, + hand_type: np.ndarray, + hand_type_valid: np.ndarray, + focal: np.ndarray, + principal_pt: np.ndarray, + ) -> dict: + """Encoding keypoints from input image space to input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (N, K). + dataset_keypoint_weights (np.ndarray, optional): Keypoints weight + in shape (K, ). + rel_root_depth (np.float32): Relative root depth. + rel_root_valid (float): Validity of relative root depth. + hand_type (np.ndarray): Type of hand encoded as a array. + hand_type_valid (np.ndarray): Validity of hand type. + focal (np.ndarray): Focal length of camera. + principal_pt (np.ndarray): Principal point of camera. 
+ + Returns: + encoded (dict): Contains the following items: + + - heatmaps (np.ndarray): The generated heatmap in shape + (K * D, H, W) where [W, H, D] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + - root_depth (np.ndarray): Encoded relative root depth + - root_depth_weight (np.ndarray): The weights of relative root + depth + - type (np.ndarray): Encoded hand type + - type_weight (np.ndarray): The weights of hand type + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:-1], dtype=np.float32) + + if self.use_different_joint_weights: + assert dataset_keypoint_weights is not None, 'To use different ' \ + 'joint weights,`dataset_keypoint_weights` cannot be None.' + + heatmaps, keypoint_weights = generate_3d_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma, + image_size=self.image_size, + heatmap3d_depth_bound=self.heatmap3d_depth_bound, + joint_indices=self.joint_indices, + max_bound=self.max_bound, + use_different_joint_weights=self.use_different_joint_weights, + dataset_keypoint_weights=dataset_keypoint_weights) + + rel_root_depth = (rel_root_depth / self.root_depth_bound + + 0.5) * self.heatmap_size_root + rel_root_valid = rel_root_valid * (rel_root_depth >= 0) * ( + rel_root_depth <= self.heatmap_size_root) + + encoded = dict( + heatmaps=heatmaps, + keypoint_weights=keypoint_weights, + root_depth=rel_root_depth * np.ones(1, dtype=np.float32), + type=hand_type, + type_weight=hand_type_valid, + root_depth_weight=rel_root_valid * np.ones(1, dtype=np.float32)) + return encoded + + def decode(self, heatmaps: np.ndarray, root_depth: np.ndarray, + hand_type: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + heatmaps (np.ndarray): Heatmaps in shape (K, D, H, W) + root_depth (np.ndarray): Root depth prediction. + hand_type (np.ndarray): Hand type prediction. + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). It + usually represents the confidence of the keypoint prediction + """ + heatmap3d = heatmaps.copy() + + keypoints, scores = get_heatmap_3d_maximum(heatmap3d) + + # transform keypoint depth to camera space + keypoints[..., 2] = (keypoints[..., 2] / self.depth_size - + 0.5) * self.heatmap3d_depth_bound + + # Unsqueeze the instance dimension for single-instance results + keypoints, scores = keypoints[None], scores[None] + + # Restore the keypoint scale + keypoints[..., :2] = keypoints[..., :2] * self.scale_factor + + # decode relative hand root depth + # transform relative root depth to camera space + rel_root_depth = ((root_depth / self.root_heatmap_size - 0.5) * + self.root_depth_bound) + + hand_type = (hand_type > 0).reshape(1, -1).astype(int) + + return keypoints, scores, rel_root_depth, hand_type diff --git a/mmpose/codecs/image_pose_lifting.py b/mmpose/codecs/image_pose_lifting.py new file mode 100644 index 0000000000000000000000000000000000000000..1665d88e1d90afc843db4fa453f4004d9ecd12d3 --- /dev/null +++ b/mmpose/codecs/image_pose_lifting.py @@ -0,0 +1,280 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Optional, Tuple, Union + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class ImagePoseLifting(BaseKeypointCodec): + r"""Generate keypoint coordinates for pose lifter. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - pose-lifitng target dimension: C + + Args: + num_keypoints (int): The number of keypoints in the dataset. + root_index (Union[int, List]): Root keypoint index in the pose. + remove_root (bool): If true, remove the root keypoint from the pose. + Default: ``False``. + save_index (bool): If true, store the root position separated from the + original pose. Default: ``False``. + reshape_keypoints (bool): If true, reshape the keypoints into shape + (-1, N). Default: ``True``. + concat_vis (bool): If true, concat the visibility item of keypoints. + Default: ``False``. + keypoints_mean (np.ndarray, optional): Mean values of keypoints + coordinates in shape (K, D). + keypoints_std (np.ndarray, optional): Std values of keypoints + coordinates in shape (K, D). + target_mean (np.ndarray, optional): Mean values of pose-lifitng target + coordinates in shape (K, C). + target_std (np.ndarray, optional): Std values of pose-lifitng target + coordinates in shape (K, C). + """ + + auxiliary_encode_keys = {'lifting_target', 'lifting_target_visible'} + + instance_mapping_table = dict( + lifting_target='lifting_target', + lifting_target_visible='lifting_target_visible', + ) + label_mapping_table = dict( + trajectory_weights='trajectory_weights', + lifting_target_label='lifting_target_label', + lifting_target_weight='lifting_target_weight') + + def __init__(self, + num_keypoints: int, + root_index: Union[int, List] = 0, + remove_root: bool = False, + save_index: bool = False, + reshape_keypoints: bool = True, + concat_vis: bool = False, + keypoints_mean: Optional[np.ndarray] = None, + keypoints_std: Optional[np.ndarray] = None, + target_mean: Optional[np.ndarray] = None, + target_std: Optional[np.ndarray] = None, + additional_encode_keys: Optional[List[str]] = None): + super().__init__() + + self.num_keypoints = num_keypoints + if isinstance(root_index, int): + root_index = [root_index] + self.root_index = root_index + self.remove_root = remove_root + self.save_index = save_index + self.reshape_keypoints = reshape_keypoints + self.concat_vis = concat_vis + if keypoints_mean is not None: + assert keypoints_std is not None, 'keypoints_std is None' + keypoints_mean = np.array( + keypoints_mean, + dtype=np.float32).reshape(1, num_keypoints, -1) + keypoints_std = np.array( + keypoints_std, dtype=np.float32).reshape(1, num_keypoints, -1) + + assert keypoints_mean.shape == keypoints_std.shape, ( + f'keypoints_mean.shape {keypoints_mean.shape} != ' + f'keypoints_std.shape {keypoints_std.shape}') + if target_mean is not None: + assert target_std is not None, 'target_std is None' + target_dim = num_keypoints - 1 if remove_root else num_keypoints + target_mean = np.array( + target_mean, dtype=np.float32).reshape(1, target_dim, -1) + target_std = np.array( + target_std, dtype=np.float32).reshape(1, target_dim, -1) + + assert target_mean.shape == target_std.shape, ( + f'target_mean.shape {target_mean.shape} != ' + f'target_std.shape {target_std.shape}') + self.keypoints_mean = keypoints_mean + self.keypoints_std = keypoints_std + self.target_mean = target_mean + self.target_std = target_std + + if additional_encode_keys is not None: + 
self.auxiliary_encode_keys.update(additional_encode_keys) + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + lifting_target: Optional[np.ndarray] = None, + lifting_target_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (N, K). + lifting_target (np.ndarray, optional): 3d target coordinate in + shape (T, K, C). + lifting_target_visible (np.ndarray, optional): Target coordinate in + shape (T, K, ). + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape like (N, K, D) or (K * D, N). + - keypoint_labels_visible (np.ndarray): The processed + keypoints' weights in shape (N, K, ) or (N-1, K, ). + - lifting_target_label: The processed target coordinate in + shape (K, C) or (K-1, C). + - lifting_target_weight (np.ndarray): The target weights in + shape (K, ) or (K-1, ). + - trajectory_weights (np.ndarray): The trajectory weights in + shape (K, ). + - target_root (np.ndarray): The root coordinate of target in + shape (C, ). + + In addition, there are some optional items it may contain: + + - target_root (np.ndarray): The root coordinate of target in + shape (C, ). Exists if ``zero_center`` is ``True``. + - target_root_removed (bool): Indicate whether the root of + pose-lifitng target is removed. Exists if + ``remove_root`` is ``True``. + - target_root_index (int): An integer indicating the index of + root. Exists if ``remove_root`` and ``save_index`` + are ``True``. + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if lifting_target is None: + lifting_target = [keypoints[0]] + + # set initial value for `lifting_target_weight` + # and `trajectory_weights` + if lifting_target_visible is None: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + lifting_target_weight = lifting_target_visible + trajectory_weights = (1 / lifting_target[:, 2]) + else: + valid = lifting_target_visible > 0.5 + lifting_target_weight = np.where(valid, 1., 0.).astype(np.float32) + trajectory_weights = lifting_target_weight + + encoded = dict() + + # Zero-center the target pose around a given root keypoint + assert (lifting_target.ndim >= 2 and + lifting_target.shape[-2] > max(self.root_index)), \ + f'Got invalid joint shape {lifting_target.shape}' + + root = np.mean( + lifting_target[..., self.root_index, :], axis=-2, dtype=np.float32) + lifting_target_label = lifting_target - root[np.newaxis, ...] 
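+        # `root` is the mean position of the selected root keypoints, so
+        # passing several root indices zero-centers the target around a
+        # virtual root at their average location.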
+ + if self.remove_root and len(self.root_index) == 1: + root_index = self.root_index[0] + lifting_target_label = np.delete( + lifting_target_label, root_index, axis=-2) + lifting_target_visible = np.delete( + lifting_target_visible, root_index, axis=-2) + assert lifting_target_weight.ndim in { + 2, 3 + }, (f'lifting_target_weight.ndim {lifting_target_weight.ndim} ' + 'is not in {2, 3}') + + axis_to_remove = -2 if lifting_target_weight.ndim == 3 else -1 + lifting_target_weight = np.delete( + lifting_target_weight, root_index, axis=axis_to_remove) + # Add a flag to avoid latter transforms that rely on the root + # joint or the original joint index + encoded['target_root_removed'] = True + + # Save the root index which is necessary to restore the global pose + if self.save_index: + encoded['target_root_index'] = root_index + + # Normalize the 2D keypoint coordinate with mean and std + keypoint_labels = keypoints.copy() + + if self.keypoints_mean is not None: + assert self.keypoints_mean.shape[1:] == keypoints.shape[1:], ( + f'self.keypoints_mean.shape[1:] {self.keypoints_mean.shape[1:]} ' # noqa + f'!= keypoints.shape[1:] {keypoints.shape[1:]}') + encoded['keypoints_mean'] = self.keypoints_mean.copy() + encoded['keypoints_std'] = self.keypoints_std.copy() + + keypoint_labels = (keypoint_labels - + self.keypoints_mean) / self.keypoints_std + if self.target_mean is not None: + assert self.target_mean.shape == lifting_target_label.shape, ( + f'self.target_mean.shape {self.target_mean.shape} ' + f'!= lifting_target_label.shape {lifting_target_label.shape}' # noqa + ) + encoded['target_mean'] = self.target_mean.copy() + encoded['target_std'] = self.target_std.copy() + + lifting_target_label = (lifting_target_label - + self.target_mean) / self.target_std + + # Generate reshaped keypoint coordinates + assert keypoint_labels.ndim in { + 2, 3 + }, (f'keypoint_labels.ndim {keypoint_labels.ndim} is not in {2, 3}') + if keypoint_labels.ndim == 2: + keypoint_labels = keypoint_labels[None, ...] + + if self.concat_vis: + keypoints_visible_ = keypoints_visible + if keypoints_visible.ndim == 2: + keypoints_visible_ = keypoints_visible[..., None] + keypoint_labels = np.concatenate( + (keypoint_labels, keypoints_visible_), axis=2) + + if self.reshape_keypoints: + N = keypoint_labels.shape[0] + keypoint_labels = keypoint_labels.transpose(1, 2, 0).reshape(-1, N) + + encoded['keypoint_labels'] = keypoint_labels + encoded['keypoint_labels_visible'] = keypoints_visible + encoded['lifting_target_label'] = lifting_target_label + encoded['lifting_target_weight'] = lifting_target_weight + encoded['trajectory_weights'] = trajectory_weights + encoded['target_root'] = root + + return encoded + + def decode(self, + encoded: np.ndarray, + target_root: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, C). + target_root (np.ndarray, optional): The target root coordinate. + Default: ``None``. + + Returns: + keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). + scores (np.ndarray): The keypoint scores in shape (N, K). 
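+
+        Example (an illustrative sketch; the random root-relative pose and
+        the zero root below are placeholders)::
+
+            >>> import numpy as np
+            >>> from mmpose.codecs import ImagePoseLifting
+            >>> codec = ImagePoseLifting(num_keypoints=17, root_index=0)
+            >>> pose = np.random.rand(1, 17, 3).astype(np.float32)
+            >>> keypoints, scores = codec.decode(
+            ...     pose, target_root=np.zeros(3, dtype=np.float32))
+            >>> keypoints.shape
+            (1, 17, 3)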
+ """ + keypoints = encoded.copy() + + if self.target_mean is not None and self.target_std is not None: + assert self.target_mean.shape == keypoints.shape, ( + f'self.target_mean.shape {self.target_mean.shape} ' + f'!= keypoints.shape {keypoints.shape}') + keypoints = keypoints * self.target_std + self.target_mean + + if target_root is not None and target_root.size > 0: + keypoints = keypoints + target_root + if self.remove_root and len(self.root_index) == 1: + keypoints = np.insert( + keypoints, self.root_index, target_root, axis=1) + scores = np.ones(keypoints.shape[:-1], dtype=np.float32) + + return keypoints, scores diff --git a/mmpose/codecs/integral_regression_label.py b/mmpose/codecs/integral_regression_label.py new file mode 100644 index 0000000000000000000000000000000000000000..a3ded1f00b89cfe6c67107529d0787eb1acc49cb --- /dev/null +++ b/mmpose/codecs/integral_regression_label.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .msra_heatmap import MSRAHeatmap +from .regression_label import RegressionLabel + + +@KEYPOINT_CODECS.register_module() +class IntegralRegressionLabel(BaseKeypointCodec): + """Generate keypoint coordinates and normalized heatmaps. See the paper: + `DSNT`_ by Nibali et al(2018). + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where + [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` + encoding. See `Dark Pose`_ for details. Defaults to ``False`` + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. The kernel size and sigma should follow + the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. + Defaults to 11 + normalize (bool): Whether to normalize the heatmaps. Defaults to True. + + .. _`DSNT`: https://arxiv.org/abs/1801.07372 + """ + + label_mapping_table = dict( + keypoint_labels='keypoint_labels', + keypoint_weights='keypoint_weights', + ) + field_mapping_table = dict(heatmaps='heatmaps', ) + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: float, + unbiased: bool = False, + blur_kernel_size: int = 11, + normalize: bool = True) -> None: + super().__init__() + + self.heatmap_codec = MSRAHeatmap(input_size, heatmap_size, sigma, + unbiased, blur_kernel_size) + self.keypoint_codec = RegressionLabel(input_size) + self.normalize = normalize + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints to regression labels and heatmaps. 
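+
+        The regression labels are produced by :class:`RegressionLabel` and
+        the heatmaps by :class:`MSRAHeatmap`; when ``normalize`` is ``True``,
+        each heatmap is rescaled so that its values sum to one.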
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + encoded_hm = self.heatmap_codec.encode(keypoints, keypoints_visible) + encoded_kp = self.keypoint_codec.encode(keypoints, keypoints_visible) + + heatmaps = encoded_hm['heatmaps'] + keypoint_labels = encoded_kp['keypoint_labels'] + keypoint_weights = encoded_kp['keypoint_weights'] + + if self.normalize: + val_sum = heatmaps.sum(axis=(-1, -2)).reshape(-1, 1, 1) + 1e-24 + heatmaps = heatmaps / val_sum + + encoded = dict( + keypoint_labels=keypoint_labels, + heatmaps=heatmaps, + keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - socres (np.ndarray): The keypoint scores in shape (N, K). + It usually represents the confidence of the keypoint prediction + """ + + keypoints, scores = self.keypoint_codec.decode(encoded) + + return keypoints, scores diff --git a/mmpose/codecs/megvii_heatmap.py b/mmpose/codecs/megvii_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..3af0a54ff832f87e3e546e5e0b754dd95fa40bba --- /dev/null +++ b/mmpose/codecs/megvii_heatmap.py @@ -0,0 +1,147 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Optional, Tuple + +import cv2 +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import gaussian_blur, get_heatmap_maximum + + +@KEYPOINT_CODECS.register_module() +class MegviiHeatmap(BaseKeypointCodec): + """Represent keypoints as heatmaps via "Megvii" approach. See `MSPN`_ + (2019) and `CPN`_ (2018) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + kernel_size (tuple): The kernel size of the heatmap gaussian in + [ks_x, ks_y] + + .. _`MSPN`: https://arxiv.org/abs/1901.00148 + .. _`CPN`: https://arxiv.org/abs/1711.07319 + """ + + label_mapping_table = dict(keypoint_weights='keypoint_weights', ) + field_mapping_table = dict(heatmaps='heatmaps', ) + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + kernel_size: int, + ) -> None: + + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.kernel_size = kernel_size + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. 
Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + N, K, _ = keypoints.shape + W, H = self.heatmap_size + + assert N == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get center coordinates + kx, ky = (keypoints[n, k] / self.scale_factor).astype(np.int64) + if kx < 0 or kx >= W or ky < 0 or ky >= H: + keypoint_weights[n, k] = 0 + continue + + heatmaps[k, ky, kx] = 1. + kernel_size = (self.kernel_size, self.kernel_size) + heatmaps[k] = cv2.GaussianBlur(heatmaps[k], kernel_size, 0) + + # normalize the heatmap + heatmaps[k] = heatmaps[k] / heatmaps[k, ky, kx] * 255. + + encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (K, D) + - scores (np.ndarray): The keypoint scores in shape (K,). It + usually represents the confidence of the keypoint prediction + """ + heatmaps = gaussian_blur(encoded.copy(), self.kernel_size) + K, H, W = heatmaps.shape + + keypoints, scores = get_heatmap_maximum(heatmaps) + + for k in range(K): + heatmap = heatmaps[k] + px = int(keypoints[k, 0]) + py = int(keypoints[k, 1]) + if 1 < px < W - 1 and 1 < py < H - 1: + diff = np.array([ + heatmap[py][px + 1] - heatmap[py][px - 1], + heatmap[py + 1][px] - heatmap[py - 1][px] + ]) + keypoints[k] += (np.sign(diff) * 0.25 + 0.5) + + scores = scores / 255.0 + 0.5 + + # Unsqueeze the instance dimension for single-instance results + # and restore the keypoint scales + keypoints = keypoints[None] * self.scale_factor + scores = scores[None] + + return keypoints, scores diff --git a/mmpose/codecs/motionbert_label.py b/mmpose/codecs/motionbert_label.py new file mode 100644 index 0000000000000000000000000000000000000000..98024ea4e63d1ca836808c950d72b4760b969c41 --- /dev/null +++ b/mmpose/codecs/motionbert_label.py @@ -0,0 +1,240 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from copy import deepcopy +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import camera_to_image_coord + + +@KEYPOINT_CODECS.register_module() +class MotionBERTLabel(BaseKeypointCodec): + r"""Generate keypoint and label coordinates for `MotionBERT`_ by Zhu et al + (2022). + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - pose-lifitng target dimension: C + + Args: + num_keypoints (int): The number of keypoints in the dataset. + root_index (int): Root keypoint index in the pose. Default: 0. + remove_root (bool): If true, remove the root keypoint from the pose. + Default: ``False``. 
+ save_index (bool): If true, store the root position separated from the + original pose, only takes effect if ``remove_root`` is ``True``. + Default: ``False``. + concat_vis (bool): If true, concat the visibility item of keypoints. + Default: ``False``. + rootrel (bool): If true, the root keypoint will be set to the + coordinate origin. Default: ``False``. + mode (str): Indicating whether the current mode is 'train' or 'test'. + Default: ``'test'``. + """ + + auxiliary_encode_keys = { + 'lifting_target', 'lifting_target_visible', 'camera_param', 'factor' + } + + instance_mapping_table = dict( + lifting_target='lifting_target', + lifting_target_visible='lifting_target_visible', + ) + label_mapping_table = dict( + trajectory_weights='trajectory_weights', + lifting_target_label='lifting_target_label', + lifting_target_weight='lifting_target_weight') + + def __init__(self, + num_keypoints: int, + root_index: int = 0, + remove_root: bool = False, + save_index: bool = False, + concat_vis: bool = False, + rootrel: bool = False, + mode: str = 'test'): + super().__init__() + + self.num_keypoints = num_keypoints + self.root_index = root_index + self.remove_root = remove_root + self.save_index = save_index + self.concat_vis = concat_vis + self.rootrel = rootrel + assert mode.lower() in {'train', 'test' + }, (f'Unsupported mode {mode}, ' + 'mode should be one of ("train", "test").') + self.mode = mode.lower() + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + lifting_target: Optional[np.ndarray] = None, + lifting_target_visible: Optional[np.ndarray] = None, + camera_param: Optional[dict] = None, + factor: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (B, T, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (B, T, K). + lifting_target (np.ndarray, optional): 3d target coordinate in + shape (T, K, C). + lifting_target_visible (np.ndarray, optional): Target coordinate in + shape (T, K, ). + camera_param (dict, optional): The camera parameter dictionary. + factor (np.ndarray, optional): The factor mapping camera and image + coordinate in shape (T, ). + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape like (N, K, D). + - keypoint_labels_visible (np.ndarray): The processed + keypoints' weights in shape (N, K, ) or (N, K-1, ). + - lifting_target_label: The processed target coordinate in + shape (K, C) or (K-1, C). + - lifting_target_weight (np.ndarray): The target weights in + shape (K, ) or (K-1, ). + - factor (np.ndarray): The factor mapping camera and image + coordinate in shape (T, 1). 
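+
+        Example (an illustrative sketch; random inputs and a dummy camera
+        with ``w=1000, h=1000`` are used only to show the expected shapes)::
+
+            >>> import numpy as np
+            >>> from mmpose.codecs import MotionBERTLabel
+            >>> codec = MotionBERTLabel(num_keypoints=17)
+            >>> kpts_2d = np.random.rand(1, 17, 2).astype(np.float32)
+            >>> target_3d = np.random.rand(1, 17, 3).astype(np.float32)
+            >>> encoded = codec.encode(
+            ...     keypoints=kpts_2d,
+            ...     lifting_target=target_3d,
+            ...     camera_param=dict(w=1000, h=1000))
+            >>> encoded['keypoint_labels'].shape
+            (1, 17, 2)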
+ """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + # set initial value for `lifting_target_weight` + if lifting_target_visible is None: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + lifting_target_weight = lifting_target_visible + else: + valid = lifting_target_visible > 0.5 + lifting_target_weight = np.where(valid, 1., 0.).astype(np.float32) + + if camera_param is None: + camera_param = dict() + + encoded = dict() + + assert lifting_target is not None + lifting_target_label = lifting_target.copy() + keypoint_labels = keypoints.copy() + + assert keypoint_labels.ndim in { + 2, 3 + }, (f'Keypoint labels should have 2 or 3 dimensions, ' + f'but got {keypoint_labels.ndim}.') + if keypoint_labels.ndim == 2: + keypoint_labels = keypoint_labels[None, ...] + + # Normalize the 2D keypoint coordinate with image width and height + _camera_param = deepcopy(camera_param) + assert 'w' in _camera_param and 'h' in _camera_param, ( + 'Camera parameters should contain "w" and "h".') + w, h = _camera_param['w'], _camera_param['h'] + keypoint_labels[ + ..., :2] = keypoint_labels[..., :2] / w * 2 - [1, h / w] + + # convert target to image coordinate + T = keypoint_labels.shape[0] + factor_ = np.array([4] * T, dtype=np.float32).reshape(T, ) + if 'f' in _camera_param and 'c' in _camera_param: + lifting_target_label, factor_ = camera_to_image_coord( + self.root_index, lifting_target_label, _camera_param) + if self.mode == 'train': + w, h = w / 1000, h / 1000 + lifting_target_label[ + ..., :2] = lifting_target_label[..., :2] / w * 2 - [1, h / w] + lifting_target_label[..., 2] = lifting_target_label[..., 2] / w * 2 + lifting_target_label[..., :, :] = lifting_target_label[ + ..., :, :] - lifting_target_label[..., + self.root_index:self.root_index + + 1, :] + if factor is None or factor[0] == 0: + factor = factor_ + if factor.ndim == 1: + factor = factor[:, None] + if self.mode == 'test': + lifting_target_label *= factor[..., None] + + if self.concat_vis: + keypoints_visible_ = keypoints_visible + if keypoints_visible.ndim == 2: + keypoints_visible_ = keypoints_visible[..., None] + keypoint_labels = np.concatenate( + (keypoint_labels, keypoints_visible_), axis=2) + + encoded['keypoint_labels'] = keypoint_labels + encoded['keypoint_labels_visible'] = keypoints_visible + encoded['lifting_target_label'] = lifting_target_label + encoded['lifting_target_weight'] = lifting_target_weight + encoded['lifting_target'] = lifting_target_label + encoded['lifting_target_visible'] = lifting_target_visible + encoded['factor'] = factor + + return encoded + + def decode( + self, + encoded: np.ndarray, + w: Optional[np.ndarray] = None, + h: Optional[np.ndarray] = None, + factor: Optional[np.ndarray] = None, + ) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, C). + w (np.ndarray, optional): The image widths in shape (N, ). + Default: ``None``. + h (np.ndarray, optional): The image heights in shape (N, ). + Default: ``None``. + factor (np.ndarray, optional): The factor for projection in shape + (N, ). Default: ``None``. + + Returns: + keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). + scores (np.ndarray): The keypoint scores in shape (N, K). 
+ """ + keypoints = encoded.copy() + scores = np.ones(keypoints.shape[:-1], dtype=np.float32) + + if self.rootrel: + keypoints[..., 0, :] = 0 + + if w is not None and w.size > 0: + assert w.shape == h.shape, (f'w and h should have the same shape, ' + f'but got {w.shape} and {h.shape}.') + assert w.shape[0] == keypoints.shape[0], ( + f'w and h should have the same batch size, ' + f'but got {w.shape[0]} and {keypoints.shape[0]}.') + assert w.ndim in {1, + 2}, (f'w and h should have 1 or 2 dimensions, ' + f'but got {w.ndim}.') + if w.ndim == 1: + w = w[:, None] + h = h[:, None] + trans = np.append( + np.ones((w.shape[0], 1)), h / w, axis=1)[:, None, :] + keypoints[..., :2] = (keypoints[..., :2] + trans) * w[:, None] / 2 + keypoints[..., 2:] = keypoints[..., 2:] * w[:, None] / 2 + + if factor is not None and factor.size > 0: + assert factor.shape[0] == keypoints.shape[0], ( + f'factor should have the same batch size, ' + f'but got {factor.shape[0]} and {keypoints.shape[0]}.') + keypoints *= factor[..., None] + + keypoints[..., :, :] = keypoints[..., :, :] - keypoints[ + ..., self.root_index:self.root_index + 1, :] + keypoints /= 1000. + return keypoints, scores diff --git a/mmpose/codecs/msra_heatmap.py b/mmpose/codecs/msra_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..15742555b495560c9dfa095a3cdc93ba0eb5d928 --- /dev/null +++ b/mmpose/codecs/msra_heatmap.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils.gaussian_heatmap import (generate_gaussian_heatmaps, + generate_unbiased_gaussian_heatmaps) +from .utils.post_processing import get_heatmap_maximum +from .utils.refinement import refine_keypoints, refine_keypoints_dark + + +@KEYPOINT_CODECS.register_module() +class MSRAHeatmap(BaseKeypointCodec): + """Represent keypoints as heatmaps via "MSRA" approach. See the paper: + `Simple Baselines for Human Pose Estimation and Tracking`_ by Xiao et al + (2018) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` + encoding. See `Dark Pose`_ for details. Defaults to ``False`` + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. The kernel size and sigma should follow + the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. + Defaults to 11 + + .. _`Simple Baselines for Human Pose Estimation and Tracking`: + https://arxiv.org/abs/1804.06208 + .. 
_`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + + label_mapping_table = dict(keypoint_weights='keypoint_weights', ) + field_mapping_table = dict(heatmaps='heatmaps', ) + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: float, + unbiased: bool = False, + blur_kernel_size: int = 11) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.sigma = sigma + self.unbiased = unbiased + + # The Gaussian blur kernel size of the heatmap modulation + # in DarkPose and the sigma value follows the expirical + # formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8` + # which gives: + # sigma~=3 if ks=17 + # sigma=2 if ks=11; + # sigma~=1.5 if ks=7; + # sigma~=1 if ks=3; + self.blur_kernel_size = blur_kernel_size + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.unbiased: + heatmaps, keypoint_weights = generate_unbiased_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + else: + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + + encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + K, H, W = heatmaps.shape + + keypoints, scores = get_heatmap_maximum(heatmaps) + + # Unsqueeze the instance dimension for single-instance results + keypoints, scores = keypoints[None], scores[None] + + if self.unbiased: + # Alleviate biased coordinate + keypoints = refine_keypoints_dark( + keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) + + else: + keypoints = refine_keypoints(keypoints, heatmaps) + + # Restore the keypoint scale + keypoints = keypoints * self.scale_factor + + return keypoints, scores diff --git a/mmpose/codecs/onehot_heatmap.py b/mmpose/codecs/onehot_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..e820271f6c92ec93cb3abec3009b7acb9d804e1f --- /dev/null +++ b/mmpose/codecs/onehot_heatmap.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
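For reference, a minimal round trip through the MSRAHeatmap codec above could look as follows (an illustrative sketch, not part of the patch; the keypoint values are random and ``MSRAHeatmap`` is assumed to be importable from ``mmpose.codecs``):

    import numpy as np
    from mmpose.codecs import MSRAHeatmap

    codec = MSRAHeatmap(input_size=(192, 256), heatmap_size=(48, 64), sigma=2.)
    keypoints = np.random.rand(1, 17, 2) * [192, 256]      # (N=1, K=17, D=2)
    visible = np.ones((1, 17), dtype=np.float32)

    encoded = codec.encode(keypoints, visible)
    # encoded['heatmaps']: (17, 64, 48), encoded['keypoint_weights']: (1, 17)
    decoded, scores = codec.decode(encoded['heatmaps'])
    # decoded: (1, 17, 2) back in input-image space, scores: (1, 17)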
+from typing import Optional, Tuple + +import cv2 +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (generate_offset_heatmap, generate_onehot_heatmaps, + get_heatmap_maximum, refine_keypoints_dark_udp) + + +@KEYPOINT_CODECS.register_module() +class OneHotHeatmap(BaseKeypointCodec): + r"""Generate keypoint heatmaps by Unbiased Data Processing (UDP). + See the paper: `The Devil is in the Details: Delving into Unbiased Data + Processing for Human Pose Estimation`_ by Huang et al (2020) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmap (np.ndarray): The generated heatmap in shape (C_out, H, W) + where [W, H] is the `heatmap_size`, and the C_out is the output + channel number which depends on the `heatmap_type`. If + `heatmap_type=='gaussian'`, C_out equals to keypoint number K; + if `heatmap_type=='combined'`, C_out equals to K*3 + (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape (K,) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + heatmap_type (str): The heatmap type to encode the keypoitns. Options + are: + + - ``'gaussian'``: Gaussian heatmap + - ``'combined'``: Combination of a binary label map and offset + maps for X and Y axes. + + sigma (float): The sigma value of the Gaussian heatmap when + ``heatmap_type=='gaussian'``. Defaults to 2.0 + radius_factor (float): The radius factor of the binary label + map when ``heatmap_type=='combined'``. The positive region is + defined as the neighbor of the keypoit with the radius + :math:`r=radius_factor*max(W, H)`. Defaults to 0.0546875 + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. Defaults to 11 + + .. _`The Devil is in the Details: Delving into Unbiased Data Processing for + Human Pose Estimation`: https://arxiv.org/abs/1911.07524 + """ + + label_mapping_table = dict(keypoint_weights='keypoint_weights', ) + field_mapping_table = dict(heatmaps='heatmaps', ) + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + heatmap_type: str = 'gaussian', + sigma: float = 2., + radius_factor: float = 0.0546875, + blur_kernel_size: int = 11, + increase_sigma_with_padding=False, + amap_scale: float = 1.0, + normalize=None, + ) -> None: + super().__init__() + self.input_size = np.array(input_size) + self.heatmap_size = np.array(heatmap_size) + self.sigma = sigma + self.radius_factor = radius_factor + self.heatmap_type = heatmap_type + self.blur_kernel_size = blur_kernel_size + self.increase_sigma_with_padding = increase_sigma_with_padding + self.normalize = normalize + + self.amap_size = self.input_size * amap_scale + self.scale_factor = ((self.amap_size - 1) / + (self.heatmap_size - 1)).astype(np.float32) + self.input_center = self.input_size / 2 + self.top_left = self.input_center - self.amap_size / 2 + + if self.heatmap_type not in {'gaussian', 'combined'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. Should be one of ' + '{"gaussian", "combined"}') + + def _kpts_to_activation_pts(self, keypoints: np.ndarray) -> np.ndarray: + """ + Transform the keypoint coordinates to the activation space. 
+ In the original UDPHeatmap, activation map is the same as the input image space with + different resolution but in this case we allow the activation map to have different + size (padding) than the input image space. + Centers of activation map and input image space are aligned. + """ + transformed_keypoints = keypoints - self.top_left + transformed_keypoints = transformed_keypoints / self.scale_factor + return transformed_keypoints + + def _activation_pts_to_kpts(self, keypoints: np.ndarray) -> np.ndarray: + """ + Transform the points in activation map to the keypoint coordinates. + In the original UDPHeatmap, activation map is the same as the input image space with + different resolution but in this case we allow the activation map to have different + size (padding) than the input image space. + Centers of activation map and input image space are aligned. + """ + W, H = self.heatmap_size + transformed_keypoints = keypoints / [W - 1, H - 1] * self.amap_size + transformed_keypoints += self.top_left + return transformed_keypoints + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + id_similarity: Optional[float] = 0.0, + keypoints_visibility: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + id_similarity (float): The usefulness of the identity information + for the whole pose. Defaults to 0.0 + keypoints_visibility (np.ndarray): The visibility bit for each + keypoint (N, K). Defaults to None + + Returns: + dict: + - heatmap (np.ndarray): The generated heatmap in shape + (C_out, H, W) where [W, H] is the `heatmap_size`, and the + C_out is the output channel number which depends on the + `heatmap_type`. If `heatmap_type=='gaussian'`, C_out equals to + keypoint number K; if `heatmap_type=='combined'`, C_out + equals to K*3 (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape + (K,) + """ + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visibility is None: + keypoints_visibility = np.zeros(keypoints.shape[:2], dtype=np.float32) + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.heatmap_type == 'gaussian': + heatmaps, keypoint_weights = generate_onehot_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=self._kpts_to_activation_pts(keypoints), + keypoints_visible=keypoints_visible, + sigma=self.sigma, + keypoints_visibility=keypoints_visibility, + increase_sigma_with_padding=self.increase_sigma_with_padding) + elif self.heatmap_type == 'combined': + heatmaps, keypoint_weights = generate_offset_heatmap( + heatmap_size=self.heatmap_size, + keypoints=self._kpts_to_activation_pts(keypoints), + keypoints_visible=keypoints_visible, + radius_factor=self.radius_factor) + else: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. 
Should be one of ' + '{"gaussian", "combined"}') + + if self.normalize is not None: + heatmaps_sum = np.sum(heatmaps, axis=(1, 2), keepdims=False) + mask = heatmaps_sum > 0 + heatmaps[mask, :, :] = heatmaps[mask, :, :] / (heatmaps_sum[mask, None, None] + np.finfo(np.float32).eps) + heatmaps = heatmaps * self.normalize + + annotated = keypoints_visible > 0 + + heatmap_keypoints = self._kpts_to_activation_pts(keypoints) + in_image = np.logical_and( + heatmap_keypoints[:, :, 0] >= 0, + heatmap_keypoints[:, :, 0] < self.heatmap_size[0], + ) + in_image = np.logical_and( + in_image, + heatmap_keypoints[:, :, 1] >= 0, + ) + in_image = np.logical_and( + in_image, + heatmap_keypoints[:, :, 1] < self.heatmap_size[1], + ) + + encoded = dict( + heatmaps=heatmaps, + keypoint_weights=keypoint_weights, + annotated=annotated, + in_image=in_image, + keypoints_scaled=keypoints, + heatmap_keypoints=heatmap_keypoints, + identification_similarity=id_similarity, + ) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + + if self.heatmap_type == 'gaussian': + keypoints, scores = get_heatmap_maximum(heatmaps) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None] + scores = scores[None] + + keypoints = refine_keypoints_dark_udp( + keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) + + elif self.heatmap_type == 'combined': + _K, H, W = heatmaps.shape + K = _K // 3 + + for cls_heatmap in heatmaps[::3]: + # Apply Gaussian blur on classification maps + ks = 2 * self.blur_kernel_size + 1 + cv2.GaussianBlur(cls_heatmap, (ks, ks), 0, cls_heatmap) + + # valid radius + radius = self.radius_factor * max(W, H) + + x_offset = heatmaps[1::3].flatten() * radius + y_offset = heatmaps[2::3].flatten() * radius + keypoints, scores = get_heatmap_maximum(heatmaps=heatmaps[::3]) + index = (keypoints[..., 0] + keypoints[..., 1] * W).flatten() + index += W * H * np.arange(0, K) + index = index.astype(int) + keypoints += np.stack((x_offset[index], y_offset[index]), axis=-1) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None].astype(np.float32) + scores = scores[None] + + keypoints = self._activation_pts_to_kpts(keypoints) + + return keypoints, scores diff --git a/mmpose/codecs/regression_label.py b/mmpose/codecs/regression_label.py new file mode 100644 index 0000000000000000000000000000000000000000..74cd21b73dcadc5ad4df2a5f270da9e0a2ce3a68 --- /dev/null +++ b/mmpose/codecs/regression_label.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class RegressionLabel(BaseKeypointCodec): + r"""Generate keypoint coordinates. 
+ + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + + """ + + label_mapping_table = dict( + keypoint_labels='keypoint_labels', + keypoint_weights='keypoint_weights', + ) + + def __init__(self, input_size: Tuple[int, int]) -> None: + super().__init__() + + self.input_size = input_size + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + w, h = self.input_size + valid = ((keypoints >= 0) & + (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( + keypoints_visible > 0.5) + + keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) + keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) + + encoded = dict( + keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). + It usually represents the confidence of the keypoint prediction + """ + + if encoded.shape[-1] == 2: + N, K, _ = encoded.shape + normalized_coords = encoded.copy() + scores = np.ones((N, K), dtype=np.float32) + elif encoded.shape[-1] == 4: + # split coords and sigma if outputs contain output_sigma + normalized_coords = encoded[..., :2].copy() + output_sigma = encoded[..., 2:4].copy() + + scores = (1 - output_sigma).mean(axis=-1) + else: + raise ValueError( + 'Keypoint dimension should be 2 or 4 (with sigma), ' + f'but got {encoded.shape[-1]}') + + w, h = self.input_size + keypoints = normalized_coords * np.array([w, h]) + + return keypoints, scores diff --git a/mmpose/codecs/simcc_label.py b/mmpose/codecs/simcc_label.py new file mode 100644 index 0000000000000000000000000000000000000000..e83960faafbf6e0852ae0dbdd361989cbcfaa24b --- /dev/null +++ b/mmpose/codecs/simcc_label.py @@ -0,0 +1,311 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Optional, Tuple, Union + +import numpy as np + +from mmpose.codecs.utils import get_simcc_maximum +from mmpose.codecs.utils.refinement import refine_simcc_dark +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class SimCCLabel(BaseKeypointCodec): + r"""Generate keypoint representation via "SimCC" approach. + See the paper: `SimCC: a Simple Coordinate Classification Perspective for + Human Pose Estimation`_ by Li et al (2022) for more details. 
+ Old name: SimDR + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_x_labels (np.ndarray): The generated SimCC label for x-axis. + The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wx=w*simcc_split_ratio` + - keypoint_y_labels (np.ndarray): The generated SimCC label for y-axis. + The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wy=h*simcc_split_ratio` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + smoothing_type (str): The SimCC label smoothing strategy. Options are + ``'gaussian'`` and ``'standard'``. Defaults to ``'gaussian'`` + sigma (float | int | tuple): The sigma value in the Gaussian SimCC + label. Defaults to 6.0 + simcc_split_ratio (float): The ratio of the label size to the input + size. For example, if the input width is ``w``, the x label size + will be :math:`w*simcc_split_ratio`. Defaults to 2.0 + label_smooth_weight (float): Label Smoothing weight. Defaults to 0.0 + normalize (bool): Whether to normalize the heatmaps. Defaults to True. + use_dark (bool): Whether to use the DARK post processing. Defaults to + False. + decode_visibility (bool): Whether to decode the visibility. Defaults + to False. + decode_beta (float): The beta value for decoding visibility. Defaults + to 150.0. + + .. _`SimCC: a Simple Coordinate Classification Perspective for Human Pose + Estimation`: https://arxiv.org/abs/2107.03332 + """ + + label_mapping_table = dict( + keypoint_x_labels='keypoint_x_labels', + keypoint_y_labels='keypoint_y_labels', + keypoint_weights='keypoint_weights', + ) + + def __init__( + self, + input_size: Tuple[int, int], + smoothing_type: str = 'gaussian', + sigma: Union[float, int, Tuple[float]] = 6.0, + simcc_split_ratio: float = 2.0, + label_smooth_weight: float = 0.0, + normalize: bool = True, + use_dark: bool = False, + decode_visibility: bool = False, + decode_beta: float = 150.0, + ) -> None: + super().__init__() + + self.input_size = input_size + self.smoothing_type = smoothing_type + self.simcc_split_ratio = simcc_split_ratio + self.label_smooth_weight = label_smooth_weight + self.normalize = normalize + self.use_dark = use_dark + self.decode_visibility = decode_visibility + self.decode_beta = decode_beta + + if isinstance(sigma, (float, int)): + self.sigma = np.array([sigma, sigma]) + else: + self.sigma = np.array(sigma) + + if self.smoothing_type not in {'gaussian', 'standard'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `smoothing_type` value' + f'{self.smoothing_type}. Should be one of ' + '{"gaussian", "standard"}') + + if self.smoothing_type == 'gaussian' and self.label_smooth_weight > 0: + raise ValueError('Attribute `label_smooth_weight` is only ' + 'used for `standard` mode.') + + if self.label_smooth_weight < 0.0 or self.label_smooth_weight > 1.0: + raise ValueError('`label_smooth_weight` should be in range [0, 1]') + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints into SimCC labels. Note that the original + keypoint coordinates should be in the input image space. 
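        An input coordinate is mapped to a classification bin by multiplying
        with ``simcc_split_ratio`` and rounding, e.g. (illustrative numbers,
        assuming the default ``simcc_split_ratio=2.0``)::

            x, y = 100.3, 50.7          # keypoint in input image space
            bin_x = round(x * 2.0)      # -> 201 out of Wx = w * 2 bins
            bin_y = round(y * 2.0)      # -> 101 out of Wy = h * 2 bins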
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_x_labels (np.ndarray): The generated SimCC label for + x-axis. + The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wx=w*simcc_split_ratio` + - keypoint_y_labels (np.ndarray): The generated SimCC label for + y-axis. + The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wy=h*simcc_split_ratio` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.smoothing_type == 'gaussian': + x_labels, y_labels, keypoint_weights = self._generate_gaussian( + keypoints, keypoints_visible) + elif self.smoothing_type == 'standard': + x_labels, y_labels, keypoint_weights = self._generate_standard( + keypoints, keypoints_visible) + else: + raise ValueError( + f'{self.__class__.__name__} got invalid `smoothing_type` value' + f'{self.smoothing_type}. Should be one of ' + '{"gaussian", "standard"}') + + encoded = dict( + keypoint_x_labels=x_labels, + keypoint_y_labels=y_labels, + keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, simcc_x: np.ndarray, + simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from SimCC representations. The decoded + coordinates are in the input image space. + + Args: + encoded (Tuple[np.ndarray, np.ndarray]): SimCC labels for x-axis + and y-axis + simcc_x (np.ndarray): SimCC label for x-axis + simcc_y (np.ndarray): SimCC label for y-axis + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - socres (np.ndarray): The keypoint scores in shape (N, K). 
+ It usually represents the confidence of the keypoint prediction + """ + + keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) + + # Unsqueeze the instance dimension for single-instance results + if keypoints.ndim == 2: + keypoints = keypoints[None, :] + scores = scores[None, :] + + if self.use_dark: + x_blur = int((self.sigma[0] * 20 - 7) // 3) + y_blur = int((self.sigma[1] * 20 - 7) // 3) + x_blur -= int((x_blur % 2) == 0) + y_blur -= int((y_blur % 2) == 0) + keypoints[:, :, 0] = refine_simcc_dark(keypoints[:, :, 0], simcc_x, + x_blur) + keypoints[:, :, 1] = refine_simcc_dark(keypoints[:, :, 1], simcc_y, + y_blur) + + keypoints /= self.simcc_split_ratio + + if self.decode_visibility: + _, visibility = get_simcc_maximum( + simcc_x * self.decode_beta * self.sigma[0], + simcc_y * self.decode_beta * self.sigma[1], + apply_softmax=True) + return keypoints, (scores, visibility) + else: + return keypoints, scores + + def _map_coordinates( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Mapping keypoint coordinates into SimCC space.""" + + keypoints_split = keypoints.copy() + keypoints_split = np.around(keypoints_split * self.simcc_split_ratio) + keypoints_split = keypoints_split.astype(np.int64) + keypoint_weights = keypoints_visible.copy() + + return keypoints_split, keypoint_weights + + def _generate_standard( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encoding keypoints into SimCC labels with Standard Label Smoothing + strategy. + + Labels will be one-hot vectors if self.label_smooth_weight==0.0 + """ + + N, K, _ = keypoints.shape + w, h = self.input_size + W = np.around(w * self.simcc_split_ratio).astype(int) + H = np.around(h * self.simcc_split_ratio).astype(int) + + keypoints_split, keypoint_weights = self._map_coordinates( + keypoints, keypoints_visible) + + target_x = np.zeros((N, K, W), dtype=np.float32) + target_y = np.zeros((N, K, H), dtype=np.float32) + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get center coordinates + mu_x, mu_y = keypoints_split[n, k].astype(np.int64) + + # detect abnormal coords and assign the weight 0 + if mu_x >= W or mu_y >= H or mu_x < 0 or mu_y < 0: + keypoint_weights[n, k] = 0 + continue + + if self.label_smooth_weight > 0: + target_x[n, k] = self.label_smooth_weight / (W - 1) + target_y[n, k] = self.label_smooth_weight / (H - 1) + + target_x[n, k, mu_x] = 1.0 - self.label_smooth_weight + target_y[n, k, mu_y] = 1.0 - self.label_smooth_weight + + return target_x, target_y, keypoint_weights + + def _generate_gaussian( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encoding keypoints into SimCC labels with Gaussian Label Smoothing + strategy.""" + + N, K, _ = keypoints.shape + w, h = self.input_size + W = np.around(w * self.simcc_split_ratio).astype(int) + H = np.around(h * self.simcc_split_ratio).astype(int) + + keypoints_split, keypoint_weights = self._map_coordinates( + keypoints, keypoints_visible) + + target_x = np.zeros((N, K, W), dtype=np.float32) + target_y = np.zeros((N, K, H), dtype=np.float32) + + # 3-sigma rule + radius = self.sigma * 3 + + # xy grid + x = np.arange(0, W, 1, dtype=np.float32) + y = np.arange(0, H, 1, dtype=np.float32) + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints 
+ if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints_split[n, k] + + # check that the gaussian has in-bounds part + left, top = mu - radius + right, bottom = mu + radius + 1 + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + mu_x, mu_y = mu + + target_x[n, k] = np.exp(-((x - mu_x)**2) / (2 * self.sigma[0]**2)) + target_y[n, k] = np.exp(-((y - mu_y)**2) / (2 * self.sigma[1]**2)) + + if self.normalize: + norm_value = self.sigma * np.sqrt(np.pi * 2) + target_x /= norm_value[0] + target_y /= norm_value[1] + + return target_x, target_y, keypoint_weights diff --git a/mmpose/codecs/spr.py b/mmpose/codecs/spr.py new file mode 100644 index 0000000000000000000000000000000000000000..fba17f15982f1b38ac07bf5f6d61bfec0286a660 --- /dev/null +++ b/mmpose/codecs/spr.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple, Union + +import numpy as np +import torch +from torch import Tensor + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (batch_heatmap_nms, generate_displacement_heatmap, + generate_gaussian_heatmaps, get_diagonal_lengths, + get_instance_root) + + +@KEYPOINT_CODECS.register_module() +class SPR(BaseKeypointCodec): + """Encode/decode keypoints with Structured Pose Representation (SPR). + + See the paper `Single-stage multi-person pose machines`_ + by Nie et al (2017) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (1, H, W) + where [W, H] is the `heatmap_size`. If the keypoint heatmap is + generated together, the output heatmap shape is (K+1, H, W) + - heatmap_weights (np.ndarray): The target weights for heatmaps which + has same shape with heatmaps. + - displacements (np.ndarray): The dense keypoint displacement in + shape (K*2, H, W). + - displacement_weights (np.ndarray): The target weights for heatmaps + which has same shape with displacements. + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float or tuple, optional): The sigma values of the Gaussian + heatmaps. If sigma is a tuple, it includes both sigmas for root + and keypoint heatmaps. ``None`` means the sigmas are computed + automatically from the heatmap size. Defaults to ``None`` + generate_keypoint_heatmaps (bool): Whether to generate Gaussian + heatmaps for each keypoint. Defaults to ``False`` + root_type (str): The method to generate the instance root. Options + are: + + - ``'kpt_center'``: Average coordinate of all visible keypoints. + - ``'bbox_center'``: Center point of bounding boxes outlined by + all visible keypoints. + + Defaults to ``'kpt_center'`` + + minimal_diagonal_length (int or float): The threshold of diagonal + length of instance bounding box. Small instances will not be + used in training. Defaults to 32 + background_weight (float): Loss weight of background pixels. + Defaults to 0.1 + decode_thr (float): The threshold of keypoint response value in + heatmaps. Defaults to 0.01 + decode_nms_kernel (int): The kernel size of the NMS during decoding, + which should be an odd integer. Defaults to 5 + decode_max_instances (int): The maximum number of instances + to decode. Defaults to 30 + + .. 
_`Single-stage multi-person pose machines`: + https://arxiv.org/abs/1908.09220 + """ + + field_mapping_table = dict( + heatmaps='heatmaps', + heatmap_weights='heatmap_weights', + displacements='displacements', + displacement_weights='displacement_weights', + ) + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: Optional[Union[float, Tuple[float]]] = None, + generate_keypoint_heatmaps: bool = False, + root_type: str = 'kpt_center', + minimal_diagonal_length: Union[int, float] = 5, + background_weight: float = 0.1, + decode_nms_kernel: int = 5, + decode_max_instances: int = 30, + decode_thr: float = 0.01, + ): + super().__init__() + + self.input_size = input_size + self.heatmap_size = heatmap_size + self.generate_keypoint_heatmaps = generate_keypoint_heatmaps + self.root_type = root_type + self.minimal_diagonal_length = minimal_diagonal_length + self.background_weight = background_weight + self.decode_nms_kernel = decode_nms_kernel + self.decode_max_instances = decode_max_instances + self.decode_thr = decode_thr + + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + if sigma is None: + sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 32 + if generate_keypoint_heatmaps: + # sigma for root heatmap and keypoint heatmaps + self.sigma = (sigma, sigma // 2) + else: + self.sigma = (sigma, ) + else: + if not isinstance(sigma, (tuple, list)): + sigma = (sigma, ) + if generate_keypoint_heatmaps: + assert len(sigma) == 2, 'sigma for keypoints must be given ' \ + 'if `generate_keypoint_heatmaps` ' \ + 'is True. e.g. sigma=(4, 2)' + self.sigma = sigma + + def _get_heatmap_weights(self, + heatmaps, + fg_weight: float = 1, + bg_weight: float = 0): + """Generate weight array for heatmaps. + + Args: + heatmaps (np.ndarray): Root and keypoint (optional) heatmaps + fg_weight (float): Weight for foreground pixels. Defaults to 1.0 + bg_weight (float): Weight for background pixels. Defaults to 0.0 + + Returns: + np.ndarray: Heatmap weight array in the same shape with heatmaps + """ + heatmap_weights = np.ones(heatmaps.shape, dtype=np.float32) * bg_weight + heatmap_weights[heatmaps > 0] = fg_weight + return heatmap_weights + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into root heatmaps and keypoint displacement + fields. Note that the original keypoint coordinates should be in the + input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (1, H, W) where [W, H] is the `heatmap_size`. If keypoint + heatmaps are generated together, the shape is (K+1, H, W) + - heatmap_weights (np.ndarray): The pixel-wise weight for heatmaps + which has same shape with `heatmaps` + - displacements (np.ndarray): The generated displacement fields in + shape (K*D, H, W). The vector on each pixels represents the + displacement of keypoints belong to the associated instance + from this pixel. 
+ - displacement_weights (np.ndarray): The pixel-wise weight for + displacements which has same shape with `displacements` + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + + # compute the root and scale of each instance + roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, + self.root_type) + diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) + + # discard the small instances + roots_visible[diagonal_lengths < self.minimal_diagonal_length] = 0 + + # generate heatmaps + heatmaps, _ = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=roots[:, None], + keypoints_visible=roots_visible[:, None], + sigma=self.sigma[0]) + heatmap_weights = self._get_heatmap_weights( + heatmaps, bg_weight=self.background_weight) + + if self.generate_keypoint_heatmaps: + keypoint_heatmaps, _ = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma[1]) + + keypoint_heatmaps_weights = self._get_heatmap_weights( + keypoint_heatmaps, bg_weight=self.background_weight) + + heatmaps = np.concatenate((keypoint_heatmaps, heatmaps), axis=0) + heatmap_weights = np.concatenate( + (keypoint_heatmaps_weights, heatmap_weights), axis=0) + + # generate displacements + displacements, displacement_weights = \ + generate_displacement_heatmap( + self.heatmap_size, + _keypoints, + keypoints_visible, + roots, + roots_visible, + diagonal_lengths, + self.sigma[0], + ) + + encoded = dict( + heatmaps=heatmaps, + heatmap_weights=heatmap_weights, + displacements=displacements, + displacement_weights=displacement_weights) + + return encoded + + def decode(self, heatmaps: Tensor, + displacements: Tensor) -> Tuple[np.ndarray, np.ndarray]: + """Decode the keypoint coordinates from heatmaps and displacements. The + decoded keypoint coordinates are in the input image space. + + Args: + heatmaps (Tensor): Encoded root and keypoints (optional) heatmaps + in shape (1, H, W) or (K+1, H, W) + displacements (Tensor): Encoded keypoints displacement fields + in shape (K*D, H, W) + + Returns: + tuple: + - keypoints (Tensor): Decoded keypoint coordinates in shape + (N, K, D) + - scores (tuple): + - root_scores (Tensor): The root scores in shape (N, ) + - keypoint_scores (Tensor): The keypoint scores in + shape (N, K). 
If keypoint heatmaps are not generated, + `keypoint_scores` will be `None` + """ + # heatmaps, displacements = encoded + _k, h, w = displacements.shape + k = _k // 2 + displacements = displacements.view(k, 2, h, w) + + # convert displacements to a dense keypoint prediction + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) + regular_grid = torch.stack([x, y], dim=0).to(displacements) + posemaps = (regular_grid[None] + displacements).flatten(2) + + # find local maximum on root heatmap + root_heatmap_peaks = batch_heatmap_nms(heatmaps[None, -1:], + self.decode_nms_kernel) + root_scores, pos_idx = root_heatmap_peaks.flatten().topk( + self.decode_max_instances) + mask = root_scores > self.decode_thr + root_scores, pos_idx = root_scores[mask], pos_idx[mask] + + keypoints = posemaps[:, :, pos_idx].permute(2, 0, 1).contiguous() + + if self.generate_keypoint_heatmaps and heatmaps.shape[0] == 1 + k: + # compute scores for each keypoint + keypoint_scores = self.get_keypoint_scores(heatmaps[:k], keypoints) + else: + keypoint_scores = None + + keypoints = torch.cat([ + kpt * self.scale_factor[i] + for i, kpt in enumerate(keypoints.split(1, -1)) + ], + dim=-1) + return keypoints, (root_scores, keypoint_scores) + + def get_keypoint_scores(self, heatmaps: Tensor, keypoints: Tensor): + """Calculate the keypoint scores with keypoints heatmaps and + coordinates. + + Args: + heatmaps (Tensor): Keypoint heatmaps in shape (K, H, W) + keypoints (Tensor): Keypoint coordinates in shape (N, K, D) + + Returns: + Tensor: Keypoint scores in [N, K] + """ + k, h, w = heatmaps.shape + keypoints = torch.stack(( + keypoints[..., 0] / (w - 1) * 2 - 1, + keypoints[..., 1] / (h - 1) * 2 - 1, + ), + dim=-1) + keypoints = keypoints.transpose(0, 1).unsqueeze(1).contiguous() + + keypoint_scores = torch.nn.functional.grid_sample( + heatmaps.unsqueeze(1), keypoints, + padding_mode='border').view(k, -1).transpose(0, 1).contiguous() + + return keypoint_scores diff --git a/mmpose/codecs/udp_heatmap.py b/mmpose/codecs/udp_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..1fcdbd559166ff159d614d2c1e3048c27e942570 --- /dev/null +++ b/mmpose/codecs/udp_heatmap.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import cv2 +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (generate_offset_heatmap, generate_udp_gaussian_heatmaps, + get_heatmap_maximum, refine_keypoints_dark_udp) + + +@KEYPOINT_CODECS.register_module() +class UDPHeatmap(BaseKeypointCodec): + r"""Generate keypoint heatmaps by Unbiased Data Processing (UDP). + See the paper: `The Devil is in the Details: Delving into Unbiased Data + Processing for Human Pose Estimation`_ by Huang et al (2020) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmap (np.ndarray): The generated heatmap in shape (C_out, H, W) + where [W, H] is the `heatmap_size`, and the C_out is the output + channel number which depends on the `heatmap_type`. 
If + `heatmap_type=='gaussian'`, C_out equals to keypoint number K; + if `heatmap_type=='combined'`, C_out equals to K*3 + (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape (K,) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + heatmap_type (str): The heatmap type to encode the keypoitns. Options + are: + + - ``'gaussian'``: Gaussian heatmap + - ``'combined'``: Combination of a binary label map and offset + maps for X and Y axes. + + sigma (float): The sigma value of the Gaussian heatmap when + ``heatmap_type=='gaussian'``. Defaults to 2.0 + radius_factor (float): The radius factor of the binary label + map when ``heatmap_type=='combined'``. The positive region is + defined as the neighbor of the keypoit with the radius + :math:`r=radius_factor*max(W, H)`. Defaults to 0.0546875 + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. Defaults to 11 + + .. _`The Devil is in the Details: Delving into Unbiased Data Processing for + Human Pose Estimation`: https://arxiv.org/abs/1911.07524 + """ + + label_mapping_table = dict(keypoint_weights='keypoint_weights', ) + field_mapping_table = dict(heatmaps='heatmaps', ) + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + heatmap_type: str = 'gaussian', + sigma: float = 2., + radius_factor: float = 0.0546875, + blur_kernel_size: int = 11, + increase_sigma_with_padding=False, + amap_scale: float = 1.0, + normalize=None, + ) -> None: + super().__init__() + self.input_size = np.array(input_size) + self.heatmap_size = np.array(heatmap_size) + self.sigma = sigma + self.radius_factor = radius_factor + self.heatmap_type = heatmap_type + self.blur_kernel_size = blur_kernel_size + self.increase_sigma_with_padding = increase_sigma_with_padding + self.normalize = normalize + + self.amap_size = self.input_size * amap_scale + self.scale_factor = ((self.amap_size - 1) / + (self.heatmap_size - 1)).astype(np.float32) + self.input_center = self.input_size / 2 + self.top_left = self.input_center - self.amap_size / 2 + + if self.heatmap_type not in {'gaussian', 'combined'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. Should be one of ' + '{"gaussian", "combined"}') + + def _kpts_to_activation_pts(self, keypoints: np.ndarray) -> np.ndarray: + """ + Transform the keypoint coordinates to the activation space. + In the original UDPHeatmap, activation map is the same as the input image space with + different resolution but in this case we allow the activation map to have different + size (padding) than the input image space. + Centers of activation map and input image space are aligned. + """ + transformed_keypoints = keypoints - self.top_left + transformed_keypoints = transformed_keypoints / self.scale_factor + return transformed_keypoints + + def _activation_pts_to_kpts(self, keypoints: np.ndarray) -> np.ndarray: + """ + Transform the points in activation map to the keypoint coordinates. + In the original UDPHeatmap, activation map is the same as the input image space with + different resolution but in this case we allow the activation map to have different + size (padding) than the input image space. + Centers of activation map and input image space are aligned. 
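        Concretely, a heatmap-space point ``p`` maps back to the input space
        as ``p / [W - 1, H - 1] * amap_size + top_left``, where ``[W, H]`` is
        the heatmap size.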
+ """ + W, H = self.heatmap_size + transformed_keypoints = keypoints / [W - 1, H - 1] * self.amap_size + transformed_keypoints += self.top_left + return transformed_keypoints + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + id_similarity: Optional[float] = 0.0, + keypoints_visibility: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + id_similarity (float): The usefulness of the identity information + for the whole pose. Defaults to 0.0 + keypoints_visibility (np.ndarray): The visibility bit for each + keypoint (N, K). Defaults to None + + Returns: + dict: + - heatmap (np.ndarray): The generated heatmap in shape + (C_out, H, W) where [W, H] is the `heatmap_size`, and the + C_out is the output channel number which depends on the + `heatmap_type`. If `heatmap_type=='gaussian'`, C_out equals to + keypoint number K; if `heatmap_type=='combined'`, C_out + equals to K*3 (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape + (K,) + """ + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visibility is None: + keypoints_visibility = np.zeros(keypoints.shape[:2], dtype=np.float32) + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.heatmap_type == 'gaussian': + heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=self._kpts_to_activation_pts(keypoints), + keypoints_visible=keypoints_visible, + sigma=self.sigma, + keypoints_visibility=keypoints_visibility, + increase_sigma_with_padding=self.increase_sigma_with_padding) + elif self.heatmap_type == 'combined': + heatmaps, keypoint_weights = generate_offset_heatmap( + heatmap_size=self.heatmap_size, + keypoints=self._kpts_to_activation_pts(keypoints), + keypoints_visible=keypoints_visible, + radius_factor=self.radius_factor) + else: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. Should be one of ' + '{"gaussian", "combined"}') + + if self.normalize is not None: + heatmaps_sum = np.sum(heatmaps, axis=(1, 2), keepdims=False) + mask = heatmaps_sum > 0 + heatmaps[mask, :, :] = heatmaps[mask, :, :] / (heatmaps_sum[mask, None, None] + np.finfo(np.float32).eps) + heatmaps = heatmaps * self.normalize + + annotated = keypoints_visible > 0 + + heatmap_keypoints = self._kpts_to_activation_pts(keypoints) + in_image = np.logical_and( + heatmap_keypoints[:, :, 0] >= 0, + heatmap_keypoints[:, :, 0] < self.heatmap_size[0], + ) + in_image = np.logical_and( + in_image, + heatmap_keypoints[:, :, 1] >= 0, + ) + in_image = np.logical_and( + in_image, + heatmap_keypoints[:, :, 1] < self.heatmap_size[1], + ) + + encoded = dict( + heatmaps=heatmaps, + keypoint_weights=keypoint_weights, + annotated=annotated, + in_image=in_image, + keypoints_scaled=keypoints, + heatmap_keypoints=heatmap_keypoints, + identification_similarity=id_similarity, + ) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. 
+ + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + + if self.heatmap_type == 'gaussian': + keypoints, scores = get_heatmap_maximum(heatmaps) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None] + scores = scores[None] + + keypoints = refine_keypoints_dark_udp( + keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) + + elif self.heatmap_type == 'combined': + _K, H, W = heatmaps.shape + K = _K // 3 + + for cls_heatmap in heatmaps[::3]: + # Apply Gaussian blur on classification maps + ks = 2 * self.blur_kernel_size + 1 + cv2.GaussianBlur(cls_heatmap, (ks, ks), 0, cls_heatmap) + + # valid radius + radius = self.radius_factor * max(W, H) + + x_offset = heatmaps[1::3].flatten() * radius + y_offset = heatmaps[2::3].flatten() * radius + keypoints, scores = get_heatmap_maximum(heatmaps=heatmaps[::3]) + index = (keypoints[..., 0] + keypoints[..., 1] * W).flatten() + index += W * H * np.arange(0, K) + index = index.astype(int) + keypoints += np.stack((x_offset[index], y_offset[index]), axis=-1) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None].astype(np.float32) + scores = scores[None] + + keypoints = self._activation_pts_to_kpts(keypoints) + + return keypoints, scores diff --git a/mmpose/codecs/utils/__init__.py b/mmpose/codecs/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e11f254466857d73de73112c9a8c4f28112a98e4 --- /dev/null +++ b/mmpose/codecs/utils/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
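A minimal sketch of driving the UDPHeatmap codec above, including the padded activation map introduced by this patch (illustrative only; the keypoint values are made up and ``UDPHeatmap`` is assumed to be importable from ``mmpose.codecs``):

    import numpy as np
    from mmpose.codecs import UDPHeatmap

    # amap_scale > 1 pads the activation map around the input image, so a
    # keypoint slightly outside the crop can still fall inside the heatmap
    codec = UDPHeatmap(input_size=(192, 256), heatmap_size=(48, 64),
                       sigma=2., amap_scale=1.25)
    kpts = np.array([[[96., 128.], [-10., 40.]]], dtype=np.float32)  # (1, 2, 2)
    vis = np.ones((1, 2), dtype=np.float32)

    enc = codec.encode(kpts, vis)
    # enc['heatmaps']: (2, 64, 48); enc['in_image'] marks keypoints that land
    # inside the (padded) heatmap area
    dec_kpts, scores = codec.decode(enc['heatmaps'])    # (1, 2, 2), (1, 2)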
+from .camera_image_projection import (camera_to_image_coord, camera_to_pixel, + pixel_to_camera) +from .gaussian_heatmap import (generate_3d_gaussian_heatmaps, + generate_gaussian_heatmaps, + generate_udp_gaussian_heatmaps, + generate_unbiased_gaussian_heatmaps, + generate_onehot_heatmaps) +from .instance_property import (get_diagonal_lengths, get_instance_bbox, + get_instance_root) +from .offset_heatmap import (generate_displacement_heatmap, + generate_offset_heatmap) +from .post_processing import (batch_heatmap_nms, gaussian_blur, + gaussian_blur1d, get_heatmap_3d_maximum, + get_heatmap_maximum, get_simcc_maximum, + get_simcc_normalized, get_heatmap_expected_value) +from .refinement import (refine_keypoints, refine_keypoints_dark, + refine_keypoints_dark_udp, refine_simcc_dark) +from .oks_map import generate_oks_maps + +__all__ = [ + 'generate_gaussian_heatmaps', 'generate_udp_gaussian_heatmaps', + 'generate_unbiased_gaussian_heatmaps', 'gaussian_blur', + 'get_heatmap_maximum', 'get_simcc_maximum', 'generate_offset_heatmap', + 'batch_heatmap_nms', 'refine_keypoints', 'refine_keypoints_dark', + 'refine_keypoints_dark_udp', 'generate_displacement_heatmap', + 'refine_simcc_dark', 'gaussian_blur1d', 'get_diagonal_lengths', + 'get_instance_root', 'get_instance_bbox', 'get_simcc_normalized', + 'camera_to_image_coord', 'camera_to_pixel', 'pixel_to_camera', + 'get_heatmap_3d_maximum', 'generate_3d_gaussian_heatmaps', + 'generate_oks_maps', 'get_heatmap_expected_value', 'generate_onehot_heatmaps' +] diff --git a/mmpose/codecs/utils/camera_image_projection.py b/mmpose/codecs/utils/camera_image_projection.py new file mode 100644 index 0000000000000000000000000000000000000000..b26d1396f1d054b1f36fd50df4c469d6201f12e6 --- /dev/null +++ b/mmpose/codecs/utils/camera_image_projection.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Tuple + +import numpy as np + + +def camera_to_image_coord(root_index: int, kpts_3d_cam: np.ndarray, + camera_param: Dict) -> Tuple[np.ndarray, np.ndarray]: + """Project keypoints from camera space to image space and calculate factor. + + Args: + root_index (int): Index for root keypoint. + kpts_3d_cam (np.ndarray): Keypoint coordinates in camera space in + shape (N, K, D). + camera_param (dict): Parameters for the camera. + + Returns: + tuple: + - kpts_3d_image (np.ndarray): Keypoint coordinates in image space in + shape (N, K, D). + - factor (np.ndarray): The scaling factor that maps keypoints from + image space to camera space in shape (N, ). + """ + + root = kpts_3d_cam[..., root_index, :] + tl_kpt = root.copy() + tl_kpt[..., :2] -= 1.0 + br_kpt = root.copy() + br_kpt[..., :2] += 1.0 + tl_kpt = np.reshape(tl_kpt, (-1, 3)) + br_kpt = np.reshape(br_kpt, (-1, 3)) + fx, fy = camera_param['f'] / 1000. + cx, cy = camera_param['c'] / 1000. 
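    # Project a square of side ``rectangle_3d_size`` (2 camera units) centred
    # at the root keypoint onto the image plane. The horizontal extent of the
    # projection gives ``ratio`` (image-plane units per camera unit at the
    # root depth), used below to express relative keypoint depths in image
    # scale; ``factor`` is its reciprocal.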
+ + tl2d = camera_to_pixel(tl_kpt, fx, fy, cx, cy) + br2d = camera_to_pixel(br_kpt, fx, fy, cx, cy) + + rectangle_3d_size = 2.0 + kpts_3d_image = np.zeros_like(kpts_3d_cam) + kpts_3d_image[..., :2] = camera_to_pixel(kpts_3d_cam.copy(), fx, fy, cx, + cy) + ratio = (br2d[..., 0] - tl2d[..., 0] + 0.001) / rectangle_3d_size + factor = rectangle_3d_size / (br2d[..., 0] - tl2d[..., 0] + 0.001) + kpts_3d_depth = ratio[:, None] * ( + kpts_3d_cam[..., 2] - kpts_3d_cam[..., root_index:root_index + 1, 2]) + kpts_3d_image[..., 2] = kpts_3d_depth + return kpts_3d_image, factor + + +def camera_to_pixel(kpts_3d: np.ndarray, + fx: float, + fy: float, + cx: float, + cy: float, + shift: bool = False) -> np.ndarray: + """Project keypoints from camera space to image space. + + Args: + kpts_3d (np.ndarray): Keypoint coordinates in camera space. + fx (float): x-coordinate of camera's focal length. + fy (float): y-coordinate of camera's focal length. + cx (float): x-coordinate of image center. + cy (float): y-coordinate of image center. + shift (bool): Whether to shift the coordinates by 1e-8. + + Returns: + pose_2d (np.ndarray): Projected keypoint coordinates in image space. + """ + if not shift: + pose_2d = kpts_3d[..., :2] / kpts_3d[..., 2:3] + else: + pose_2d = kpts_3d[..., :2] / (kpts_3d[..., 2:3] + 1e-8) + pose_2d[..., 0] *= fx + pose_2d[..., 1] *= fy + pose_2d[..., 0] += cx + pose_2d[..., 1] += cy + return pose_2d + + +def pixel_to_camera(kpts_3d: np.ndarray, fx: float, fy: float, cx: float, + cy: float) -> np.ndarray: + """Project keypoints from camera space to image space. + + Args: + kpts_3d (np.ndarray): Keypoint coordinates in camera space. + fx (float): x-coordinate of camera's focal length. + fy (float): y-coordinate of camera's focal length. + cx (float): x-coordinate of image center. + cy (float): y-coordinate of image center. + shift (bool): Whether to shift the coordinates by 1e-8. + + Returns: + pose_2d (np.ndarray): Projected keypoint coordinates in image space. + """ + pose_2d = kpts_3d.copy() + pose_2d[..., 0] -= cx + pose_2d[..., 1] -= cy + pose_2d[..., 0] /= fx + pose_2d[..., 1] /= fy + pose_2d[..., 0] *= kpts_3d[..., 2] + pose_2d[..., 1] *= kpts_3d[..., 2] + return pose_2d diff --git a/mmpose/codecs/utils/gaussian_heatmap.py b/mmpose/codecs/utils/gaussian_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..a475a6ae12c968598fed7abdcf5a0ce5b0b38e74 --- /dev/null +++ b/mmpose/codecs/utils/gaussian_heatmap.py @@ -0,0 +1,433 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Optional, Tuple, Union + +import numpy as np +from scipy.spatial.distance import cdist + + +def generate_3d_gaussian_heatmaps( + heatmap_size: Tuple[int, int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: Union[float, Tuple[float], np.ndarray], + image_size: Tuple[int, int], + heatmap3d_depth_bound: float = 400.0, + joint_indices: Optional[list] = None, + max_bound: float = 1.0, + use_different_joint_weights: bool = False, + dataset_keypoint_weights: Optional[np.ndarray] = None +) -> Tuple[np.ndarray, np.ndarray]: + """Generate 3d gaussian heatmaps of keypoints. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H, D] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, C) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float or List[float]): A list of sigma values of the Gaussian + heatmap for each instance. 
If sigma is given as a single float + value, it will be expanded into a tuple + image_size (Tuple[int, int]): Size of input image. + heatmap3d_depth_bound (float): Boundary for 3d heatmap depth. + Default: 400.0. + joint_indices (List[int], optional): Indices of joints used for heatmap + generation. If None (default) is given, all joints will be used. + Default: ``None``. + max_bound (float): The maximal value of heatmap. Default: 1.0. + use_different_joint_weights (bool): Whether to use different joint + weights. Default: ``False``. + dataset_keypoint_weights (np.ndarray, optional): Keypoints weight in + shape (K, ). + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K * D, H, W) where [W, H, D] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + W, H, D = heatmap_size + + # select the joints used for target generation + if joint_indices is not None: + keypoints = keypoints[:, joint_indices, ...] + keypoints_visible = keypoints_visible[:, joint_indices, ...] + N, K, _ = keypoints.shape + + heatmaps = np.zeros([K, D, H, W], dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + if isinstance(sigma, (int, float)): + sigma = (sigma, ) * N + + for n in range(N): + # 3-sigma rule + radius = sigma[n] * 3 + + # joint location in heatmap coordinates + mu_x = keypoints[n, :, 0] * W / image_size[0] # (K, ) + mu_y = keypoints[n, :, 1] * H / image_size[1] + mu_z = (keypoints[n, :, 2] / heatmap3d_depth_bound + 0.5) * D + + keypoint_weights[n, ...] = keypoint_weights[n, ...] * (mu_z >= 0) * ( + mu_z < D) + if use_different_joint_weights: + keypoint_weights[ + n] = keypoint_weights[n] * dataset_keypoint_weights + # xy grid + gaussian_size = 2 * radius + 1 + + # get neighboring voxels coordinates + x = y = z = np.arange(gaussian_size, dtype=np.float32) - radius + zz, yy, xx = np.meshgrid(z, y, x) + + xx = np.expand_dims(xx, axis=0) + yy = np.expand_dims(yy, axis=0) + zz = np.expand_dims(zz, axis=0) + mu_x = np.expand_dims(mu_x, axis=(-1, -2, -3)) + mu_y = np.expand_dims(mu_y, axis=(-1, -2, -3)) + mu_z = np.expand_dims(mu_z, axis=(-1, -2, -3)) + + xx, yy, zz = xx + mu_x, yy + mu_y, zz + mu_z + local_size = xx.shape[1] + + # round the coordinates + xx = xx.round().clip(0, W - 1) + yy = yy.round().clip(0, H - 1) + zz = zz.round().clip(0, D - 1) + + # compute the target value near joints + gaussian = np.exp(-((xx - mu_x)**2 + (yy - mu_y)**2 + (zz - mu_z)**2) / + (2 * sigma[n]**2)) + + # put the local target value to the full target heatmap + idx_joints = np.tile( + np.expand_dims(np.arange(K), axis=(-1, -2, -3)), + [1, local_size, local_size, local_size]) + idx = np.stack([idx_joints, zz, yy, xx], + axis=-1).astype(int).reshape(-1, 4) + + heatmaps[idx[:, 0], idx[:, 1], idx[:, 2], idx[:, 3]] = np.maximum( + heatmaps[idx[:, 0], idx[:, 1], idx[:, 2], idx[:, 3]], + gaussian.reshape(-1)) + + heatmaps = (heatmaps * max_bound).reshape(-1, H, W) + + return heatmaps, keypoint_weights + + +def generate_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: Union[float, Tuple[float], np.ndarray], +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints. 
+ + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float or List[float]): A list of sigma values of the Gaussian + heatmap for each instance. If sigma is given as a single float + value, it will be expanded into a tuple + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + if isinstance(sigma, (int, float)): + sigma = (sigma, ) * N + + for n in range(N): + # 3-sigma rule + radius = sigma[n] * 3 + + # xy grid + gaussian_size = 2 * radius + 1 + x = np.arange(0, gaussian_size, 1, dtype=np.float32) + y = x[:, None] + x0 = y0 = gaussian_size // 2 + + for k in range(K): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get gaussian center coordinates + mu = (keypoints[n, k] + 0.5).astype(np.int64) + + # check that the gaussian has in-bounds part + left, top = (mu - radius).astype(np.int64) + right, bottom = (mu + radius + 1).astype(np.int64) + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + # The gaussian is not normalized, + # we want the center value to equal 1 + gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma[n]**2)) + + # valid range in gaussian + g_x1 = max(0, -left) + g_x2 = min(W, right) - left + g_y1 = max(0, -top) + g_y2 = min(H, bottom) - top + + # valid range in heatmap + h_x1 = max(0, left) + h_x2 = min(W, right) + h_y1 = max(0, top) + h_y2 = min(H, bottom) + + heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] + gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] + + _ = np.maximum( + heatmap_region, gaussian_regsion, out=heatmap_region) + + return heatmaps, keypoint_weights + + +def generate_unbiased_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints using `Dark Pose`_. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + + .. 
_`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + # 3-sigma rule + radius = sigma * 3 + + # xy grid + x = np.arange(0, W, 1, dtype=np.float32) + y = np.arange(0, H, 1, dtype=np.float32)[:, None] + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints[n, k] + # check that the gaussian has in-bounds part + left, top = mu - radius + right, bottom = mu + radius + 1 + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + gaussian = np.exp(-((x - mu[0])**2 + (y - mu[1])**2) / (2 * sigma**2)) + + _ = np.maximum(gaussian, heatmaps[k], out=heatmaps[k]) + + return heatmaps, keypoint_weights + + +def generate_udp_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma, + keypoints_visibility: np.ndarray, + increase_sigma_with_padding: bool = False, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints using `UDP`_. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float): The sigma value of the Gaussian heatmap + keypoints_visibility (np.ndarray): The visibility bit for each keypoint (N, K) + increase_sigma_with_padding (bool): Whether to increase the sigma + value with padding. Default: False + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + + .. 
_`UDP`: https://arxiv.org/abs/1911.07524 + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + if isinstance(sigma, (int, float)): + scaled_sigmas = sigma * np.ones((N, K), dtype=np.float32) + sigmas = np.array([sigma] * K).reshape(1, -1).repeat(N, axis=0) + else: + scaled_sigmas = np.array(sigma).reshape(1, -1).repeat(N, axis=0) + sigmas = np.array(sigma).reshape(1, -1).repeat(N, axis=0) + + scales_arr = np.ones((N, K), dtype=np.float32) + if increase_sigma_with_padding: + diag = np.sqrt(W**2 + H**2) + for n in range(N): + image_kpts = keypoints[n, :].squeeze() + vis_kpts = image_kpts[keypoints_visibility[n, :] > 0.5] + + # Compute the distance between img_kpts and visible_kpts + if vis_kpts.size == 0: + min_dists = np.ones(image_kpts.shape[0]) * diag + else: + dists = cdist(image_kpts, vis_kpts, metric='euclidean') + min_dists = np.min(dists, axis=1) + + scales = min_dists / diag * 2.0 # Maximum distance (diagonal) results in .0*sigma + scales_arr[n, :] = scales + scaled_sigmas[n, :] = sigma * (1+scales) + + # print(scales_arr) + # print(scaled_sigmas) + + for n, k in product(range(N), range(K)): + scaled_sigma = scaled_sigmas[n, k] + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # 3-sigma rule + radius = scaled_sigma * 3 + + # xy grid + gaussian_size = 2 * radius + 1 + x = np.arange(0, gaussian_size, 1, dtype=np.float32) + y = x[:, None] + + mu = (keypoints[n, k] + 0.5).astype(np.int64) + # check that the gaussian has in-bounds part + left, top = (mu - radius).round().astype(np.int64) + right, bottom = (mu + radius + 1).round().astype(np.int64) + # left, top = (mu - radius).astype(np.int64) + # right, bottom = (mu + radius + 1).astype(np.int64) + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + mu_ac = keypoints[n, k] + x0 = y0 = gaussian_size // 2 + x0 += mu_ac[0] - mu[0] + y0 += mu_ac[1] - mu[1] + gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * scaled_sigma**2)) + + # Normalize Gaussian such that scaled_sigma = sigma is the norm + gaussian = gaussian / (scaled_sigma / sigmas[n, k]) + + # valid range in gaussian + g_x1 = max(0, -left) + g_x2 = min(W, right) - left + g_y1 = max(0, -top) + g_y2 = min(H, bottom) - top + + # valid range in heatmap + h_x1 = max(0, left) + h_x2 = min(W, right) + h_y1 = max(0, top) + h_y2 = min(H, bottom) + + # breakpoint() + + heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] + gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] + + _ = np.maximum(heatmap_region, gaussian_regsion, out=heatmap_region) + + return heatmaps, keypoint_weights + + +def generate_onehot_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma, + keypoints_visibility: np.ndarray, + increase_sigma_with_padding: bool = False, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints using `UDP`_. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float): The sigma value of the Gaussian heatmap + keypoints_visibility (np.ndarray): The visibility bit for each keypoint (N, K) + increase_sigma_with_padding (bool): Whether to increase the sigma + value with padding. 
Default: False + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + + .. _`UDP`: https://arxiv.org/abs/1911.07524 + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + mu = (keypoints[n, k] + 0.5).astype(np.int64) + + + if mu[0] < 0 or mu[0] >= W or mu[1] < 0 or mu[1] >= H: + keypoint_weights[n, k] = 0 + continue + + heatmaps[k, mu[1], mu[0]] = 1 + return heatmaps, keypoint_weights diff --git a/mmpose/codecs/utils/instance_property.py b/mmpose/codecs/utils/instance_property.py new file mode 100644 index 0000000000000000000000000000000000000000..15ae30aef021939e2f0dbf276ce8b1c3cceaa40e --- /dev/null +++ b/mmpose/codecs/utils/instance_property.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import numpy as np + + +def get_instance_root(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + root_type: str = 'kpt_center') -> np.ndarray: + """Calculate the coordinates and visibility of instance roots. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + root_type (str): Calculation of instance roots which should + be one of the following options: + + - ``'kpt_center'``: The roots' coordinates are the mean + coordinates of visible keypoints + - ``'bbox_center'``: The roots' are the center of bounding + boxes outlined by visible keypoints + + Defaults to ``'kpt_center'`` + + Returns: + tuple + - roots_coordinate(np.ndarray): Coordinates of instance roots in + shape [N, D] + - roots_visible(np.ndarray): Visibility of instance roots in + shape [N] + """ + + roots_coordinate = np.zeros((keypoints.shape[0], 2), dtype=np.float32) + roots_visible = np.ones((keypoints.shape[0]), dtype=np.float32) * 2 + + for i in range(keypoints.shape[0]): + + # collect visible keypoints + if keypoints_visible is not None: + visible_keypoints = keypoints[i][keypoints_visible[i] > 0] + else: + visible_keypoints = keypoints[i] + if visible_keypoints.size == 0: + roots_visible[i] = 0 + continue + + # compute the instance root with visible keypoints + if root_type == 'kpt_center': + roots_coordinate[i] = visible_keypoints.mean(axis=0) + roots_visible[i] = 1 + elif root_type == 'bbox_center': + roots_coordinate[i] = (visible_keypoints.max(axis=0) + + visible_keypoints.min(axis=0)) / 2.0 + roots_visible[i] = 1 + else: + raise ValueError( + f'the value of `root_type` must be \'kpt_center\' or ' + f'\'bbox_center\', but got \'{root_type}\'') + + return roots_coordinate, roots_visible + + +def get_instance_bbox(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> np.ndarray: + """Calculate the pseudo instance bounding box from visible keypoints. The + bounding boxes are in the xyxy format. 
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + np.ndarray: bounding boxes in [N, 4] + """ + bbox = np.zeros((keypoints.shape[0], 4), dtype=np.float32) + for i in range(keypoints.shape[0]): + if keypoints_visible is not None: + visible_keypoints = keypoints[i][keypoints_visible[i] > 0] + else: + visible_keypoints = keypoints[i] + if visible_keypoints.size == 0: + continue + + bbox[i, :2] = visible_keypoints.min(axis=0) + bbox[i, 2:] = visible_keypoints.max(axis=0) + return bbox + + +def get_diagonal_lengths(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> np.ndarray: + """Calculate the diagonal length of instance bounding box from visible + keypoints. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + np.ndarray: bounding box diagonal length in [N] + """ + pseudo_bbox = get_instance_bbox(keypoints, keypoints_visible) + pseudo_bbox = pseudo_bbox.reshape(-1, 2, 2) + h_w_diff = pseudo_bbox[:, 1] - pseudo_bbox[:, 0] + diagonal_length = np.sqrt(np.power(h_w_diff, 2).sum(axis=1)) + + return diagonal_length diff --git a/mmpose/codecs/utils/offset_heatmap.py b/mmpose/codecs/utils/offset_heatmap.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c1c32ed391982fa0f8cd31b6240363b4fe1c52 --- /dev/null +++ b/mmpose/codecs/utils/offset_heatmap.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Tuple + +import numpy as np + + +def generate_offset_heatmap( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + radius_factor: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate offset heatmaps of keypoints, where each keypoint is + represented by 3 maps: one pixel-level class label map (1 for keypoint and + 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions + respectively. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + radius_factor (float): The radius factor of the binary label + map. The positive region is defined as the neighbor of the + keypoint with the radius :math:`r=radius_factor*max(W, H)` + + Returns: + tuple: + - heatmap (np.ndarray): The generated heatmap in shape + (K*3, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (K,) + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, 3, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + # xy grid + x = np.arange(0, W, 1) + y = np.arange(0, H, 1)[:, None] + + # positive area radius in the classification map + radius = radius_factor * max(W, H) + + for n, k in product(range(N), range(K)): + if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints[n, k] + + x_offset = (mu[0] - x) / radius + y_offset = (mu[1] - y) / radius + + heatmaps[k, 0] = np.where(x_offset**2 + y_offset**2 <= 1, 1., 0.) 
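+        # channel 0 (set above) is the binary classification map; channels 1
+        # and 2 below hold the x/y offsets for every pixel, measured in units
+        # of the positive-region radius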
+ heatmaps[k, 1] = x_offset + heatmaps[k, 2] = y_offset + + heatmaps = heatmaps.reshape(K * 3, H, W) + + return heatmaps, keypoint_weights + + +def generate_displacement_heatmap( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + roots: np.ndarray, + roots_visible: np.ndarray, + diagonal_lengths: np.ndarray, + radius: float, +): + """Generate displacement heatmaps of keypoints, where each keypoint is + represented by 3 maps: one pixel-level class label map (1 for keypoint and + 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions + respectively. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + roots (np.ndarray): Coordinates of instance centers in shape (N, D). + The displacement fields of each instance will locate around its + center. + roots_visible (np.ndarray): Roots visibilities in shape (N,) + diagonal_lengths (np.ndarray): Diaginal length of the bounding boxes + of each instance in shape (N,) + radius (float): The radius factor of the binary label + map. The positive region is defined as the neighbor of the + keypoint with the radius :math:`r=radius_factor*max(W, H)` + + Returns: + tuple: + - displacements (np.ndarray): The generated displacement map in + shape (K*2, H, W) where [W, H] is the `heatmap_size` + - displacement_weights (np.ndarray): The target weights in shape + (K*2, H, W) + """ + N, K, _ = keypoints.shape + W, H = heatmap_size + + displacements = np.zeros((K * 2, H, W), dtype=np.float32) + displacement_weights = np.zeros((K * 2, H, W), dtype=np.float32) + instance_size_map = np.zeros((H, W), dtype=np.float32) + + for n in range(N): + if (roots_visible[n] < 1 or (roots[n, 0] < 0 or roots[n, 1] < 0) + or (roots[n, 0] >= W or roots[n, 1] >= H)): + continue + + diagonal_length = diagonal_lengths[n] + + for k in range(K): + if keypoints_visible[n, k] < 1 or keypoints[n, k, 0] < 0 \ + or keypoints[n, k, 1] < 0 or keypoints[n, k, 0] >= W \ + or keypoints[n, k, 1] >= H: + continue + + start_x = max(int(roots[n, 0] - radius), 0) + start_y = max(int(roots[n, 1] - radius), 0) + end_x = min(int(roots[n, 0] + radius), W) + end_y = min(int(roots[n, 1] + radius), H) + + for x in range(start_x, end_x): + for y in range(start_y, end_y): + if displacements[2 * k, y, + x] != 0 or displacements[2 * k + 1, y, + x] != 0: + if diagonal_length > instance_size_map[y, x]: + # keep the gt displacement of smaller instance + continue + + displacement_weights[2 * k:2 * k + 2, y, + x] = 1 / diagonal_length + displacements[2 * k:2 * k + 2, y, + x] = keypoints[n, k] - [x, y] + instance_size_map[y, x] = diagonal_length + + return displacements, displacement_weights diff --git a/mmpose/codecs/utils/oks_map.py b/mmpose/codecs/utils/oks_map.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d886e8d64e2d6214391cabd564302d447f1aed --- /dev/null +++ b/mmpose/codecs/utils/oks_map.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
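+# This module produces OKS-style target maps: each keypoint's response decays
+# as exp(-d^2 / (2 * s)), where the spread s is derived from the per-keypoint
+# COCO sigmas (or fixed by the caller via `sigma`). A minimal, illustrative
+# call (shapes only, not taken from the repo):
+#
+#     maps, weights = generate_oks_maps((48, 64), kpts, visible, visibility)
+#     # kpts: (N, 17, 2); visible / visibility: (N, 17)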
+from itertools import product
+from typing import Optional, Tuple, Union
+
+import numpy as np
+from scipy.spatial.distance import cdist
+
+
+def generate_oks_maps(
+    heatmap_size: Tuple[int, int],
+    keypoints: np.ndarray,
+    keypoints_visible: np.ndarray,
+    keypoints_visibility: np.ndarray,
+    sigma: float = 0.55,
+    increase_sigma_with_padding: bool = False,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Generate OKS target maps of keypoints following the `COCO`_ metric.
+
+    Args:
+        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
+        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
+        keypoints_visible (np.ndarray): Keypoint visibilities in shape
+            (N, K)
+        sigma (float): If positive, a fixed spread that overrides the
+            per-keypoint OKS-based spread. Default: 0.55
+        keypoints_visibility (np.ndarray): The visibility bit for each
+            keypoint (N, K)
+        increase_sigma_with_padding (bool): Whether to increase the sigma
+            value with padding. Default: False
+
+    Returns:
+        tuple:
+        - heatmaps (np.ndarray): The generated heatmap in shape
+            (K, H, W) where [W, H] is the `heatmap_size`
+        - keypoint_weights (np.ndarray): The target weights in shape
+            (N, K)
+
+    .. _`COCO`: https://cocodataset.org/#keypoints-eval
+    """
+
+    N, K, _ = keypoints.shape
+    W, H = heatmap_size
+
+    # The default sigmas are used for COCO dataset.
+    sigmas = np.array([
+        2.6, 2.5, 2.5, 3.5, 3.5, 7.9, 7.9, 7.2, 7.2, 6.2, 6.2, 10.7, 10.7,
+        8.7, 8.7, 8.9, 8.9
+    ]) / 100
+    # sigmas = sigmas * 2 / sigmas.mean()
+    # sigmas = np.round(sigmas).astype(int)
+    # sigmas = np.clip(sigmas, 1, 10)
+
+    heatmaps = np.zeros((K, H, W), dtype=np.float32)
+    keypoint_weights = keypoints_visible.copy()
+
+    # bbox_area = W/1.25 * H/1.25
+    # bbox_area = W * H * 0.53
+    bbox_area = np.sqrt(H / 1.25 * W / 1.25)
+
+    for n, k in product(range(N), range(K)):
+        kpt_sigma = sigmas[k]
+        # skip unlabeled keypoints
+        if keypoints_visible[n, k] < 0.5:
+            continue
+
+        y_idx, x_idx = np.indices((H, W))
+        dx = x_idx - keypoints[n, k, 0]
+        dy = y_idx - keypoints[n, k, 1]
+        dist = np.sqrt(dx**2 + dy**2)
+
+        # spread of the OKS kernel for this keypoint
+        vars = (kpt_sigma * 2)**2
+        s = vars * bbox_area * 2
+        s = np.clip(s, 0.55, 3.0)
+        if sigma is not None and sigma > 0:
+            s = sigma
+        e_map = dist**2 / (2 * s)
+        oks_map = np.exp(-e_map)
+
+        keypoint_weights[n, k] = (oks_map.max() > 0).astype(int)
+
+        # Scale such that there is always 1 at the maximum
+        if oks_map.max() > 1e-3:
+            oks_map = oks_map / oks_map.max()
+
+        # Scale OKS map such that 1 stays 1 and 0.5 becomes 0
+        # oks_map[oks_map < 0.5] = 0
+        # oks_map = 2 * oks_map - 1
+        heatmaps[k] = oks_map
+
+    return heatmaps, keypoint_weights
diff --git a/mmpose/codecs/utils/post_processing.py b/mmpose/codecs/utils/post_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..054eaedd8f189860dccd28252b2a4046f6e40d8c
--- /dev/null
+++ b/mmpose/codecs/utils/post_processing.py
@@ -0,0 +1,530 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from itertools import product
+from typing import Tuple
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import Tensor
+
+from scipy.signal import convolve2d
+
+
+def get_simcc_normalized(batch_pred_simcc, sigma=None):
+    """Normalize the predicted SimCC.
+ + Args: + batch_pred_simcc (torch.Tensor): The predicted SimCC. + sigma (float): The sigma of the Gaussian distribution. + + Returns: + torch.Tensor: The normalized SimCC. + """ + B, K, _ = batch_pred_simcc.shape + + # Scale and clamp the tensor + if sigma is not None: + batch_pred_simcc = batch_pred_simcc / (sigma * np.sqrt(np.pi * 2)) + batch_pred_simcc = batch_pred_simcc.clamp(min=0) + + # Compute the binary mask + mask = (batch_pred_simcc.amax(dim=-1) > 1).reshape(B, K, 1) + + # Normalize the tensor using the maximum value + norm = (batch_pred_simcc / batch_pred_simcc.amax(dim=-1).reshape(B, K, 1)) + + # Apply normalization + batch_pred_simcc = torch.where(mask, norm, batch_pred_simcc) + + return batch_pred_simcc + + +def get_simcc_maximum(simcc_x: np.ndarray, + simcc_y: np.ndarray, + apply_softmax: bool = False + ) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from simcc representations. + + Note: + instance number: N + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) + simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) + apply_softmax (bool): whether to apply softmax on the heatmap. + Defaults to False. + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (N, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (N, K) + """ + + assert isinstance(simcc_x, np.ndarray), ('simcc_x should be numpy.ndarray') + assert isinstance(simcc_y, np.ndarray), ('simcc_y should be numpy.ndarray') + assert simcc_x.ndim == 2 or simcc_x.ndim == 3, ( + f'Invalid shape {simcc_x.shape}') + assert simcc_y.ndim == 2 or simcc_y.ndim == 3, ( + f'Invalid shape {simcc_y.shape}') + assert simcc_x.ndim == simcc_y.ndim, ( + f'{simcc_x.shape} != {simcc_y.shape}') + + if simcc_x.ndim == 3: + N, K, Wx = simcc_x.shape + simcc_x = simcc_x.reshape(N * K, -1) + simcc_y = simcc_y.reshape(N * K, -1) + else: + N = None + + if apply_softmax: + simcc_x = simcc_x - np.max(simcc_x, axis=1, keepdims=True) + simcc_y = simcc_y - np.max(simcc_y, axis=1, keepdims=True) + ex, ey = np.exp(simcc_x), np.exp(simcc_y) + simcc_x = ex / np.sum(ex, axis=1, keepdims=True) + simcc_y = ey / np.sum(ey, axis=1, keepdims=True) + + x_locs = np.argmax(simcc_x, axis=1) + y_locs = np.argmax(simcc_y, axis=1) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + max_val_x = np.amax(simcc_x, axis=1) + max_val_y = np.amax(simcc_y, axis=1) + + mask = max_val_x > max_val_y + max_val_x[mask] = max_val_y[mask] + vals = max_val_x + locs[vals <= 0.] = -1 + + if N: + locs = locs.reshape(N, K, 2) + vals = vals.reshape(N, K) + + return locs, vals + + +def get_heatmap_3d_maximum(heatmaps: np.ndarray + ) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from heatmaps. 
+ + Note: + batch_size: B + num_keypoints: K + heatmap dimension: D + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray): Heatmaps in shape (K, D, H, W) or + (B, K, D, H, W) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 3) or (B, K, 3) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (B, K) + """ + assert isinstance(heatmaps, + np.ndarray), ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 4 or heatmaps.ndim == 5, ( + f'Invalid shape {heatmaps.shape}') + + if heatmaps.ndim == 4: + K, D, H, W = heatmaps.shape + B = None + heatmaps_flatten = heatmaps.reshape(K, -1) + else: + B, K, D, H, W = heatmaps.shape + heatmaps_flatten = heatmaps.reshape(B * K, -1) + + z_locs, y_locs, x_locs = np.unravel_index( + np.argmax(heatmaps_flatten, axis=1), shape=(D, H, W)) + locs = np.stack((x_locs, y_locs, z_locs), axis=-1).astype(np.float32) + vals = np.amax(heatmaps_flatten, axis=1) + locs[vals <= 0.] = -1 + + if B: + locs = locs.reshape(B, K, 3) + vals = vals.reshape(B, K) + + return locs, vals + + +def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from heatmaps. + + Note: + batch_size: B + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (B, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (B, K) + """ + assert isinstance(heatmaps, + np.ndarray), ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 3 or heatmaps.ndim == 4, ( + f'Invalid shape {heatmaps.shape}') + + if heatmaps.ndim == 3: + K, H, W = heatmaps.shape + B = None + heatmaps_flatten = heatmaps.reshape(K, -1) + else: + B, K, H, W = heatmaps.shape + heatmaps_flatten = heatmaps.reshape(B * K, -1) + + y_locs, x_locs = np.unravel_index( + np.argmax(heatmaps_flatten, axis=1), shape=(H, W)) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + vals = np.amax(heatmaps_flatten, axis=1) + locs[vals <= 0.] = -1 + + if B: + locs = locs.reshape(B, K, 2) + vals = vals.reshape(B, K) + + return locs, vals + + +def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray: + """Modulate heatmap distribution with Gaussian. + + Note: + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + heatmaps (np.ndarray[K, H, W]): model predicted heatmaps. + kernel (int): Gaussian kernel size (K) for modulation, which should + match the heatmap gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + + Returns: + np.ndarray ([K, H, W]): Modulated heatmap distribution. + """ + assert kernel % 2 == 1 + + border = (kernel - 1) // 2 + K, H, W = heatmaps.shape + + for k in range(K): + origin_max = np.max(heatmaps[k]) + dr = np.zeros((H + 2 * border, W + 2 * border), dtype=np.float32) + dr[border:-border, border:-border] = heatmaps[k].copy() + dr = cv2.GaussianBlur(dr, (kernel, kernel), 0) + heatmaps[k] = dr[border:-border, border:-border].copy() + heatmaps[k] *= origin_max / (np.max(heatmaps[k])+1e-12) + return heatmaps + + +def gaussian_blur1d(simcc: np.ndarray, kernel: int = 11) -> np.ndarray: + """Modulate simcc distribution with Gaussian. + + Note: + - num_keypoints: K + - simcc length: Wx + + Args: + simcc (np.ndarray[K, Wx]): model predicted simcc. 
+ kernel (int): Gaussian kernel size (K) for modulation, which should + match the simcc gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + + Returns: + np.ndarray ([K, Wx]): Modulated simcc distribution. + """ + assert kernel % 2 == 1 + + border = (kernel - 1) // 2 + N, K, Wx = simcc.shape + + for n, k in product(range(N), range(K)): + origin_max = np.max(simcc[n, k]) + dr = np.zeros((1, Wx + 2 * border), dtype=np.float32) + dr[0, border:-border] = simcc[n, k].copy() + dr = cv2.GaussianBlur(dr, (kernel, 1), 0) + simcc[n, k] = dr[0, border:-border].copy() + simcc[n, k] *= origin_max / np.max(simcc[n, k]) + return simcc + + +def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5): + """Apply NMS on a batch of heatmaps. + + Args: + batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W) + kernel_size (int): The kernel size of the NMS which should be + a odd integer. Defaults to 5 + + Returns: + Tensor: The batch heatmaps after NMS. + """ + + assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \ + f'The kernel_size should be an odd integer, got {kernel_size}' + + padding = (kernel_size - 1) // 2 + + maximum = F.max_pool2d( + batch_heatmaps, kernel_size, stride=1, padding=padding) + maximum_indicator = torch.eq(batch_heatmaps, maximum) + batch_heatmaps = batch_heatmaps * maximum_indicator.float() + + return batch_heatmaps + + +def get_heatmap_expected_value(heatmaps: np.ndarray, parzen_size: float = 0.1, return_heatmap: bool = False) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from heatmaps. + + Note: + batch_size: B + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (B, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (B, K) + """ + assert isinstance(heatmaps, + np.ndarray), ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 3 or heatmaps.ndim == 4, ( + f'Invalid shape {heatmaps.shape}') + + assert parzen_size >= 0.0 and parzen_size <= 1.0, ( + f'Invalid parzen_size {parzen_size}') + + if heatmaps.ndim == 3: + K, H, W = heatmaps.shape + B = 1 + FIRST_DIM = K + heatmaps_flatten = heatmaps.reshape(1, K, H, W) + else: + B, K, H, W = heatmaps.shape + FIRST_DIM = K*B + heatmaps_flatten = heatmaps.reshape(B, K, H, W) + + # Blur heatmaps with Gaussian + # heatmaps_flatten = gaussian_blur(heatmaps_flatten, kernel=9) + + # Zero out pixels far from the maximum for each heatmap + # heatmaps_tmp = heatmaps_flatten.copy().reshape(B*K, H*W) + # y_locs, x_locs = np.unravel_index( + # np.argmax(heatmaps_tmp, axis=1), shape=(H, W)) + # locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + # heatmaps_flatten = heatmaps_flatten.reshape(B*K, H, W) + # for i, x in enumerate(x_locs): + # y = y_locs[i] + # start_x = int(max(0, x - 0.2*W)) + # end_x = int(min(W, x + 0.2*W)) + # start_y = int(max(0, y - 0.2*H)) + # end_y = int(min(H, y + 0.2*H)) + # mask = np.zeros((H, W)) + # mask[start_y:end_y, start_x:end_x] = 1 + # heatmaps_flatten[i] = heatmaps_flatten[i] * mask + # heatmaps_flatten = heatmaps_flatten.reshape(B, K, H, W) + + + bbox_area = np.sqrt(H/1.25 * W/1.25) + + kpt_sigmas = np.array( + [2.6, 2.5, 2.5, 3.5, 3.5, 7.9, 7.9, 7.2, 7.2, 6.2, 6.2, 10.7, 10.7, 8.7, 8.7, 8.9, 8.9])/100 + + heatmaps_covolved = np.zeros_like(heatmaps_flatten) + for k in range(K): + vars = 
(kpt_sigmas[k]*2)**2 + s = vars * bbox_area * 2 + s = np.clip(s, 0.55, 3.0) + radius = np.ceil(s * 3).astype(int) + diameter = 2*radius + 1 + diameter = np.ceil(diameter).astype(int) + # kernel_sizes[kernel_sizes % 2 == 0] += 1 + center = diameter // 2 + dist_x = np.arange(diameter) - center + dist_y = np.arange(diameter) - center + dist_x, dist_y = np.meshgrid(dist_x, dist_y) + dist = np.sqrt(dist_x**2 + dist_y**2) + oks_kernel = np.exp(-dist**2 / (2 * s)) + oks_kernel = oks_kernel / oks_kernel.sum() + + htm = heatmaps_flatten[:, k, :, :].reshape(-1, H, W) + # htm = np.pad(htm, ((0, 0), (radius, radius), (radius, radius)), mode='symmetric') + # htm = torch.from_numpy(htm).float() + # oks_kernel = torch.from_numpy(oks_kernel).float().to(htm.device).reshape(1, diameter, diameter) + oks_kernel = oks_kernel.reshape(1, diameter, diameter) + htm_conv = np.zeros_like(htm) + for b in range(B): + htm_conv[b, :, :] = convolve2d(htm[b, :, :], oks_kernel[b, :, :], mode='same', boundary='symm') + # htm_conv = F.conv2d(htm.unsqueeze(1), oks_kernel.unsqueeze(1), padding='same') + # htm_conv = htm_conv[:, :, radius:-radius, radius:-radius] + htm_conv = htm_conv.reshape(-1, 1, H, W) + heatmaps_covolved[:, k, :, :] = htm_conv + + + heatmaps_covolved = heatmaps_covolved.reshape(B*K, H*W) + y_locs, x_locs = np.unravel_index( + np.argmax(heatmaps_covolved, axis=1), shape=(H, W)) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + + # Apply mean-shift to get sub-pixel locations + locs = _get_subpixel_maximums(heatmaps_covolved.reshape(B*K, H, W), locs) + # breakpoint() + + + # heatmaps_sums = heatmaps_flatten.sum(axis=(1, 2)) + # norm_heatmaps = heatmaps_flatten.copy() + # norm_heatmaps[heatmaps_sums > 0] = heatmaps_flatten[heatmaps_sums > 0] / heatmaps_sums[heatmaps_sums > 0, None, None] + + + # # Compute Parzen window with Gaussian blur along the edge instead of simple mirroring + # x_pad = int(parzen_size * W + 0.5) + # y_pad = int(parzen_size * H + 0.5) + # # x_pad = 0 + # # y_pad = 0 + # kernel_size = int(min(H, W)*parzen_size + 0.5) + # if kernel_size % 2 == 0: + # kernel_size += 1 + # # norm_heatmaps_pad_blur = np.pad(norm_heatmaps, ((0, 0), (x_pad, x_pad), (y_pad, y_pad)), mode='symmetric') + # norm_heatmaps_pad = np.pad(norm_heatmaps, ((0, 0), (y_pad, y_pad), (x_pad, x_pad)), mode='constant', constant_values=0) + # norm_heatmaps_pad_blur = gaussian_blur(norm_heatmaps_pad, kernel=kernel_size) + + # # norm_heatmaps_pad_blur[:, x_pad:-x_pad, y_pad:-y_pad] = norm_heatmaps + + # norm_heatmaps_pad_sum = norm_heatmaps_pad_blur.sum(axis=(1, 2)) + # norm_heatmaps_pad_blur[norm_heatmaps_pad_sum>0] = norm_heatmaps_pad_blur[norm_heatmaps_pad_sum>0] / norm_heatmaps_pad_sum[norm_heatmaps_pad_sum>0, None, None] + + # # # Save the blurred heatmaps + # # for i in range(heatmaps.shape[0]): + # # tmp_htm = norm_heatmaps_pad_blur[i].copy() + # # tmp_htm = (tmp_htm - tmp_htm.min()) / (tmp_htm.max() - tmp_htm.min()) + # # tmp_htm = (tmp_htm*255).astype(np.uint8) + # # tmp_htm = cv2.cvtColor(tmp_htm, cv2.COLOR_GRAY2BGR) + # # tmp_htm = cv2.applyColorMap(tmp_htm, cv2.COLORMAP_JET) + + # # tmp_htm2 = norm_heatmaps_pad[i].copy() + # # tmp_htm2 = (tmp_htm2 - tmp_htm2.min()) / (tmp_htm2.max() - tmp_htm2.min()) + # # tmp_htm2 = (tmp_htm2*255).astype(np.uint8) + # # tmp_htm2 = cv2.cvtColor(tmp_htm2, cv2.COLOR_GRAY2BGR) + # # tmp_htm2 = cv2.applyColorMap(tmp_htm2, cv2.COLORMAP_JET) + + # # tmp_htm = cv2.addWeighted(tmp_htm, 0.5, tmp_htm2, 0.5, 0) + + # # cv2.imwrite(f'heatmaps_blurred_{i}.png', tmp_htm) + + # # 
norm_heatmaps_pad = np.pad(norm_heatmaps, ((0, 0), (x_pad, x_pad), (y_pad, y_pad)), mode='edge') + + # y_idx, x_idx = np.indices(norm_heatmaps_pad_blur.shape[1:]) + + # # breakpoint() + # x_locs = np.sum(norm_heatmaps_pad_blur * x_idx, axis=(1, 2)) - x_pad + # y_locs = np.sum(norm_heatmaps_pad_blur * y_idx, axis=(1, 2)) - y_pad + + # # mean_idx = np.argmax(heatmaps_flatten, axis=1) + # # x_locs, y_locs = np.unravel_index(mean_idx, shape=(H, W)) + # # locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + # # breakpoint() + # # vals = heatmaps_flatten[np.arange(heatmaps_flatten.shape[0]), mean_idx] + # # locs[vals <= 0.] = -1 + + # # mean_idx = np.argmax(norm_heatmaps, axis=1) + # # y_locs, x_locs = np.unravel_index( + # # mean_idx, shape=(H, W)) + + # locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + # # vals = np.amax(heatmaps_flatten, axis=1) + + + x_locs_int = np.round(x_locs).astype(int) + x_locs_int = np.clip(x_locs_int, 0, W-1) + y_locs_int = np.round(y_locs).astype(int) + y_locs_int = np.clip(y_locs_int, 0, H-1) + vals = heatmaps_flatten[np.arange(B), np.arange(K), y_locs_int, x_locs_int] + # breakpoint() + # locs[vals <= 0.] = -1 + + # print(mean_idx) + # print(x_locs) + # print(y_locs) + # print(locs) + heatmaps_covolved = heatmaps_covolved.reshape(B, K, H, W) + + if B > 1: + locs = locs.reshape(B, K, 2) + vals = vals.reshape(B, K) + heatmaps_covolved = heatmaps_covolved.reshape(B, K, H, W) + else: + locs = locs.reshape(K, 2) + vals = vals.reshape(K) + heatmaps_covolved = heatmaps_covolved.reshape(K, H, W) + + if return_heatmap: + return locs, vals, heatmaps_covolved + else: + return locs, vals + + + +def _get_subpixel_maximums(heatmaps, locs): + # Extract integer peak locations + x_locs = locs[:, 0].astype(np.int32) + y_locs = locs[:, 1].astype(np.int32) + + # Ensure we are not near the boundaries (avoid boundary issues) + valid_mask = (x_locs > 0) & (x_locs < heatmaps.shape[2] - 1) & \ + (y_locs > 0) & (y_locs < heatmaps.shape[1] - 1) + + # Initialize the output array with the integer locations + subpixel_locs = locs.copy() + + if np.any(valid_mask): + # Extract valid locations + x_locs_valid = x_locs[valid_mask] + y_locs_valid = y_locs[valid_mask] + + # Compute gradients (dx, dy) and second derivatives (dxx, dyy) + dx = (heatmaps[valid_mask, y_locs_valid, x_locs_valid + 1] - + heatmaps[valid_mask, y_locs_valid, x_locs_valid - 1]) / 2.0 + dy = (heatmaps[valid_mask, y_locs_valid + 1, x_locs_valid] - + heatmaps[valid_mask, y_locs_valid - 1, x_locs_valid]) / 2.0 + dxx = heatmaps[valid_mask, y_locs_valid, x_locs_valid + 1] + \ + heatmaps[valid_mask, y_locs_valid, x_locs_valid - 1] - \ + 2 * heatmaps[valid_mask, y_locs_valid, x_locs_valid] + dyy = heatmaps[valid_mask, y_locs_valid + 1, x_locs_valid] + \ + heatmaps[valid_mask, y_locs_valid - 1, x_locs_valid] - \ + 2 * heatmaps[valid_mask, y_locs_valid, x_locs_valid] + + # Avoid division by zero by setting a minimum threshold for the second derivatives + dxx = np.where(dxx != 0, dxx, 1e-6) + dyy = np.where(dyy != 0, dyy, 1e-6) + + # Calculate the sub-pixel shift + subpixel_x_shift = -dx / dxx + subpixel_y_shift = -dy / dyy + + # Update subpixel locations for valid indices + subpixel_locs[valid_mask, 0] += subpixel_x_shift + subpixel_locs[valid_mask, 1] += subpixel_y_shift + + return subpixel_locs + diff --git a/mmpose/codecs/utils/refinement.py b/mmpose/codecs/utils/refinement.py new file mode 100644 index 0000000000000000000000000000000000000000..13c79b4b4c1c8b774a84801ac2c03bac3417cf7d --- /dev/null +++ 
b/mmpose/codecs/utils/refinement.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product + +import numpy as np + +from .post_processing import gaussian_blur, gaussian_blur1d + + +def refine_keypoints(keypoints: np.ndarray, + heatmaps: np.ndarray) -> np.ndarray: + """Refine keypoint predictions by moving from the maximum towards the + second maximum by 0.25 pixel. The operation is in-place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + for n, k in product(range(N), range(K)): + x, y = keypoints[n, k, :2].astype(int) + + if 1 < x < W - 1 and 0 < y < H: + dx = heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1] + else: + dx = 0. + + if 1 < y < H - 1 and 0 < x < W: + dy = heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x] + else: + dy = 0. + + keypoints[n, k] += np.sign([dx, dy], dtype=np.float32) * 0.25 + + return keypoints + + +def refine_keypoints_dark(keypoints: np.ndarray, heatmaps: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """Refine keypoint predictions using distribution aware coordinate + decoding. See `Dark Pose`_ for details. The operation is in-place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + # modulate heatmaps + heatmaps = gaussian_blur(heatmaps, blur_kernel_size) + np.maximum(heatmaps, 1e-10, heatmaps) + np.log(heatmaps, heatmaps) + + for n, k in product(range(N), range(K)): + x, y = keypoints[n, k, :2].astype(int) + if 1 < x < W - 2 and 1 < y < H - 2: + dx = 0.5 * (heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1]) + dy = 0.5 * (heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x]) + + dxx = 0.25 * ( + heatmaps[k, y, x + 2] - 2 * heatmaps[k, y, x] + + heatmaps[k, y, x - 2]) + dxy = 0.25 * ( + heatmaps[k, y + 1, x + 1] - heatmaps[k, y - 1, x + 1] - + heatmaps[k, y + 1, x - 1] + heatmaps[k, y - 1, x - 1]) + dyy = 0.25 * ( + heatmaps[k, y + 2, x] - 2 * heatmaps[k, y, x] + + heatmaps[k, y - 2, x]) + derivative = np.array([[dx], [dy]]) + hessian = np.array([[dxx, dxy], [dxy, dyy]]) + if dxx * dyy - dxy**2 != 0: + hessianinv = np.linalg.pinv(hessian) + offset = -hessianinv @ derivative + offset = np.squeeze(np.array(offset.T), axis=0) + keypoints[n, k, :2] += offset + return keypoints + + +def refine_keypoints_dark_udp(keypoints: np.ndarray, heatmaps: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """Refine keypoint predictions using distribution aware coordinate decoding + for UDP. See `UDP`_ for details. The operation is in-place. 
+ + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. _`UDP`: https://arxiv.org/abs/1911.07524 + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + # modulate heatmaps + heatmaps = gaussian_blur(heatmaps, blur_kernel_size) + np.clip(heatmaps, 1e-3, 50., heatmaps) + np.log(heatmaps, heatmaps) + + heatmaps_pad = np.pad( + heatmaps, ((0, 0), (1, 1), (1, 1)), mode='edge').flatten() + + for n in range(N): + index = keypoints[n, :, 0] + 1 + (keypoints[n, :, 1] + 1) * (W + 2) + index += (W + 2) * (H + 2) * np.arange(0, K) + index = index.astype(int).reshape(-1, 1) + i_ = heatmaps_pad[index] + ix1 = heatmaps_pad[index + 1] + iy1 = heatmaps_pad[index + W + 2] + ix1y1 = heatmaps_pad[index + W + 3] + ix1_y1_ = heatmaps_pad[index - W - 3] + ix1_ = heatmaps_pad[index - 1] + iy1_ = heatmaps_pad[index - 2 - W] + + dx = 0.5 * (ix1 - ix1_) + dy = 0.5 * (iy1 - iy1_) + derivative = np.concatenate([dx, dy], axis=1) + derivative = derivative.reshape(K, 2, 1) + + dxx = ix1 - 2 * i_ + ix1_ + dyy = iy1 - 2 * i_ + iy1_ + dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_) + hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1) + hessian = hessian.reshape(K, 2, 2) + hessian = np.linalg.pinv(hessian + np.finfo(np.float32).eps * np.eye(2)) + keypoints[n] -= np.einsum('imn,ink->imk', hessian, + derivative).squeeze() + + return keypoints + + +def refine_simcc_dark(keypoints: np.ndarray, simcc: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """SimCC version. Refine keypoint predictions using distribution aware + coordinate decoding for UDP. See `UDP`_ for details. The operation is in- + place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + simcc (np.ndarray): The heatmaps in shape (N, K, Wx) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. _`UDP`: https://arxiv.org/abs/1911.07524 + """ + N = simcc.shape[0] + + # modulate simcc + simcc = gaussian_blur1d(simcc, blur_kernel_size) + np.clip(simcc, 1e-3, 50., simcc) + np.log(simcc, simcc) + + simcc = np.pad(simcc, ((0, 0), (0, 0), (2, 2)), 'edge') + + for n in range(N): + px = (keypoints[n] + 2.5).astype(np.int64).reshape(-1, 1) # K, 1 + + dx0 = np.take_along_axis(simcc[n], px, axis=1) # K, 1 + dx1 = np.take_along_axis(simcc[n], px + 1, axis=1) + dx_1 = np.take_along_axis(simcc[n], px - 1, axis=1) + dx2 = np.take_along_axis(simcc[n], px + 2, axis=1) + dx_2 = np.take_along_axis(simcc[n], px - 2, axis=1) + + dx = 0.5 * (dx1 - dx_1) + dxx = 1e-9 + 0.25 * (dx2 - 2 * dx0 + dx_2) + + offset = dx / dxx + keypoints[n] -= offset.reshape(-1) + + return keypoints diff --git a/mmpose/codecs/video_pose_lifting.py b/mmpose/codecs/video_pose_lifting.py new file mode 100644 index 0000000000000000000000000000000000000000..5a5a7b1983381b9752d3bc9514fc28d6e1f73c43 --- /dev/null +++ b/mmpose/codecs/video_pose_lifting.py @@ -0,0 +1,246 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
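+# Rough usage sketch of the codec defined below (argument values are
+# illustrative only; `camera_param` must contain at least 'w' and 'h'):
+#
+#     codec = VideoPoseLifting(num_keypoints=17)
+#     encoded = codec.encode(kpts_2d, vis_2d,
+#                            lifting_target=kpts_3d,
+#                            lifting_target_visible=vis_3d,
+#                            camera_param=dict(w=1000, h=1000))
+#     kpts_pred, scores = codec.decode(net_output,
+#                                      target_root=encoded['target_root'])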
+ +from copy import deepcopy +from typing import List, Optional, Tuple, Union + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class VideoPoseLifting(BaseKeypointCodec): + r"""Generate keypoint coordinates for pose lifter. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - pose-lifitng target dimension: C + + Args: + num_keypoints (int): The number of keypoints in the dataset. + zero_center: Whether to zero-center the target around root. Default: + ``True``. + root_index (Union[int, List]): Root keypoint index in the pose. + Default: 0. + remove_root (bool): If true, remove the root keypoint from the pose. + Default: ``False``. + save_index (bool): If true, store the root position separated from the + original pose, only takes effect if ``remove_root`` is ``True``. + Default: ``False``. + reshape_keypoints (bool): If true, reshape the keypoints into shape + (-1, N). Default: ``True``. + concat_vis (bool): If true, concat the visibility item of keypoints. + Default: ``False``. + normalize_camera (bool): Whether to normalize camera intrinsics. + Default: ``False``. + """ + + auxiliary_encode_keys = { + 'lifting_target', 'lifting_target_visible', 'camera_param' + } + + instance_mapping_table = dict( + lifting_target='lifting_target', + lifting_target_visible='lifting_target_visible', + ) + label_mapping_table = dict( + trajectory_weights='trajectory_weights', + lifting_target_label='lifting_target_label', + lifting_target_weight='lifting_target_weight') + + def __init__(self, + num_keypoints: int, + zero_center: bool = True, + root_index: Union[int, List] = 0, + remove_root: bool = False, + save_index: bool = False, + reshape_keypoints: bool = True, + concat_vis: bool = False, + normalize_camera: bool = False): + super().__init__() + + self.num_keypoints = num_keypoints + self.zero_center = zero_center + if isinstance(root_index, int): + root_index = [root_index] + self.root_index = root_index + self.remove_root = remove_root + self.save_index = save_index + self.reshape_keypoints = reshape_keypoints + self.concat_vis = concat_vis + self.normalize_camera = normalize_camera + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + lifting_target: Optional[np.ndarray] = None, + lifting_target_visible: Optional[np.ndarray] = None, + camera_param: Optional[dict] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (N, K). + lifting_target (np.ndarray, optional): 3d target coordinate in + shape (T, K, C). + lifting_target_visible (np.ndarray, optional): Target coordinate in + shape (T, K, ). + camera_param (dict, optional): The camera parameter dictionary. + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape like (N, K, D) or (K * D, N). + - keypoint_labels_visible (np.ndarray): The processed + keypoints' weights in shape (N, K, ) or (N-1, K, ). + - lifting_target_label: The processed target coordinate in + shape (K, C) or (K-1, C). + - lifting_target_weight (np.ndarray): The target weights in + shape (K, ) or (K-1, ). + - trajectory_weights (np.ndarray): The trajectory weights in + shape (K, ). 
+ + In addition, there are some optional items it may contain: + + - target_root (np.ndarray): The root coordinate of target in + shape (C, ). Exists if ``zero_center`` is ``True``. + - target_root_removed (bool): Indicate whether the root of + pose-lifitng target is removed. Exists if + ``remove_root`` is ``True``. + - target_root_index (int): An integer indicating the index of + root. Exists if ``remove_root`` and ``save_index`` + are ``True``. + - camera_param (dict): The updated camera parameter dictionary. + Exists if ``normalize_camera`` is ``True``. + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if lifting_target is None: + lifting_target = [keypoints[0]] + + # set initial value for `lifting_target_weight` + # and `trajectory_weights` + if lifting_target_visible is None: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + lifting_target_weight = lifting_target_visible + trajectory_weights = (1 / lifting_target[:, 2]) + else: + valid = lifting_target_visible > 0.5 + lifting_target_weight = np.where(valid, 1., 0.).astype(np.float32) + trajectory_weights = lifting_target_weight + + if camera_param is None: + camera_param = dict() + + encoded = dict() + + lifting_target_label = lifting_target.copy() + # Zero-center the target pose around a given root keypoint + if self.zero_center: + assert (lifting_target.ndim >= 2 and + lifting_target.shape[-2] > max(self.root_index)), \ + f'Got invalid joint shape {lifting_target.shape}' + + root = np.mean(lifting_target[..., self.root_index, :], axis=-2) + lifting_target_label -= root[..., np.newaxis, :] + encoded['target_root'] = root + + if self.remove_root and len(self.root_index) == 1: + root_index = self.root_index[0] + lifting_target_label = np.delete( + lifting_target_label, root_index, axis=-2) + lifting_target_visible = np.delete( + lifting_target_visible, root_index, axis=-2) + assert lifting_target_weight.ndim in { + 2, 3 + }, (f'Got invalid lifting target weights shape ' + f'{lifting_target_weight.shape}') + + axis_to_remove = -2 if lifting_target_weight.ndim == 3 else -1 + lifting_target_weight = np.delete( + lifting_target_weight, root_index, axis=axis_to_remove) + # Add a flag to avoid latter transforms that rely on the root + # joint or the original joint index + encoded['target_root_removed'] = True + + # Save the root index for restoring the global pose + if self.save_index: + encoded['target_root_index'] = root_index + + # Normalize the 2D keypoint coordinate with image width and height + _camera_param = deepcopy(camera_param) + assert 'w' in _camera_param and 'h' in _camera_param, ( + 'Camera parameter `w` and `h` should be provided.') + + center = np.array([0.5 * _camera_param['w'], 0.5 * _camera_param['h']], + dtype=np.float32) + scale = np.array(0.5 * _camera_param['w'], dtype=np.float32) + + keypoint_labels = (keypoints - center) / scale + + assert keypoint_labels.ndim in { + 2, 3 + }, (f'Got invalid keypoint labels shape {keypoint_labels.shape}') + if keypoint_labels.ndim == 2: + keypoint_labels = keypoint_labels[None, ...] 
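+        # keypoint_labels now has shape (N, K, D), centred on the image centre
+        # and scaled by half the image width, so for 2D inputs x lies roughly
+        # in [-1, 1] and y in [-h/w, h/w]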
+ + if self.normalize_camera: + assert 'f' in _camera_param and 'c' in _camera_param, ( + 'Camera parameter `f` and `c` should be provided.') + _camera_param['f'] = _camera_param['f'] / scale + _camera_param['c'] = (_camera_param['c'] - center[:, None]) / scale + encoded['camera_param'] = _camera_param + + if self.concat_vis: + keypoints_visible_ = keypoints_visible + if keypoints_visible.ndim == 2: + keypoints_visible_ = keypoints_visible[..., None] + keypoint_labels = np.concatenate( + (keypoint_labels, keypoints_visible_), axis=2) + + if self.reshape_keypoints: + N = keypoint_labels.shape[0] + keypoint_labels = keypoint_labels.transpose(1, 2, 0).reshape(-1, N) + + encoded['keypoint_labels'] = keypoint_labels + encoded['keypoints_visible'] = keypoints_visible + encoded['lifting_target_label'] = lifting_target_label + encoded['lifting_target_weight'] = lifting_target_weight + encoded['trajectory_weights'] = trajectory_weights + + return encoded + + def decode(self, + encoded: np.ndarray, + target_root: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, C). + target_root (np.ndarray, optional): The pose-lifitng target root + coordinate. Default: ``None``. + + Returns: + keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). + scores (np.ndarray): The keypoint scores in shape (N, K). + """ + keypoints = encoded.copy() + + if target_root is not None and target_root.size > 0: + keypoints = keypoints + target_root + if self.remove_root and len(self.root_index) == 1: + keypoints = np.insert( + keypoints, self.root_index, target_root, axis=1) + scores = np.ones(keypoints.shape[:-1], dtype=np.float32) + + return keypoints, scores diff --git a/mmpose/configs/MaskPose/ViTb-multi_mask.py b/mmpose/configs/MaskPose/ViTb-multi_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..7431a3ef78ef8bcfe21e2b0c39a2c9db93f5e6ee --- /dev/null +++ b/mmpose/configs/MaskPose/ViTb-multi_mask.py @@ -0,0 +1,291 @@ +COCO_ROOT = "path/to/COCO/" +MPII_ROOT = "path/to/MPII/" +AIC_ROOT = "path/to/AIC/" +OCHUMAN_ROOT = "path/to/OCHuman/" + +BATCH_SIZE = 64 +COCO_NAME = "COCO" +MPII_NAME = "MPII" +AIC_NAME = "AIC" +OCHUMAN_NAME = "OCHuman" + +_base_ = ['../_base_/default_runtime.py'] + +# resume = True +load_from = "work_dirs/ViTb-multi/epoch_210.pth" + +# runtime +train_cfg = dict(max_epochs=210, val_interval=5) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4*BATCH_SIZE/64, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + 
checkpoint=dict(save_best='{}/AP'.format(COCO_NAME), rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=None, + # init_cfg=dict( + # type='Pretrained', + # checkpoint='models/pretrained/mae_pretrain_vit_base_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=21, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='MaskBackground', + prob=1.0, + continue_on_failure=False, + alpha=0.2, + dilate_prob=0.5, + dilate_amount=0.1, + erode_prob=0.5, + erode_amount=0.5, + ), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='MaskBackground', continue_on_failure=False, alpha=0.2), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# # base dataset settings +# data_root = TRAIN_ROOT +# val_data_root = VAL_ROOT +# dataset_type = 'CocoDataset' +# data_mode = 'topdown' + +coco_train_dataset = dict( + type="CocoDataset", + data_root=COCO_ROOT, + data_mode="topdown", + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], + test_mode=False, +) +coco_val_dataset = dict( + type="CocoDataset", + data_root=COCO_ROOT, + data_mode="topdown", + ann_file="annotations/person_keypoints_val2017.json", + bbox_file=COCO_ROOT + "/detections/rtmdet-l-ins-mask.json", + filter_cfg=dict(bbox_score_thr=0.3), + data_prefix=dict(img='val2017/'), + pipeline=[], + test_mode=True, +) +mpii_train_dataset = dict( + type="MpiiDataset", + data_root=MPII_ROOT, + data_mode="topdown", + ann_file="annotations/mpii_sam_train.json", + data_prefix=dict(img='images/'), + pipeline=[], + test_mode=False, +) +mpii_val_dataset = dict( + type="MpiiDataset", + data_root=MPII_ROOT, + data_mode="topdown", + ann_file="annotations/mpii_sam_val.json", + data_prefix=dict(img='images/'), + pipeline=[], + test_mode=True, +) +aic_train_dataset = dict( + type="AicDataset", + data_root=AIC_ROOT, + data_mode="topdown", + ann_file="annotations/aic_sam_train.json", + data_prefix=dict(img='images/'), + pipeline=[], + test_mode=False, +) +aic_val_dataset = dict( + type="AicDataset", + data_root=AIC_ROOT, + data_mode="topdown", + ann_file="annotations/aic_sam_val.json", + data_prefix=dict(img='images/'), + pipeline=[], + test_mode=True, +) +ochuman_val_dataset = dict( + type="OCHumanDataset", + data_root=OCHUMAN_ROOT, + data_mode="topdown", + 
ann_file="annotations/person_keypoints_val2017.json", + data_prefix=dict(img='val2017/'), + # bbox_file=OCHUMAN_ROOT + "/detections/rtmdet-l-ins.json", + # filter_cfg=dict(bbox_score_thr=0.3), + pipeline=[], + test_mode=True, +) + +combined_val_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/merged_COCO_AIC_MPII.py'), + datasets=[coco_val_dataset, mpii_val_dataset, aic_val_dataset, ochuman_val_dataset], + pipeline=val_pipeline, + test_mode=True, + keypoints_mapping=[ + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, + 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16}, # Identity mapping for COCO as merged is based on COCO + {0: 16, 1: 14, 2: 12, 3: 11, 4: 13, 5: 15, 6: 20, 7: 17, 8: 18, + 9: 19, 10: 10, 11: 8, 12: 6, 13: 5, 14: 7, 15: 9}, # MPII -> COCO and additional points + {0: 6, 1: 8, 2: 10, 3: 5, 4: 7, 5: 9, 6: 12, 7: 14, 8: 16, + 9: 11, 10: 13, 11: 15, 12: 19, 13: 17}, # AIC -> COCO and additional points + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, + 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16}, # Identity mapping for OCHuman as merged is based on COCO + ], +) + +combined_train_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/merged_COCO_AIC_MPII.py'), + datasets=[coco_train_dataset, mpii_train_dataset, aic_train_dataset], + pipeline=train_pipeline, + test_mode=False, + keypoints_mapping=[ + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, + 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16}, # Identity mapping for COCO as merged is based on COCO + {0: 16, 1: 14, 2: 12, 3: 11, 4: 13, 5: 15, 6: 20, 7: 17, 8: 18, + 9: 19, 10: 10, 11: 8, 12: 6, 13: 5, 14: 7, 15: 9}, # MPII -> COCO and additional points + {0: 6, 1: 8, 2: 10, 3: 5, 4: 7, 5: 9, 6: 12, 7: 14, 8: 16, + 9: 11, 10: 13, 11: 15, 12: 19, 13: 17}, # AIC -> COCO and additional points + ], +) + +# data loaders +train_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=8, + persistent_workers=True, + sampler=dict( + type='MultiSourceSampler', + batch_size=BATCH_SIZE, + source_ratio=[1, 1, 1], + shuffle=True, + ), + dataset=combined_train_dataset, +) +val_dataloader = dict( + batch_size=128, + num_workers=8, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=combined_val_dataset, +) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='MultiDatasetEvaluator', + metrics=[ + dict(type='CocoMetric', + ann_file=COCO_ROOT + 'annotations/person_keypoints_val2017.json', + prefix=COCO_NAME, + nms_mode='none', + outfile_prefix='COCO_MaskPose', + ignore_stats=['AP .5', 'AP .75', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)'], + ), + dict(type='PCKAccuracy', + prefix=MPII_NAME, + ), + dict(type='PCKAccuracy', + prefix=AIC_NAME, + ), + dict(type='CocoMetric', + ann_file=OCHUMAN_ROOT + 'annotations/person_keypoints_val2017.json', + prefix=OCHUMAN_NAME, + outfile_prefix='ochuman', + nms_mode='none', + ignore_stats=['AP .5', 'AP .75', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)'], + ), + ], + datasets=combined_val_dataset['datasets'], + ) +test_evaluator = val_evaluator diff --git a/mmpose/configs/_base_/datasets/300w.py b/mmpose/configs/_base_/datasets/300w.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3728da1d1555c3526ccbfca182385961e8b667 --- /dev/null +++ b/mmpose/configs/_base_/datasets/300w.py @@ -0,0 +1,134 @@ +dataset_info = dict( + dataset_name='300w', + 
paper_info=dict( + author='Sagonas, Christos and Antonakos, Epameinondas ' + 'and Tzimiropoulos, Georgios and Zafeiriou, Stefanos ' + 'and Pantic, Maja', + title='300 faces in-the-wild challenge: ' + 'Database and results', + container='Image and vision computing', + year='2016', + homepage='https://ibug.doc.ic.ac.uk/resources/300-W/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-16'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-15'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-14'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-13'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-12'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-11'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-10'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-9'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap=''), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-7'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-6'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-5'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-4'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-3'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-2'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-1'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-0'), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-26'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-25'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-24'), + 20: + dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-23'), + 21: + dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-22'), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-21'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-20'), + 24: + dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-19'), + 25: + dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-18'), + 26: + dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-17'), + 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap=''), + 29: dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap=''), + 30: dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap=''), + 31: + dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-35'), + 32: + dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-34'), + 33: dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap=''), + 34: + dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-32'), + 35: + dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-31'), + 36: + dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-45'), + 37: + dict(name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-44'), + 38: + dict(name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-43'), + 39: + dict(name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-42'), + 40: + dict(name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-47'), + 41: dict( + name='kpt-41', id=41, color=[255, 0, 0], 
type='', swap='kpt-46'), + 42: dict( + name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-39'), + 43: dict( + name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-38'), + 44: dict( + name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-37'), + 45: dict( + name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-36'), + 46: dict( + name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-41'), + 47: dict( + name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-40'), + 48: dict( + name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-54'), + 49: dict( + name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-53'), + 50: dict( + name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-52'), + 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict( + name='kpt-52', id=52, color=[255, 0, 0], type='', swap='kpt-50'), + 53: dict( + name='kpt-53', id=53, color=[255, 0, 0], type='', swap='kpt-49'), + 54: dict( + name='kpt-54', id=54, color=[255, 0, 0], type='', swap='kpt-48'), + 55: dict( + name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), + 56: dict( + name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), + 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), + 59: dict( + name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), + 60: dict( + name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-64'), + 61: dict( + name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-63'), + 62: dict(name='kpt-62', id=62, color=[255, 0, 0], type='', swap=''), + 63: dict( + name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-61'), + 64: dict( + name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-60'), + 65: dict( + name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-67'), + 66: dict(name='kpt-66', id=66, color=[255, 0, 0], type='', swap=''), + 67: dict( + name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-65'), + }, + skeleton_info={}, + joint_weights=[1.] * 68, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/300wlp.py b/mmpose/configs/_base_/datasets/300wlp.py new file mode 100644 index 0000000000000000000000000000000000000000..76eb4b70b1a342c17deeb65de79c3fc99ee09f8b --- /dev/null +++ b/mmpose/configs/_base_/datasets/300wlp.py @@ -0,0 +1,86 @@ +dataset_info = dict( + dataset_name='300wlp', + paper_info=dict( + author='Xiangyu Zhu1, and Zhen Lei1 ' + 'and Xiaoming Liu2, and Hailin Shi1 ' + 'and Stan Z. 
Li1', + title='300 faces in-the-wild challenge: ' + 'Database and results', + container='Image and vision computing', + year='2016', + homepage='http://www.cbsr.ia.ac.cn/users/xiangyuzhu/' + 'projects/3DDFA/main.htm', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap=''), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap=''), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap=''), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap=''), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap=''), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap=''), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap=''), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap=''), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap=''), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap=''), + 10: dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap=''), + 11: dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap=''), + 12: dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap=''), + 13: dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap=''), + 14: dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap=''), + 15: dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap=''), + 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap=''), + 18: dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap=''), + 19: dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap=''), + 20: dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap=''), + 21: dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap=''), + 22: dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap=''), + 23: dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap=''), + 24: dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap=''), + 25: dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap=''), + 26: dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap=''), + 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap=''), + 29: dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap=''), + 30: dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap=''), + 31: dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap=''), + 32: dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap=''), + 33: dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap=''), + 34: dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap=''), + 35: dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap=''), + 36: dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap=''), + 37: dict(name='kpt-37', id=37, color=[255, 0, 0], type='', swap=''), + 38: dict(name='kpt-38', id=38, color=[255, 0, 0], type='', swap=''), + 39: dict(name='kpt-39', id=39, color=[255, 0, 0], type='', swap=''), + 40: dict(name='kpt-40', id=40, color=[255, 0, 0], type='', swap=''), + 41: dict(name='kpt-41', id=41, color=[255, 0, 0], type='', swap=''), + 42: dict(name='kpt-42', id=42, color=[255, 0, 0], type='', swap=''), + 43: dict(name='kpt-43', id=43, color=[255, 0, 0], type='', swap=''), + 44: dict(name='kpt-44', id=44, color=[255, 0, 0], type='', swap=''), + 45: dict(name='kpt-45', id=45, color=[255, 0, 0], type='', swap=''), + 46: dict(name='kpt-46', id=46, color=[255, 0, 0], type='', swap=''), + 47: 
dict(name='kpt-47', id=47, color=[255, 0, 0], type='', swap=''), + 48: dict(name='kpt-48', id=48, color=[255, 0, 0], type='', swap=''), + 49: dict(name='kpt-49', id=49, color=[255, 0, 0], type='', swap=''), + 50: dict(name='kpt-50', id=50, color=[255, 0, 0], type='', swap=''), + 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict(name='kpt-52', id=52, color=[255, 0, 0], type='', swap=''), + 53: dict(name='kpt-53', id=53, color=[255, 0, 0], type='', swap=''), + 54: dict(name='kpt-54', id=54, color=[255, 0, 0], type='', swap=''), + 55: dict(name='kpt-55', id=55, color=[255, 0, 0], type='', swap=''), + 56: dict(name='kpt-56', id=56, color=[255, 0, 0], type='', swap=''), + 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict(name='kpt-58', id=58, color=[255, 0, 0], type='', swap=''), + 59: dict(name='kpt-59', id=59, color=[255, 0, 0], type='', swap=''), + 60: dict(name='kpt-60', id=60, color=[255, 0, 0], type='', swap=''), + 61: dict(name='kpt-61', id=61, color=[255, 0, 0], type='', swap=''), + 62: dict(name='kpt-62', id=62, color=[255, 0, 0], type='', swap=''), + 63: dict(name='kpt-63', id=63, color=[255, 0, 0], type='', swap=''), + 64: dict(name='kpt-64', id=64, color=[255, 0, 0], type='', swap=''), + 65: dict(name='kpt-65', id=65, color=[255, 0, 0], type='', swap=''), + 66: dict(name='kpt-66', id=66, color=[255, 0, 0], type='', swap=''), + 67: dict(name='kpt-67', id=67, color=[255, 0, 0], type='', swap=''), + }, + skeleton_info={}, + joint_weights=[1.] * 68, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/aflw.py b/mmpose/configs/_base_/datasets/aflw.py new file mode 100644 index 0000000000000000000000000000000000000000..cf5e10964da700415f3613ca43a0755f5015d8f0 --- /dev/null +++ b/mmpose/configs/_base_/datasets/aflw.py @@ -0,0 +1,44 @@ +dataset_info = dict( + dataset_name='aflw', + paper_info=dict( + author='Koestinger, Martin and Wohlhart, Paul and ' + 'Roth, Peter M and Bischof, Horst', + title='Annotated facial landmarks in the wild: ' + 'A large-scale, real-world database for facial ' + 'landmark localization', + container='2011 IEEE international conference on computer ' + 'vision workshops (ICCV workshops)', + year='2011', + homepage='https://www.tugraz.at/institute/icg/research/' + 'team-bischof/lrs/downloads/aflw/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-5'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-4'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-1'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-0'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-11'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-10'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-7'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-6'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), + 13: dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap=''), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), + 16: 
dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), + 18: dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='') + }, + skeleton_info={}, + joint_weights=[1.] * 19, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/aic.py b/mmpose/configs/_base_/datasets/aic.py new file mode 100644 index 0000000000000000000000000000000000000000..9ecdbe3f0afeb19dbb7aed42653ce5efd85cfda3 --- /dev/null +++ b/mmpose/configs/_base_/datasets/aic.py @@ -0,0 +1,140 @@ +dataset_info = dict( + dataset_name='aic', + paper_info=dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + keypoint_info={ + 0: + dict( + name='right_shoulder', + id=0, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 1: + dict( + name='right_elbow', + id=1, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 2: + dict( + name='right_wrist', + id=2, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 3: + dict( + name='left_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 4: + dict( + name='left_elbow', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 5: + dict( + name='left_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 6: + dict( + name='right_hip', + id=6, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 7: + dict( + name='right_knee', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 8: + dict( + name='right_ankle', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 9: + dict( + name='left_hip', + id=9, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 10: + dict( + name='left_knee', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 11: + dict( + name='left_ankle', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 12: + dict( + name='head_top', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('right_wrist', 'right_elbow'), id=0, color=[255, 128, 0]), + 1: dict( + link=('right_elbow', 'right_shoulder'), id=1, color=[255, 128, 0]), + 2: dict(link=('right_shoulder', 'neck'), id=2, color=[51, 153, 255]), + 3: dict(link=('neck', 'left_shoulder'), id=3, color=[51, 153, 255]), + 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: dict(link=('right_ankle', 'right_knee'), id=6, color=[255, 128, 0]), + 7: dict(link=('right_knee', 'right_hip'), id=7, color=[255, 128, 0]), + 8: dict(link=('right_hip', 'left_hip'), id=8, color=[51, 153, 255]), + 9: dict(link=('left_hip', 'left_knee'), id=9, color=[0, 255, 0]), + 10: dict(link=('left_knee', 'left_ankle'), id=10, color=[0, 255, 0]), + 11: dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + 12: dict( + link=('right_shoulder', 'right_hip'), id=12, color=[51, 153, 255]), + 13: + dict(link=('left_shoulder', 'left_hip'), id=13, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1.2, 
1.5, 1., 1.2, 1.5, 1., 1. + ], + + # 'https://github.com/AIChallenger/AI_Challenger_2017/blob/master/' + # 'Evaluation/keypoint_eval/keypoint_eval.py#L50' + # delta = 2 x sigma + sigmas=[ + 0.01388152, 0.01515228, 0.01057665, 0.01417709, 0.01497891, 0.01402144, + 0.03909642, 0.03686941, 0.01981803, 0.03843971, 0.03412318, 0.02415081, + 0.01291456, 0.01236173 + ]) diff --git a/mmpose/configs/_base_/datasets/ak.py b/mmpose/configs/_base_/datasets/ak.py new file mode 100644 index 0000000000000000000000000000000000000000..e8b12f5a3125a7eec549a483d70077361f215205 --- /dev/null +++ b/mmpose/configs/_base_/datasets/ak.py @@ -0,0 +1,267 @@ +dataset_info = dict( + dataset_name='Animal Kingdom', + paper_info=dict( + author='Singapore University of Technology and Design, Singapore.' + ' Xun Long Ng, Kian Eng Ong, Qichen Zheng,' + ' Yun Ni, Si Yong Yeo, Jun Liu.', + title='Animal Kingdom: ' + 'A Large and Diverse Dataset for Animal Behavior Understanding', + container='Conference on Computer Vision ' + 'and Pattern Recognition (CVPR)', + year='2022', + homepage='https://sutdcv.github.io/Animal-Kingdom', + version='1.0 (2022-06)', + date_created='2022-06', + ), + keypoint_info={ + 0: + dict( + name='Head_Mid_Top', + id=0, + color=(225, 0, 255), + type='upper', + swap=''), + 1: + dict( + name='Eye_Left', + id=1, + color=[220, 20, 60], + type='upper', + swap='Eye_Right'), + 2: + dict( + name='Eye_Right', + id=2, + color=[0, 255, 255], + type='upper', + swap='Eye_Left'), + 3: + dict( + name='Mouth_Front_Top', + id=3, + color=(0, 255, 42), + type='upper', + swap=''), + 4: + dict( + name='Mouth_Back_Left', + id=4, + color=[221, 160, 221], + type='upper', + swap='Mouth_Back_Right'), + 5: + dict( + name='Mouth_Back_Right', + id=5, + color=[135, 206, 250], + type='upper', + swap='Mouth_Back_Left'), + 6: + dict( + name='Mouth_Front_Bottom', + id=6, + color=[50, 205, 50], + type='upper', + swap=''), + 7: + dict( + name='Shoulder_Left', + id=7, + color=[255, 182, 193], + type='upper', + swap='Shoulder_Right'), + 8: + dict( + name='Shoulder_Right', + id=8, + color=[0, 191, 255], + type='upper', + swap='Shoulder_Left'), + 9: + dict( + name='Elbow_Left', + id=9, + color=[255, 105, 180], + type='upper', + swap='Elbow_Right'), + 10: + dict( + name='Elbow_Right', + id=10, + color=[30, 144, 255], + type='upper', + swap='Elbow_Left'), + 11: + dict( + name='Wrist_Left', + id=11, + color=[255, 20, 147], + type='upper', + swap='Wrist_Right'), + 12: + dict( + name='Wrist_Right', + id=12, + color=[0, 0, 255], + type='upper', + swap='Wrist_Left'), + 13: + dict( + name='Torso_Mid_Back', + id=13, + color=(185, 3, 221), + type='upper', + swap=''), + 14: + dict( + name='Hip_Left', + id=14, + color=[255, 215, 0], + type='lower', + swap='Hip_Right'), + 15: + dict( + name='Hip_Right', + id=15, + color=[147, 112, 219], + type='lower', + swap='Hip_Left'), + 16: + dict( + name='Knee_Left', + id=16, + color=[255, 165, 0], + type='lower', + swap='Knee_Right'), + 17: + dict( + name='Knee_Right', + id=17, + color=[138, 43, 226], + type='lower', + swap='Knee_Left'), + 18: + dict( + name='Ankle_Left', + id=18, + color=[255, 140, 0], + type='lower', + swap='Ankle_Right'), + 19: + dict( + name='Ankle_Right', + id=19, + color=[128, 0, 128], + type='lower', + swap='Ankle_Left'), + 20: + dict( + name='Tail_Top_Back', + id=20, + color=(0, 251, 255), + type='lower', + swap=''), + 21: + dict( + name='Tail_Mid_Back', + id=21, + color=[32, 178, 170], + type='lower', + swap=''), + 22: + dict( + name='Tail_End_Back', + id=22, + color=(0, 102, 102), + 
type='lower', + swap='') + }, + skeleton_info={ + 0: + dict(link=('Eye_Left', 'Head_Mid_Top'), id=0, color=[220, 20, 60]), + 1: + dict(link=('Eye_Right', 'Head_Mid_Top'), id=1, color=[0, 255, 255]), + 2: + dict( + link=('Mouth_Front_Top', 'Mouth_Back_Left'), + id=2, + color=[221, 160, 221]), + 3: + dict( + link=('Mouth_Front_Top', 'Mouth_Back_Right'), + id=3, + color=[135, 206, 250]), + 4: + dict( + link=('Mouth_Front_Bottom', 'Mouth_Back_Left'), + id=4, + color=[221, 160, 221]), + 5: + dict( + link=('Mouth_Front_Bottom', 'Mouth_Back_Right'), + id=5, + color=[135, 206, 250]), + 6: + dict( + link=('Head_Mid_Top', 'Torso_Mid_Back'), id=6, + color=(225, 0, 255)), + 7: + dict( + link=('Torso_Mid_Back', 'Tail_Top_Back'), + id=7, + color=(185, 3, 221)), + 8: + dict( + link=('Tail_Top_Back', 'Tail_Mid_Back'), id=8, + color=(0, 251, 255)), + 9: + dict( + link=('Tail_Mid_Back', 'Tail_End_Back'), + id=9, + color=[32, 178, 170]), + 10: + dict( + link=('Head_Mid_Top', 'Shoulder_Left'), + id=10, + color=[255, 182, 193]), + 11: + dict( + link=('Head_Mid_Top', 'Shoulder_Right'), + id=11, + color=[0, 191, 255]), + 12: + dict( + link=('Shoulder_Left', 'Elbow_Left'), id=12, color=[255, 105, + 180]), + 13: + dict( + link=('Shoulder_Right', 'Elbow_Right'), + id=13, + color=[30, 144, 255]), + 14: + dict(link=('Elbow_Left', 'Wrist_Left'), id=14, color=[255, 20, 147]), + 15: + dict(link=('Elbow_Right', 'Wrist_Right'), id=15, color=[0, 0, 255]), + 16: + dict(link=('Tail_Top_Back', 'Hip_Left'), id=16, color=[255, 215, 0]), + 17: + dict( + link=('Tail_Top_Back', 'Hip_Right'), id=17, color=[147, 112, 219]), + 18: + dict(link=('Hip_Left', 'Knee_Left'), id=18, color=[255, 165, 0]), + 19: + dict(link=('Hip_Right', 'Knee_Right'), id=19, color=[138, 43, 226]), + 20: + dict(link=('Knee_Left', 'Ankle_Left'), id=20, color=[255, 140, 0]), + 21: + dict(link=('Knee_Right', 'Ankle_Right'), id=21, color=[128, 0, 128]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1. 
+ ], + sigmas=[ + 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, + 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, + 0.025, 0.025, 0.025 + ]) diff --git a/mmpose/configs/_base_/datasets/animalpose.py b/mmpose/configs/_base_/datasets/animalpose.py new file mode 100644 index 0000000000000000000000000000000000000000..d5bb62d951b71da25e679bd755fe566216dc3f6f --- /dev/null +++ b/mmpose/configs/_base_/datasets/animalpose.py @@ -0,0 +1,166 @@ +dataset_info = dict( + dataset_name='animalpose', + paper_info=dict( + author='Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and ' + 'Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing', + title='Cross-Domain Adaptation for Animal Pose Estimation', + container='The IEEE International Conference on ' + 'Computer Vision (ICCV)', + year='2019', + homepage='https://sites.google.com/view/animal-pose/', + ), + keypoint_info={ + 0: + dict( + name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), + 1: + dict( + name='R_Eye', + id=1, + color=[255, 128, 0], + type='upper', + swap='L_Eye'), + 2: + dict( + name='L_EarBase', + id=2, + color=[0, 255, 0], + type='upper', + swap='R_EarBase'), + 3: + dict( + name='R_EarBase', + id=3, + color=[255, 128, 0], + type='upper', + swap='L_EarBase'), + 4: + dict(name='Nose', id=4, color=[51, 153, 255], type='upper', swap=''), + 5: + dict(name='Throat', id=5, color=[51, 153, 255], type='upper', swap=''), + 6: + dict( + name='TailBase', id=6, color=[51, 153, 255], type='lower', + swap=''), + 7: + dict( + name='Withers', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='L_F_Elbow', + id=8, + color=[0, 255, 0], + type='upper', + swap='R_F_Elbow'), + 9: + dict( + name='R_F_Elbow', + id=9, + color=[255, 128, 0], + type='upper', + swap='L_F_Elbow'), + 10: + dict( + name='L_B_Elbow', + id=10, + color=[0, 255, 0], + type='lower', + swap='R_B_Elbow'), + 11: + dict( + name='R_B_Elbow', + id=11, + color=[255, 128, 0], + type='lower', + swap='L_B_Elbow'), + 12: + dict( + name='L_F_Knee', + id=12, + color=[0, 255, 0], + type='upper', + swap='R_F_Knee'), + 13: + dict( + name='R_F_Knee', + id=13, + color=[255, 128, 0], + type='upper', + swap='L_F_Knee'), + 14: + dict( + name='L_B_Knee', + id=14, + color=[0, 255, 0], + type='lower', + swap='R_B_Knee'), + 15: + dict( + name='R_B_Knee', + id=15, + color=[255, 128, 0], + type='lower', + swap='L_B_Knee'), + 16: + dict( + name='L_F_Paw', + id=16, + color=[0, 255, 0], + type='upper', + swap='R_F_Paw'), + 17: + dict( + name='R_F_Paw', + id=17, + color=[255, 128, 0], + type='upper', + swap='L_F_Paw'), + 18: + dict( + name='L_B_Paw', + id=18, + color=[0, 255, 0], + type='lower', + swap='R_B_Paw'), + 19: + dict( + name='R_B_Paw', + id=19, + color=[255, 128, 0], + type='lower', + swap='L_B_Paw') + }, + skeleton_info={ + 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[51, 153, 255]), + 1: dict(link=('L_Eye', 'L_EarBase'), id=1, color=[0, 255, 0]), + 2: dict(link=('R_Eye', 'R_EarBase'), id=2, color=[255, 128, 0]), + 3: dict(link=('L_Eye', 'Nose'), id=3, color=[0, 255, 0]), + 4: dict(link=('R_Eye', 'Nose'), id=4, color=[255, 128, 0]), + 5: dict(link=('Nose', 'Throat'), id=5, color=[51, 153, 255]), + 6: dict(link=('Throat', 'Withers'), id=6, color=[51, 153, 255]), + 7: dict(link=('TailBase', 'Withers'), id=7, color=[51, 153, 255]), + 8: dict(link=('Throat', 'L_F_Elbow'), id=8, color=[0, 255, 0]), + 9: dict(link=('L_F_Elbow', 'L_F_Knee'), id=9, color=[0, 255, 0]), + 10: dict(link=('L_F_Knee', 'L_F_Paw'), id=10, color=[0, 255, 0]), + 11: 
dict(link=('Throat', 'R_F_Elbow'), id=11, color=[255, 128, 0]), + 12: dict(link=('R_F_Elbow', 'R_F_Knee'), id=12, color=[255, 128, 0]), + 13: dict(link=('R_F_Knee', 'R_F_Paw'), id=13, color=[255, 128, 0]), + 14: dict(link=('TailBase', 'L_B_Elbow'), id=14, color=[0, 255, 0]), + 15: dict(link=('L_B_Elbow', 'L_B_Knee'), id=15, color=[0, 255, 0]), + 16: dict(link=('L_B_Knee', 'L_B_Paw'), id=16, color=[0, 255, 0]), + 17: dict(link=('TailBase', 'R_B_Elbow'), id=17, color=[255, 128, 0]), + 18: dict(link=('R_B_Elbow', 'R_B_Knee'), id=18, color=[255, 128, 0]), + 19: dict(link=('R_B_Knee', 'R_B_Paw'), id=19, color=[255, 128, 0]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, + 1.5, 1.5, 1.5, 1.5 + ], + + # Note: The original paper did not provide enough information about + # the sigmas. We modified from 'https://github.com/cocodataset/' + # 'cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py#L523' + sigmas=[ + 0.025, 0.025, 0.026, 0.035, 0.035, 0.10, 0.10, 0.10, 0.107, 0.107, + 0.107, 0.107, 0.087, 0.087, 0.087, 0.087, 0.089, 0.089, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/ap10k.py b/mmpose/configs/_base_/datasets/ap10k.py new file mode 100644 index 0000000000000000000000000000000000000000..c0df579acbb8cf0de1ef62412ba865ee8710f0aa --- /dev/null +++ b/mmpose/configs/_base_/datasets/ap10k.py @@ -0,0 +1,142 @@ +dataset_info = dict( + dataset_name='ap10k', + paper_info=dict( + author='Yu, Hang and Xu, Yufei and Zhang, Jing and ' + 'Zhao, Wei and Guan, Ziyu and Tao, Dacheng', + title='AP-10K: A Benchmark for Animal Pose Estimation in the Wild', + container='35th Conference on Neural Information Processing Systems ' + '(NeurIPS 2021) Track on Datasets and Bench-marks.', + year='2021', + homepage='https://github.com/AlexTheBad/AP-10K', + ), + keypoint_info={ + 0: + dict( + name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), + 1: + dict( + name='R_Eye', + id=1, + color=[255, 128, 0], + type='upper', + swap='L_Eye'), + 2: + dict(name='Nose', id=2, color=[51, 153, 255], type='upper', swap=''), + 3: + dict(name='Neck', id=3, color=[51, 153, 255], type='upper', swap=''), + 4: + dict( + name='Root of tail', + id=4, + color=[51, 153, 255], + type='lower', + swap=''), + 5: + dict( + name='L_Shoulder', + id=5, + color=[51, 153, 255], + type='upper', + swap='R_Shoulder'), + 6: + dict( + name='L_Elbow', + id=6, + color=[51, 153, 255], + type='upper', + swap='R_Elbow'), + 7: + dict( + name='L_F_Paw', + id=7, + color=[0, 255, 0], + type='upper', + swap='R_F_Paw'), + 8: + dict( + name='R_Shoulder', + id=8, + color=[0, 255, 0], + type='upper', + swap='L_Shoulder'), + 9: + dict( + name='R_Elbow', + id=9, + color=[255, 128, 0], + type='upper', + swap='L_Elbow'), + 10: + dict( + name='R_F_Paw', + id=10, + color=[0, 255, 0], + type='lower', + swap='L_F_Paw'), + 11: + dict( + name='L_Hip', + id=11, + color=[255, 128, 0], + type='lower', + swap='R_Hip'), + 12: + dict( + name='L_Knee', + id=12, + color=[255, 128, 0], + type='lower', + swap='R_Knee'), + 13: + dict( + name='L_B_Paw', + id=13, + color=[0, 255, 0], + type='lower', + swap='R_B_Paw'), + 14: + dict( + name='R_Hip', id=14, color=[0, 255, 0], type='lower', + swap='L_Hip'), + 15: + dict( + name='R_Knee', + id=15, + color=[0, 255, 0], + type='lower', + swap='L_Knee'), + 16: + dict( + name='R_B_Paw', + id=16, + color=[0, 255, 0], + type='lower', + swap='L_B_Paw'), + }, + skeleton_info={ + 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[0, 0, 255]), + 1: dict(link=('L_Eye', 'Nose'), id=1, 
color=[0, 0, 255]), + 2: dict(link=('R_Eye', 'Nose'), id=2, color=[0, 0, 255]), + 3: dict(link=('Nose', 'Neck'), id=3, color=[0, 255, 0]), + 4: dict(link=('Neck', 'Root of tail'), id=4, color=[0, 255, 0]), + 5: dict(link=('Neck', 'L_Shoulder'), id=5, color=[0, 255, 255]), + 6: dict(link=('L_Shoulder', 'L_Elbow'), id=6, color=[0, 255, 255]), + 7: dict(link=('L_Elbow', 'L_F_Paw'), id=6, color=[0, 255, 255]), + 8: dict(link=('Neck', 'R_Shoulder'), id=7, color=[6, 156, 250]), + 9: dict(link=('R_Shoulder', 'R_Elbow'), id=8, color=[6, 156, 250]), + 10: dict(link=('R_Elbow', 'R_F_Paw'), id=9, color=[6, 156, 250]), + 11: dict(link=('Root of tail', 'L_Hip'), id=10, color=[0, 255, 255]), + 12: dict(link=('L_Hip', 'L_Knee'), id=11, color=[0, 255, 255]), + 13: dict(link=('L_Knee', 'L_B_Paw'), id=12, color=[0, 255, 255]), + 14: dict(link=('Root of tail', 'R_Hip'), id=13, color=[6, 156, 250]), + 15: dict(link=('R_Hip', 'R_Knee'), id=14, color=[6, 156, 250]), + 16: dict(link=('R_Knee', 'R_B_Paw'), id=15, color=[6, 156, 250]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.025, 0.025, 0.026, 0.035, 0.035, 0.079, 0.072, 0.062, 0.079, 0.072, + 0.062, 0.107, 0.087, 0.089, 0.107, 0.087, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/atrw.py b/mmpose/configs/_base_/datasets/atrw.py new file mode 100644 index 0000000000000000000000000000000000000000..7ec71c8c508a0340139371a651ca2dd56eeae3cf --- /dev/null +++ b/mmpose/configs/_base_/datasets/atrw.py @@ -0,0 +1,144 @@ +dataset_info = dict( + dataset_name='atrw', + paper_info=dict( + author='Li, Shuyuan and Li, Jianguo and Tang, Hanlin ' + 'and Qian, Rui and Lin, Weiyao', + title='ATRW: A Benchmark for Amur Tiger ' + 'Re-identification in the Wild', + container='Proceedings of the 28th ACM ' + 'International Conference on Multimedia', + year='2020', + homepage='https://cvwc2019.github.io/challenge.html', + ), + keypoint_info={ + 0: + dict( + name='left_ear', + id=0, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 1: + dict( + name='right_ear', + id=1, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 2: + dict(name='nose', id=2, color=[51, 153, 255], type='upper', swap=''), + 3: + dict( + name='right_shoulder', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 4: + dict( + name='right_front_paw', + id=4, + color=[255, 128, 0], + type='upper', + swap='left_front_paw'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_front_paw', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_front_paw'), + 7: + dict( + name='right_hip', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='right_knee', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 9: + dict( + name='right_back_paw', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_back_paw'), + 10: + dict( + name='left_hip', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 11: + dict( + name='left_knee', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 12: + dict( + name='left_back_paw', + id=12, + color=[0, 255, 0], + type='lower', + swap='right_back_paw'), + 13: + dict(name='tail', id=13, color=[51, 153, 255], type='lower', swap=''), + 14: + dict( + name='center', id=14, color=[51, 153, 255], type='lower', swap=''), + }, + skeleton_info={ + 0: + dict(link=('left_ear', 'nose'), id=0, color=[51, 
153, 255]), + 1: + dict(link=('right_ear', 'nose'), id=1, color=[51, 153, 255]), + 2: + dict(link=('nose', 'center'), id=2, color=[51, 153, 255]), + 3: + dict( + link=('left_shoulder', 'left_front_paw'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_shoulder', 'center'), id=4, color=[0, 255, 0]), + 5: + dict( + link=('right_shoulder', 'right_front_paw'), + id=5, + color=[255, 128, 0]), + 6: + dict(link=('right_shoulder', 'center'), id=6, color=[255, 128, 0]), + 7: + dict(link=('tail', 'center'), id=7, color=[51, 153, 255]), + 8: + dict(link=('right_back_paw', 'right_knee'), id=8, color=[255, 128, 0]), + 9: + dict(link=('right_knee', 'right_hip'), id=9, color=[255, 128, 0]), + 10: + dict(link=('right_hip', 'tail'), id=10, color=[255, 128, 0]), + 11: + dict(link=('left_back_paw', 'left_knee'), id=11, color=[0, 255, 0]), + 12: + dict(link=('left_knee', 'left_hip'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_hip', 'tail'), id=13, color=[0, 255, 0]), + }, + joint_weights=[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], + sigmas=[ + 0.0277, 0.0823, 0.0831, 0.0202, 0.0716, 0.0263, 0.0646, 0.0302, 0.0440, + 0.0316, 0.0333, 0.0547, 0.0263, 0.0683, 0.0539 + ]) diff --git a/mmpose/configs/_base_/datasets/campus.py b/mmpose/configs/_base_/datasets/campus.py new file mode 100644 index 0000000000000000000000000000000000000000..334316e9c25282508767158d3fae30578ab3949d --- /dev/null +++ b/mmpose/configs/_base_/datasets/campus.py @@ -0,0 +1,151 @@ +dataset_info = dict( + dataset_name='campus', + paper_info=dict( + author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' + 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', + title='3D Pictorial Structures for Multiple Human Pose Estimation', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://campar.in.tum.de/Chair/MultiHumanPose', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict( + name='right_wrist', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 7: + dict( + name='right_elbow', + id=7, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 8: + dict( + name='right_shoulder', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 9: + dict( + name='left_shoulder', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 10: + dict( + name='left_elbow', + id=10, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 11: + dict( + name='left_wrist', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 12: + dict( + name='bottom_head', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict( + name='top_head', + id=13, + color=[51, 153, 255], + type='upper', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 
0]), + 2: + dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), + 3: + dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), + 4: + dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), + 6: + dict( + link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_elbow'), id=7, color=[0, 255, 0]), + 8: + dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), + 9: + dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict( + link=('right_shoulder', 'bottom_head'), id=11, color=[255, 128, + 0]), + 12: + dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), + 13: + dict(link=('bottom_head', 'top_head'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 + ], + sigmas=[ + 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, + 0.072, 0.062, 0.026, 0.026 + ]) diff --git a/mmpose/configs/_base_/datasets/coco.py b/mmpose/configs/_base_/datasets/coco.py new file mode 100644 index 0000000000000000000000000000000000000000..865a95bc02fedd318f32d2e7aa8397147d78fdb5 --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco.py @@ -0,0 +1,181 @@ +dataset_info = dict( + dataset_name='coco', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + 
type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_aic.py b/mmpose/configs/_base_/datasets/coco_aic.py new file mode 100644 index 0000000000000000000000000000000000000000..a084247468dac1b766cbcf756b750aa3d3680b9d --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_aic.py @@ -0,0 +1,205 @@ +dataset_info = dict( + dataset_name='coco', + paper_info=[ + dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + ], + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + 
swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='head_top', + id=17, + color=[51, 153, 255], + type='upper', + swap=''), + 18: + dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.026, 0.026 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_crop.py b/mmpose/configs/_base_/datasets/coco_crop.py new file mode 100644 index 0000000000000000000000000000000000000000..8c465b2b8033073c6f1deed93830554262afba26 --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_crop.py @@ -0,0 +1,181 @@ +dataset_info = dict( + dataset_name='coco_crop', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr 
and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 
0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_openpose.py b/mmpose/configs/_base_/datasets/coco_openpose.py new file mode 100644 index 0000000000000000000000000000000000000000..cce11b27f16b480facf8717055500d3e60c6ec4f --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_openpose.py @@ -0,0 +1,157 @@ +dataset_info = dict( + dataset_name='coco_openpose', + paper_info=dict( + author='Zhe, Cao and Tomas, Simon and ' + 'Shih-En, Wei and Yaser, Sheikh', + title='OpenPose: Realtime Multi-Person 2D Pose ' + 'Estimation using Part Affinity Fields', + container='IEEE Transactions on Pattern Analysis ' + 'and Machine Intelligence', + year='2019', + homepage='https://github.com/CMU-Perceptual-Computing-Lab/openpose/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[255, 0, 0], type='upper', swap=''), + 1: + dict(name='neck', id=1, color=[255, 85, 0], type='upper', swap=''), + 2: + dict( + name='right_shoulder', + id=2, + color=[255, 170, 0], + type='upper', + swap='left_shoulder'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 255, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='right_wrist', + id=4, + color=[170, 255, 0], + type='upper', + swap='left_wrist'), + 5: + dict( + name='left_shoulder', + id=5, + color=[85, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_elbow', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 7: + dict( + name='left_wrist', + id=7, + color=[0, 255, 85], + type='upper', + swap='right_wrist'), + 8: + dict( + name='right_hip', + id=8, + color=[0, 255, 170], + type='lower', + swap='left_hip'), + 9: + dict( + name='right_knee', + id=9, + color=[0, 255, 255], + type='lower', + swap='left_knee'), + 10: + dict( + name='right_ankle', + id=10, + color=[0, 170, 255], + type='lower', + swap='left_ankle'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 85, 255], + type='lower', + swap='right_hip'), + 12: + dict( + name='left_knee', + id=12, + color=[0, 0, 255], + type='lower', + swap='right_knee'), + 13: + dict( + name='left_ankle', + id=13, + color=[85, 0, 255], + type='lower', + swap='right_ankle'), + 14: + dict( + name='right_eye', + id=14, + color=[170, 0, 255], + type='upper', + swap='left_eye'), + 15: + dict( + name='left_eye', + id=15, + color=[255, 0, 255], + type='upper', + swap='right_eye'), + 16: + dict( + name='right_ear', + id=16, + color=[255, 0, 170], + type='upper', + swap='left_ear'), + 17: + dict( + name='left_ear', + id=17, + color=[255, 0, 85], + type='upper', + swap='right_ear'), + }, + skeleton_info={ + 0: dict(link=('neck', 'right_shoulder'), id=0, color=[255, 0, 0]), + 1: dict(link=('neck', 'left_shoulder'), id=1, color=[255, 85, 0]), + 2: dict( + link=('right_shoulder', 'right_elbow'), id=2, color=[255, 170, 0]), + 3: + dict(link=('right_elbow', 'right_wrist'), id=3, color=[255, 255, 0]), + 4: + dict(link=('left_shoulder', 'left_elbow'), id=4, color=[170, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[85, 255, 0]), + 6: dict(link=('neck', 'right_hip'), id=6, color=[0, 255, 0]), + 7: dict(link=('right_hip', 'right_knee'), id=7, color=[0, 255, 85]), + 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[0, 255, 170]), + 9: dict(link=('neck', 'left_hip'), id=9, color=[0, 255, 225]), + 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 170, 255]), + 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 85, 255]), + 12: 
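+        # 'neck' (keypoint 1) is specific to the 18-keypoint OpenPose layout;
+        # when this format is produced from the 17-keypoint COCO annotations it
+        # is typically taken as the midpoint of the left and right shoulders.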
dict(link=('neck', 'nose'), id=12, color=[0, 0, 255]), + 13: dict(link=('nose', 'right_eye'), id=13, color=[255, 0, 170]), + 14: dict(link=('right_eye', 'right_ear'), id=14, color=[170, 0, 255]), + 15: dict(link=('nose', 'left_eye'), id=15, color=[255, 0, 255]), + 16: dict(link=('left_eye', 'left_ear'), id=16, color=[255, 0, 170]), + }, + joint_weights=[1.] * 18, + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.082 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_wholebody.py b/mmpose/configs/_base_/datasets/coco_wholebody.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9b707017a24a1a133bb28566d212c618fee694 --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_wholebody.py @@ -0,0 +1,1154 @@ +dataset_info = dict( + dataset_name='coco_wholebody', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='left_big_toe', + id=17, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 18: + dict( + name='left_small_toe', + id=18, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 19: + dict( + name='left_heel', + id=19, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 20: + dict( + name='right_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 21: + dict( + name='right_small_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 
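+        # 'right_heel' below completes the six foot keypoints (indices 17-22);
+        # 0-16 follow the COCO body order, 23-90 are the 68 face landmarks and
+        # 91-132 the 21 keypoints of each hand, giving 133 keypoints in total.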
22: + dict( + name='right_heel', + id=22, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 23: + dict( + name='face-0', + id=23, + color=[255, 255, 255], + type='', + swap='face-16'), + 24: + dict( + name='face-1', + id=24, + color=[255, 255, 255], + type='', + swap='face-15'), + 25: + dict( + name='face-2', + id=25, + color=[255, 255, 255], + type='', + swap='face-14'), + 26: + dict( + name='face-3', + id=26, + color=[255, 255, 255], + type='', + swap='face-13'), + 27: + dict( + name='face-4', + id=27, + color=[255, 255, 255], + type='', + swap='face-12'), + 28: + dict( + name='face-5', + id=28, + color=[255, 255, 255], + type='', + swap='face-11'), + 29: + dict( + name='face-6', + id=29, + color=[255, 255, 255], + type='', + swap='face-10'), + 30: + dict( + name='face-7', + id=30, + color=[255, 255, 255], + type='', + swap='face-9'), + 31: + dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), + 32: + dict( + name='face-9', + id=32, + color=[255, 255, 255], + type='', + swap='face-7'), + 33: + dict( + name='face-10', + id=33, + color=[255, 255, 255], + type='', + swap='face-6'), + 34: + dict( + name='face-11', + id=34, + color=[255, 255, 255], + type='', + swap='face-5'), + 35: + dict( + name='face-12', + id=35, + color=[255, 255, 255], + type='', + swap='face-4'), + 36: + dict( + name='face-13', + id=36, + color=[255, 255, 255], + type='', + swap='face-3'), + 37: + dict( + name='face-14', + id=37, + color=[255, 255, 255], + type='', + swap='face-2'), + 38: + dict( + name='face-15', + id=38, + color=[255, 255, 255], + type='', + swap='face-1'), + 39: + dict( + name='face-16', + id=39, + color=[255, 255, 255], + type='', + swap='face-0'), + 40: + dict( + name='face-17', + id=40, + color=[255, 255, 255], + type='', + swap='face-26'), + 41: + dict( + name='face-18', + id=41, + color=[255, 255, 255], + type='', + swap='face-25'), + 42: + dict( + name='face-19', + id=42, + color=[255, 255, 255], + type='', + swap='face-24'), + 43: + dict( + name='face-20', + id=43, + color=[255, 255, 255], + type='', + swap='face-23'), + 44: + dict( + name='face-21', + id=44, + color=[255, 255, 255], + type='', + swap='face-22'), + 45: + dict( + name='face-22', + id=45, + color=[255, 255, 255], + type='', + swap='face-21'), + 46: + dict( + name='face-23', + id=46, + color=[255, 255, 255], + type='', + swap='face-20'), + 47: + dict( + name='face-24', + id=47, + color=[255, 255, 255], + type='', + swap='face-19'), + 48: + dict( + name='face-25', + id=48, + color=[255, 255, 255], + type='', + swap='face-18'), + 49: + dict( + name='face-26', + id=49, + color=[255, 255, 255], + type='', + swap='face-17'), + 50: + dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), + 51: + dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), + 52: + dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), + 53: + dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict( + name='face-31', + id=54, + color=[255, 255, 255], + type='', + swap='face-35'), + 55: + dict( + name='face-32', + id=55, + color=[255, 255, 255], + type='', + swap='face-34'), + 56: + dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-34', + id=57, + color=[255, 255, 255], + type='', + swap='face-32'), + 58: + dict( + name='face-35', + id=58, + color=[255, 255, 255], + type='', + swap='face-31'), + 59: + dict( + name='face-36', + id=59, + color=[255, 255, 255], + type='', + swap='face-45'), + 60: + dict( + 
name='face-37', + id=60, + color=[255, 255, 255], + type='', + swap='face-44'), + 61: + dict( + name='face-38', + id=61, + color=[255, 255, 255], + type='', + swap='face-43'), + 62: + dict( + name='face-39', + id=62, + color=[255, 255, 255], + type='', + swap='face-42'), + 63: + dict( + name='face-40', + id=63, + color=[255, 255, 255], + type='', + swap='face-47'), + 64: + dict( + name='face-41', + id=64, + color=[255, 255, 255], + type='', + swap='face-46'), + 65: + dict( + name='face-42', + id=65, + color=[255, 255, 255], + type='', + swap='face-39'), + 66: + dict( + name='face-43', + id=66, + color=[255, 255, 255], + type='', + swap='face-38'), + 67: + dict( + name='face-44', + id=67, + color=[255, 255, 255], + type='', + swap='face-37'), + 68: + dict( + name='face-45', + id=68, + color=[255, 255, 255], + type='', + swap='face-36'), + 69: + dict( + name='face-46', + id=69, + color=[255, 255, 255], + type='', + swap='face-41'), + 70: + dict( + name='face-47', + id=70, + color=[255, 255, 255], + type='', + swap='face-40'), + 71: + dict( + name='face-48', + id=71, + color=[255, 255, 255], + type='', + swap='face-54'), + 72: + dict( + name='face-49', + id=72, + color=[255, 255, 255], + type='', + swap='face-53'), + 73: + dict( + name='face-50', + id=73, + color=[255, 255, 255], + type='', + swap='face-52'), + 74: + dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), + 75: + dict( + name='face-52', + id=75, + color=[255, 255, 255], + type='', + swap='face-50'), + 76: + dict( + name='face-53', + id=76, + color=[255, 255, 255], + type='', + swap='face-49'), + 77: + dict( + name='face-54', + id=77, + color=[255, 255, 255], + type='', + swap='face-48'), + 78: + dict( + name='face-55', + id=78, + color=[255, 255, 255], + type='', + swap='face-59'), + 79: + dict( + name='face-56', + id=79, + color=[255, 255, 255], + type='', + swap='face-58'), + 80: + dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), + 81: + dict( + name='face-58', + id=81, + color=[255, 255, 255], + type='', + swap='face-56'), + 82: + dict( + name='face-59', + id=82, + color=[255, 255, 255], + type='', + swap='face-55'), + 83: + dict( + name='face-60', + id=83, + color=[255, 255, 255], + type='', + swap='face-64'), + 84: + dict( + name='face-61', + id=84, + color=[255, 255, 255], + type='', + swap='face-63'), + 85: + dict(name='face-62', id=85, color=[255, 255, 255], type='', swap=''), + 86: + dict( + name='face-63', + id=86, + color=[255, 255, 255], + type='', + swap='face-61'), + 87: + dict( + name='face-64', + id=87, + color=[255, 255, 255], + type='', + swap='face-60'), + 88: + dict( + name='face-65', + id=88, + color=[255, 255, 255], + type='', + swap='face-67'), + 89: + dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), + 90: + dict( + name='face-67', + id=90, + color=[255, 255, 255], + type='', + swap='face-65'), + 91: + dict( + name='left_hand_root', + id=91, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 92: + dict( + name='left_thumb1', + id=92, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 93: + dict( + name='left_thumb2', + id=93, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 94: + dict( + name='left_thumb3', + id=94, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 95: + dict( + name='left_thumb4', + id=95, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 96: + dict( + name='left_forefinger1', + id=96, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 97: + dict( + 
name='left_forefinger2', + id=97, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 98: + dict( + name='left_forefinger3', + id=98, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 99: + dict( + name='left_forefinger4', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 100: + dict( + name='left_middle_finger1', + id=100, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 101: + dict( + name='left_middle_finger2', + id=101, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 102: + dict( + name='left_middle_finger3', + id=102, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 103: + dict( + name='left_middle_finger4', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 104: + dict( + name='left_ring_finger1', + id=104, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 105: + dict( + name='left_ring_finger2', + id=105, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 106: + dict( + name='left_ring_finger3', + id=106, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 107: + dict( + name='left_ring_finger4', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 108: + dict( + name='left_pinky_finger1', + id=108, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 109: + dict( + name='left_pinky_finger2', + id=109, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 110: + dict( + name='left_pinky_finger3', + id=110, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 111: + dict( + name='left_pinky_finger4', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 112: + dict( + name='right_hand_root', + id=112, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 113: + dict( + name='right_thumb1', + id=113, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 114: + dict( + name='right_thumb2', + id=114, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 115: + dict( + name='right_thumb3', + id=115, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 116: + dict( + name='right_thumb4', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 117: + dict( + name='right_forefinger1', + id=117, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 118: + dict( + name='right_forefinger2', + id=118, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 119: + dict( + name='right_forefinger3', + id=119, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 120: + dict( + name='right_forefinger4', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 121: + dict( + name='right_middle_finger1', + id=121, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 122: + dict( + name='right_middle_finger2', + id=122, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 123: + dict( + name='right_middle_finger3', + id=123, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 124: + dict( + name='right_middle_finger4', + id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 125: + dict( + name='right_ring_finger1', + id=125, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 126: + dict( + name='right_ring_finger2', + id=126, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 127: + dict( + name='right_ring_finger3', + id=127, + color=[255, 
51, 51], + type='', + swap='left_ring_finger3'), + 128: + dict( + name='right_ring_finger4', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 129: + dict( + name='right_pinky_finger1', + id=129, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 130: + dict( + name='right_pinky_finger2', + id=130, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 131: + dict( + name='right_pinky_finger3', + id=131, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 132: + dict( + name='right_pinky_finger4', + id=132, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ankle', 'left_big_toe'), id=19, color=[0, 255, 0]), + 20: + dict(link=('left_ankle', 'left_small_toe'), id=20, color=[0, 255, 0]), + 21: + dict(link=('left_ankle', 'left_heel'), id=21, color=[0, 255, 0]), + 22: + dict( + link=('right_ankle', 'right_big_toe'), id=22, color=[255, 128, 0]), + 23: + dict( + link=('right_ankle', 'right_small_toe'), + id=23, + color=[255, 128, 0]), + 24: + dict(link=('right_ankle', 'right_heel'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('left_hand_root', 'left_thumb1'), id=25, color=[255, 128, + 0]), + 26: + dict(link=('left_thumb1', 'left_thumb2'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_thumb2', 'left_thumb3'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb3', 'left_thumb4'), id=28, color=[255, 128, 0]), + 29: + dict( + link=('left_hand_root', 'left_forefinger1'), + id=29, + color=[255, 153, 255]), + 30: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=30, + color=[255, 153, 255]), + 31: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=31, + color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=32, + color=[255, 153, 255]), + 33: + dict( + link=('left_hand_root', 'left_middle_finger1'), + id=33, + color=[102, 178, 255]), + 34: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=34, + color=[102, 178, 255]), + 35: + dict( + link=('left_middle_finger2', 
'left_middle_finger3'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_hand_root', 'left_ring_finger1'), + id=37, + color=[255, 51, 51]), + 38: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=38, + color=[255, 51, 51]), + 39: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=39, + color=[255, 51, 51]), + 40: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=40, + color=[255, 51, 51]), + 41: + dict( + link=('left_hand_root', 'left_pinky_finger1'), + id=41, + color=[0, 255, 0]), + 42: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=42, + color=[0, 255, 0]), + 43: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('right_hand_root', 'right_thumb1'), + id=45, + color=[255, 128, 0]), + 46: + dict( + link=('right_thumb1', 'right_thumb2'), id=46, color=[255, 128, 0]), + 47: + dict( + link=('right_thumb2', 'right_thumb3'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb3', 'right_thumb4'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_hand_root', 'right_forefinger1'), + id=49, + color=[255, 153, 255]), + 50: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=50, + color=[255, 153, 255]), + 51: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_hand_root', 'right_middle_finger1'), + id=53, + color=[102, 178, 255]), + 54: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=54, + color=[102, 178, 255]), + 55: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_hand_root', 'right_ring_finger1'), + id=57, + color=[255, 51, 51]), + 58: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=58, + color=[255, 51, 51]), + 59: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_hand_root', 'right_pinky_finger1'), + id=61, + color=[0, 255, 0]), + 62: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=62, + color=[0, 255, 0]), + 63: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=64, + color=[0, 255, 0]) + }, + joint_weights=[1.] 
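+    # joint_weights: a uniform weight of 1.0 for each of the 133 whole-body
+    # keypoints (17 body + 6 foot + 68 face + 21 per hand).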
* 133, + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L175' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, 0.066, + 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, + 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, + 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, + 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, + 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, + 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, + 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, + 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, + 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, + 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, + 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, + 0.019, 0.022, 0.031 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_wholebody_face.py b/mmpose/configs/_base_/datasets/coco_wholebody_face.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fe1e5b336d8ddd668d47123f5c0ceeff580914 --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_wholebody_face.py @@ -0,0 +1,154 @@ +dataset_info = dict( + dataset_name='coco_wholebody_face', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='face-0', id=0, color=[255, 0, 0], type='', swap='face-16'), + 1: + dict(name='face-1', id=1, color=[255, 0, 0], type='', swap='face-15'), + 2: + dict(name='face-2', id=2, color=[255, 0, 0], type='', swap='face-14'), + 3: + dict(name='face-3', id=3, color=[255, 0, 0], type='', swap='face-13'), + 4: + dict(name='face-4', id=4, color=[255, 0, 0], type='', swap='face-12'), + 5: + dict(name='face-5', id=5, color=[255, 0, 0], type='', swap='face-11'), + 6: + dict(name='face-6', id=6, color=[255, 0, 0], type='', swap='face-10'), + 7: + dict(name='face-7', id=7, color=[255, 0, 0], type='', swap='face-9'), + 8: dict(name='face-8', id=8, color=[255, 0, 0], type='', swap=''), + 9: + dict(name='face-9', id=9, color=[255, 0, 0], type='', swap='face-7'), + 10: + dict(name='face-10', id=10, color=[255, 0, 0], type='', swap='face-6'), + 11: + dict(name='face-11', id=11, color=[255, 0, 0], type='', swap='face-5'), + 12: + dict(name='face-12', id=12, color=[255, 0, 0], type='', swap='face-4'), + 13: + dict(name='face-13', id=13, color=[255, 0, 0], type='', swap='face-3'), + 14: + dict(name='face-14', id=14, color=[255, 0, 0], type='', swap='face-2'), + 15: + dict(name='face-15', id=15, color=[255, 0, 0], type='', swap='face-1'), + 16: + dict(name='face-16', id=16, color=[255, 0, 0], type='', swap='face-0'), + 17: dict( + name='face-17', id=17, color=[255, 0, 0], type='', swap='face-26'), + 18: dict( + name='face-18', id=18, color=[255, 0, 0], type='', swap='face-25'), + 19: dict( + name='face-19', id=19, color=[255, 0, 0], type='', swap='face-24'), + 20: dict( + name='face-20', id=20, color=[255, 0, 0], type='', swap='face-23'), + 21: dict( + name='face-21', id=21, 
color=[255, 0, 0], type='', swap='face-22'), + 22: dict( + name='face-22', id=22, color=[255, 0, 0], type='', swap='face-21'), + 23: dict( + name='face-23', id=23, color=[255, 0, 0], type='', swap='face-20'), + 24: dict( + name='face-24', id=24, color=[255, 0, 0], type='', swap='face-19'), + 25: dict( + name='face-25', id=25, color=[255, 0, 0], type='', swap='face-18'), + 26: dict( + name='face-26', id=26, color=[255, 0, 0], type='', swap='face-17'), + 27: dict(name='face-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='face-28', id=28, color=[255, 0, 0], type='', swap=''), + 29: dict(name='face-29', id=29, color=[255, 0, 0], type='', swap=''), + 30: dict(name='face-30', id=30, color=[255, 0, 0], type='', swap=''), + 31: dict( + name='face-31', id=31, color=[255, 0, 0], type='', swap='face-35'), + 32: dict( + name='face-32', id=32, color=[255, 0, 0], type='', swap='face-34'), + 33: dict(name='face-33', id=33, color=[255, 0, 0], type='', swap=''), + 34: dict( + name='face-34', id=34, color=[255, 0, 0], type='', swap='face-32'), + 35: dict( + name='face-35', id=35, color=[255, 0, 0], type='', swap='face-31'), + 36: dict( + name='face-36', id=36, color=[255, 0, 0], type='', swap='face-45'), + 37: dict( + name='face-37', id=37, color=[255, 0, 0], type='', swap='face-44'), + 38: dict( + name='face-38', id=38, color=[255, 0, 0], type='', swap='face-43'), + 39: dict( + name='face-39', id=39, color=[255, 0, 0], type='', swap='face-42'), + 40: dict( + name='face-40', id=40, color=[255, 0, 0], type='', swap='face-47'), + 41: dict( + name='face-41', id=41, color=[255, 0, 0], type='', swap='face-46'), + 42: dict( + name='face-42', id=42, color=[255, 0, 0], type='', swap='face-39'), + 43: dict( + name='face-43', id=43, color=[255, 0, 0], type='', swap='face-38'), + 44: dict( + name='face-44', id=44, color=[255, 0, 0], type='', swap='face-37'), + 45: dict( + name='face-45', id=45, color=[255, 0, 0], type='', swap='face-36'), + 46: dict( + name='face-46', id=46, color=[255, 0, 0], type='', swap='face-41'), + 47: dict( + name='face-47', id=47, color=[255, 0, 0], type='', swap='face-40'), + 48: dict( + name='face-48', id=48, color=[255, 0, 0], type='', swap='face-54'), + 49: dict( + name='face-49', id=49, color=[255, 0, 0], type='', swap='face-53'), + 50: dict( + name='face-50', id=50, color=[255, 0, 0], type='', swap='face-52'), + 51: dict(name='face-51', id=52, color=[255, 0, 0], type='', swap=''), + 52: dict( + name='face-52', id=52, color=[255, 0, 0], type='', swap='face-50'), + 53: dict( + name='face-53', id=53, color=[255, 0, 0], type='', swap='face-49'), + 54: dict( + name='face-54', id=54, color=[255, 0, 0], type='', swap='face-48'), + 55: dict( + name='face-55', id=55, color=[255, 0, 0], type='', swap='face-59'), + 56: dict( + name='face-56', id=56, color=[255, 0, 0], type='', swap='face-58'), + 57: dict(name='face-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='face-58', id=58, color=[255, 0, 0], type='', swap='face-56'), + 59: dict( + name='face-59', id=59, color=[255, 0, 0], type='', swap='face-55'), + 60: dict( + name='face-60', id=60, color=[255, 0, 0], type='', swap='face-64'), + 61: dict( + name='face-61', id=61, color=[255, 0, 0], type='', swap='face-63'), + 62: dict(name='face-62', id=62, color=[255, 0, 0], type='', swap=''), + 63: dict( + name='face-63', id=63, color=[255, 0, 0], type='', swap='face-61'), + 64: dict( + name='face-64', id=64, color=[255, 0, 0], type='', swap='face-60'), + 65: dict( + name='face-65', id=65, color=[255, 0, 0], 
type='', swap='face-67'), + 66: dict(name='face-66', id=66, color=[255, 0, 0], type='', swap=''), + 67: dict( + name='face-67', id=67, color=[255, 0, 0], type='', swap='face-65') + }, + skeleton_info={}, + joint_weights=[1.] * 68, + + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L177' + sigmas=[ + 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, 0.025, 0.020, 0.023, + 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, 0.013, 0.012, 0.011, + 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, 0.009, 0.007, 0.007, + 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, 0.011, 0.009, 0.011, + 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, 0.034, 0.008, 0.008, + 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, 0.009, 0.009, 0.007, + 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, 0.008 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_wholebody_hand.py b/mmpose/configs/_base_/datasets/coco_wholebody_hand.py new file mode 100644 index 0000000000000000000000000000000000000000..1910b2ced5a8b31cd6f83911e41cae9f1a580222 --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_wholebody_hand.py @@ -0,0 +1,147 @@ +dataset_info = dict( + dataset_name='coco_wholebody_hand', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, 
color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[ + 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, 0.018, + 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, 0.022, + 0.031 + ]) diff --git a/mmpose/configs/_base_/datasets/coco_wholebody_openpose.py b/mmpose/configs/_base_/datasets/coco_wholebody_openpose.py new file mode 100644 index 0000000000000000000000000000000000000000..f05dda18abc4f3b02020d5ad4fc19154e715f97d --- /dev/null +++ b/mmpose/configs/_base_/datasets/coco_wholebody_openpose.py @@ -0,0 +1,1128 @@ +dataset_info = dict( + dataset_name='coco_wholebody_openpose', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[255, 0, 0], type='upper', swap=''), + 1: + dict(name='neck', id=1, color=[255, 85, 0], type='upper', swap=''), + 2: + dict( + name='right_shoulder', + id=2, + color=[255, 170, 0], + type='upper', + swap='left_shoulder'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 255, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='right_wrist', + id=4, + color=[170, 255, 0], + type='upper', + swap='left_wrist'), + 5: + dict( + name='left_shoulder', + id=5, + color=[85, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_elbow', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 7: + dict( + name='left_wrist', + id=7, + color=[0, 255, 85], + type='upper', + swap='right_wrist'), + 8: + dict( + name='right_hip', + id=8, + color=[0, 255, 170], + type='lower', + swap='left_hip'), + 9: + dict( + name='right_knee', + id=9, + color=[0, 255, 255], + type='lower', + swap='left_knee'), + 10: + dict( + name='right_ankle', + id=10, + color=[0, 170, 255], + type='lower', + swap='left_ankle'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 85, 255], + 
type='lower', + swap='right_hip'), + 12: + dict( + name='left_knee', + id=12, + color=[0, 0, 255], + type='lower', + swap='right_knee'), + 13: + dict( + name='left_ankle', + id=13, + color=[85, 0, 255], + type='lower', + swap='right_ankle'), + 14: + dict( + name='right_eye', + id=14, + color=[170, 0, 255], + type='upper', + swap='left_eye'), + 15: + dict( + name='left_eye', + id=15, + color=[255, 0, 255], + type='upper', + swap='right_eye'), + 16: + dict( + name='right_ear', + id=16, + color=[255, 0, 170], + type='upper', + swap='left_ear'), + 17: + dict( + name='left_ear', + id=17, + color=[255, 0, 85], + type='upper', + swap='right_ear'), + 18: + dict( + name='left_big_toe', + id=17, + color=[0, 0, 0], + type='lower', + swap='right_big_toe'), + 19: + dict( + name='left_small_toe', + id=18, + color=[0, 0, 0], + type='lower', + swap='right_small_toe'), + 20: + dict( + name='left_heel', + id=19, + color=[0, 0, 0], + type='lower', + swap='right_heel'), + 21: + dict( + name='right_big_toe', + id=20, + color=[0, 0, 0], + type='lower', + swap='left_big_toe'), + 22: + dict( + name='right_small_toe', + id=21, + color=[0, 0, 0], + type='lower', + swap='left_small_toe'), + 23: + dict( + name='right_heel', + id=22, + color=[0, 0, 0], + type='lower', + swap='left_heel'), + 24: + dict( + name='face-0', + id=23, + color=[255, 255, 255], + type='', + swap='face-16'), + 25: + dict( + name='face-1', + id=24, + color=[255, 255, 255], + type='', + swap='face-15'), + 26: + dict( + name='face-2', + id=25, + color=[255, 255, 255], + type='', + swap='face-14'), + 27: + dict( + name='face-3', + id=26, + color=[255, 255, 255], + type='', + swap='face-13'), + 28: + dict( + name='face-4', + id=27, + color=[255, 255, 255], + type='', + swap='face-12'), + 29: + dict( + name='face-5', + id=28, + color=[255, 255, 255], + type='', + swap='face-11'), + 30: + dict( + name='face-6', + id=29, + color=[255, 255, 255], + type='', + swap='face-10'), + 31: + dict( + name='face-7', + id=30, + color=[255, 255, 255], + type='', + swap='face-9'), + 32: + dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), + 33: + dict( + name='face-9', + id=32, + color=[255, 255, 255], + type='', + swap='face-7'), + 34: + dict( + name='face-10', + id=33, + color=[255, 255, 255], + type='', + swap='face-6'), + 35: + dict( + name='face-11', + id=34, + color=[255, 255, 255], + type='', + swap='face-5'), + 36: + dict( + name='face-12', + id=35, + color=[255, 255, 255], + type='', + swap='face-4'), + 37: + dict( + name='face-13', + id=36, + color=[255, 255, 255], + type='', + swap='face-3'), + 38: + dict( + name='face-14', + id=37, + color=[255, 255, 255], + type='', + swap='face-2'), + 39: + dict( + name='face-15', + id=38, + color=[255, 255, 255], + type='', + swap='face-1'), + 40: + dict( + name='face-16', + id=39, + color=[255, 255, 255], + type='', + swap='face-0'), + 41: + dict( + name='face-17', + id=40, + color=[255, 255, 255], + type='', + swap='face-26'), + 42: + dict( + name='face-18', + id=41, + color=[255, 255, 255], + type='', + swap='face-25'), + 43: + dict( + name='face-19', + id=42, + color=[255, 255, 255], + type='', + swap='face-24'), + 44: + dict( + name='face-20', + id=43, + color=[255, 255, 255], + type='', + swap='face-23'), + 45: + dict( + name='face-21', + id=44, + color=[255, 255, 255], + type='', + swap='face-22'), + 46: + dict( + name='face-22', + id=45, + color=[255, 255, 255], + type='', + swap='face-21'), + 47: + dict( + name='face-23', + id=46, + color=[255, 255, 255], + type='', + swap='face-20'), + 48: 
+ dict( + name='face-24', + id=47, + color=[255, 255, 255], + type='', + swap='face-19'), + 49: + dict( + name='face-25', + id=48, + color=[255, 255, 255], + type='', + swap='face-18'), + 50: + dict( + name='face-26', + id=49, + color=[255, 255, 255], + type='', + swap='face-17'), + 51: + dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), + 52: + dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), + 53: + dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), + 54: + dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), + 55: + dict( + name='face-31', + id=54, + color=[255, 255, 255], + type='', + swap='face-35'), + 56: + dict( + name='face-32', + id=55, + color=[255, 255, 255], + type='', + swap='face-34'), + 57: + dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), + 58: + dict( + name='face-34', + id=57, + color=[255, 255, 255], + type='', + swap='face-32'), + 59: + dict( + name='face-35', + id=58, + color=[255, 255, 255], + type='', + swap='face-31'), + 60: + dict( + name='face-36', + id=59, + color=[255, 255, 255], + type='', + swap='face-45'), + 61: + dict( + name='face-37', + id=60, + color=[255, 255, 255], + type='', + swap='face-44'), + 62: + dict( + name='face-38', + id=61, + color=[255, 255, 255], + type='', + swap='face-43'), + 63: + dict( + name='face-39', + id=62, + color=[255, 255, 255], + type='', + swap='face-42'), + 64: + dict( + name='face-40', + id=63, + color=[255, 255, 255], + type='', + swap='face-47'), + 65: + dict( + name='face-41', + id=64, + color=[255, 255, 255], + type='', + swap='face-46'), + 66: + dict( + name='face-42', + id=65, + color=[255, 255, 255], + type='', + swap='face-39'), + 67: + dict( + name='face-43', + id=66, + color=[255, 255, 255], + type='', + swap='face-38'), + 68: + dict( + name='face-44', + id=67, + color=[255, 255, 255], + type='', + swap='face-37'), + 69: + dict( + name='face-45', + id=68, + color=[255, 255, 255], + type='', + swap='face-36'), + 70: + dict( + name='face-46', + id=69, + color=[255, 255, 255], + type='', + swap='face-41'), + 71: + dict( + name='face-47', + id=70, + color=[255, 255, 255], + type='', + swap='face-40'), + 72: + dict( + name='face-48', + id=71, + color=[255, 255, 255], + type='', + swap='face-54'), + 73: + dict( + name='face-49', + id=72, + color=[255, 255, 255], + type='', + swap='face-53'), + 74: + dict( + name='face-50', + id=73, + color=[255, 255, 255], + type='', + swap='face-52'), + 75: + dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), + 76: + dict( + name='face-52', + id=75, + color=[255, 255, 255], + type='', + swap='face-50'), + 77: + dict( + name='face-53', + id=76, + color=[255, 255, 255], + type='', + swap='face-49'), + 78: + dict( + name='face-54', + id=77, + color=[255, 255, 255], + type='', + swap='face-48'), + 79: + dict( + name='face-55', + id=78, + color=[255, 255, 255], + type='', + swap='face-59'), + 80: + dict( + name='face-56', + id=79, + color=[255, 255, 255], + type='', + swap='face-58'), + 81: + dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), + 82: + dict( + name='face-58', + id=81, + color=[255, 255, 255], + type='', + swap='face-56'), + 83: + dict( + name='face-59', + id=82, + color=[255, 255, 255], + type='', + swap='face-55'), + 84: + dict( + name='face-60', + id=83, + color=[255, 255, 255], + type='', + swap='face-64'), + 85: + dict( + name='face-61', + id=84, + color=[255, 255, 255], + type='', + swap='face-63'), + 86: + dict(name='face-62', id=85, 
color=[255, 255, 255], type='', swap=''), + 87: + dict( + name='face-63', + id=86, + color=[255, 255, 255], + type='', + swap='face-61'), + 88: + dict( + name='face-64', + id=87, + color=[255, 255, 255], + type='', + swap='face-60'), + 89: + dict( + name='face-65', + id=88, + color=[255, 255, 255], + type='', + swap='face-67'), + 90: + dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), + 91: + dict( + name='face-67', + id=90, + color=[255, 255, 255], + type='', + swap='face-65'), + 92: + dict( + name='left_hand_root', + id=92, + color=[0, 0, 255], + type='', + swap='right_hand_root'), + 93: + dict( + name='left_thumb1', + id=93, + color=[0, 0, 255], + type='', + swap='right_thumb1'), + 94: + dict( + name='left_thumb2', + id=94, + color=[0, 0, 255], + type='', + swap='right_thumb2'), + 95: + dict( + name='left_thumb3', + id=95, + color=[0, 0, 255], + type='', + swap='right_thumb3'), + 96: + dict( + name='left_thumb4', + id=96, + color=[0, 0, 255], + type='', + swap='right_thumb4'), + 97: + dict( + name='left_forefinger1', + id=97, + color=[0, 0, 255], + type='', + swap='right_forefinger1'), + 98: + dict( + name='left_forefinger2', + id=98, + color=[0, 0, 255], + type='', + swap='right_forefinger2'), + 99: + dict( + name='left_forefinger3', + id=99, + color=[0, 0, 255], + type='', + swap='right_forefinger3'), + 100: + dict( + name='left_forefinger4', + id=100, + color=[0, 0, 255], + type='', + swap='right_forefinger4'), + 101: + dict( + name='left_middle_finger1', + id=101, + color=[0, 0, 255], + type='', + swap='right_middle_finger1'), + 102: + dict( + name='left_middle_finger2', + id=102, + color=[0, 0, 255], + type='', + swap='right_middle_finger2'), + 103: + dict( + name='left_middle_finger3', + id=103, + color=[0, 0, 255], + type='', + swap='right_middle_finger3'), + 104: + dict( + name='left_middle_finger4', + id=104, + color=[0, 0, 255], + type='', + swap='right_middle_finger4'), + 105: + dict( + name='left_ring_finger1', + id=105, + color=[0, 0, 255], + type='', + swap='right_ring_finger1'), + 106: + dict( + name='left_ring_finger2', + id=106, + color=[0, 0, 255], + type='', + swap='right_ring_finger2'), + 107: + dict( + name='left_ring_finger3', + id=107, + color=[0, 0, 255], + type='', + swap='right_ring_finger3'), + 108: + dict( + name='left_ring_finger4', + id=108, + color=[0, 0, 255], + type='', + swap='right_ring_finger4'), + 109: + dict( + name='left_pinky_finger1', + id=109, + color=[0, 0, 255], + type='', + swap='right_pinky_finger1'), + 110: + dict( + name='left_pinky_finger2', + id=110, + color=[0, 0, 255], + type='', + swap='right_pinky_finger2'), + 111: + dict( + name='left_pinky_finger3', + id=111, + color=[0, 0, 255], + type='', + swap='right_pinky_finger3'), + 112: + dict( + name='left_pinky_finger4', + id=112, + color=[0, 0, 255], + type='', + swap='right_pinky_finger4'), + 113: + dict( + name='right_hand_root', + id=113, + color=[0, 0, 255], + type='', + swap='left_hand_root'), + 114: + dict( + name='right_thumb1', + id=114, + color=[0, 0, 255], + type='', + swap='left_thumb1'), + 115: + dict( + name='right_thumb2', + id=115, + color=[0, 0, 255], + type='', + swap='left_thumb2'), + 116: + dict( + name='right_thumb3', + id=116, + color=[0, 0, 255], + type='', + swap='left_thumb3'), + 117: + dict( + name='right_thumb4', + id=117, + color=[0, 0, 255], + type='', + swap='left_thumb4'), + 118: + dict( + name='right_forefinger1', + id=118, + color=[0, 0, 255], + type='', + swap='left_forefinger1'), + 119: + dict( + name='right_forefinger2', + id=119, + 
color=[0, 0, 255], + type='', + swap='left_forefinger2'), + 120: + dict( + name='right_forefinger3', + id=120, + color=[0, 0, 255], + type='', + swap='left_forefinger3'), + 121: + dict( + name='right_forefinger4', + id=121, + color=[0, 0, 255], + type='', + swap='left_forefinger4'), + 122: + dict( + name='right_middle_finger1', + id=122, + color=[0, 0, 255], + type='', + swap='left_middle_finger1'), + 123: + dict( + name='right_middle_finger2', + id=123, + color=[0, 0, 255], + type='', + swap='left_middle_finger2'), + 124: + dict( + name='right_middle_finger3', + id=124, + color=[0, 0, 255], + type='', + swap='left_middle_finger3'), + 125: + dict( + name='right_middle_finger4', + id=125, + color=[0, 0, 255], + type='', + swap='left_middle_finger4'), + 126: + dict( + name='right_ring_finger1', + id=126, + color=[0, 0, 255], + type='', + swap='left_ring_finger1'), + 127: + dict( + name='right_ring_finger2', + id=127, + color=[0, 0, 255], + type='', + swap='left_ring_finger2'), + 128: + dict( + name='right_ring_finger3', + id=128, + color=[0, 0, 255], + type='', + swap='left_ring_finger3'), + 129: + dict( + name='right_ring_finger4', + id=129, + color=[0, 0, 255], + type='', + swap='left_ring_finger4'), + 130: + dict( + name='right_pinky_finger1', + id=130, + color=[0, 0, 255], + type='', + swap='left_pinky_finger1'), + 131: + dict( + name='right_pinky_finger2', + id=131, + color=[0, 0, 255], + type='', + swap='left_pinky_finger2'), + 132: + dict( + name='right_pinky_finger3', + id=132, + color=[0, 0, 255], + type='', + swap='left_pinky_finger3'), + 133: + dict( + name='right_pinky_finger4', + id=133, + color=[0, 0, 255], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('neck', 'right_shoulder'), id=0, color=[255, 0, 0]), + 1: + dict(link=('neck', 'left_shoulder'), id=1, color=[255, 85, 0]), + 2: + dict( + link=('right_shoulder', 'right_elbow'), id=2, color=[255, 170, 0]), + 3: + dict(link=('right_elbow', 'right_wrist'), id=3, color=[255, 255, 0]), + 4: + dict(link=('left_shoulder', 'left_elbow'), id=4, color=[170, 255, 0]), + 5: + dict(link=('left_elbow', 'left_wrist'), id=5, color=[85, 255, 0]), + 6: + dict(link=('neck', 'right_hip'), id=6, color=[0, 255, 0]), + 7: + dict(link=('right_hip', 'right_knee'), id=7, color=[0, 255, 85]), + 8: + dict(link=('right_knee', 'right_ankle'), id=8, color=[0, 255, 170]), + 9: + dict(link=('neck', 'left_hip'), id=9, color=[0, 255, 225]), + 10: + dict(link=('left_hip', 'left_knee'), id=10, color=[0, 170, 255]), + 11: + dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 85, 255]), + 12: + dict(link=('neck', 'nose'), id=12, color=[0, 0, 255]), + 13: + dict(link=('nose', 'right_eye'), id=13, color=[255, 0, 170]), + 14: + dict(link=('right_eye', 'right_ear'), id=14, color=[170, 0, 255]), + 15: + dict(link=('nose', 'left_eye'), id=15, color=[255, 0, 255]), + 16: + dict(link=('left_eye', 'left_ear'), id=16, color=[255, 0, 170]), + 17: + dict(link=('left_hand_root', 'left_thumb1'), id=17, color=[255, 0, 0]), + 18: + dict(link=('left_thumb1', 'left_thumb2'), id=18, color=[255, 76, 0]), + 19: + dict(link=('left_thumb2', 'left_thumb3'), id=19, color=[255, 153, 0]), + 20: + dict(link=('left_thumb3', 'left_thumb4'), id=20, color=[255, 230, 0]), + 21: + dict( + link=('left_hand_root', 'left_forefinger1'), + id=21, + color=[204, 255, 0]), + 22: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=22, + color=[128, 255, 0]), + 23: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=23, + color=[51, 255, 0]), + 24: + 
dict( + link=('left_forefinger3', 'left_forefinger4'), + id=24, + color=[0, 255, 26]), + 25: + dict( + link=('left_hand_root', 'left_middle_finger1'), + id=25, + color=[0, 255, 102]), + 26: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=26, + color=[0, 255, 178]), + 27: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=27, + color=[0, 255, 255]), + 28: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=28, + color=[0, 178, 255]), + 29: + dict( + link=('left_hand_root', 'left_ring_finger1'), + id=29, + color=[0, 102, 255]), + 30: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=30, + color=[0, 26, 255]), + 31: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=31, + color=[51, 0, 255]), + 32: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=32, + color=[128, 0, 255]), + 33: + dict( + link=('left_hand_root', 'left_pinky_finger1'), + id=33, + color=[204, 0, 255]), + 34: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=34, + color=[255, 0, 230]), + 35: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=35, + color=[255, 0, 153]), + 36: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=36, + color=[255, 0, 76]), + 37: + dict( + link=('right_hand_root', 'right_thumb1'), id=37, color=[255, 0, + 0]), + 38: + dict(link=('right_thumb1', 'right_thumb2'), id=38, color=[255, 76, 0]), + 39: + dict( + link=('right_thumb2', 'right_thumb3'), id=39, color=[255, 153, 0]), + 40: + dict( + link=('right_thumb3', 'right_thumb4'), id=40, color=[255, 230, 0]), + 41: + dict( + link=('right_hand_root', 'right_forefinger1'), + id=41, + color=[204, 255, 0]), + 42: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=42, + color=[128, 255, 0]), + 43: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=43, + color=[51, 255, 0]), + 44: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=44, + color=[0, 255, 26]), + 45: + dict( + link=('right_hand_root', 'right_middle_finger1'), + id=45, + color=[0, 255, 102]), + 46: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=46, + color=[0, 255, 178]), + 47: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=47, + color=[255, 255, 255]), + 48: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=48, + color=[0, 178, 255]), + 49: + dict( + link=('right_hand_root', 'right_ring_finger1'), + id=49, + color=[0, 102, 255]), + 50: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=50, + color=[0, 26, 255]), + 51: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=51, + color=[51, 0, 255]), + 52: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=52, + color=[128, 0, 255]), + 53: + dict( + link=('right_hand_root', 'right_pinky_finger1'), + id=53, + color=[204, 0, 255]), + 54: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=54, + color=[255, 0, 230]), + 55: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=55, + color=[255, 0, 153]), + 56: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=56, + color=[255, 0, 76]) + }, + joint_weights=[1.] 
* 134, + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L175' + sigmas=[ + 0.026, 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, + 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, + 0.066, 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, + 0.031, 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, + 0.045, 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, + 0.015, 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, + 0.017, 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, + 0.010, 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, + 0.009, 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, + 0.01, 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, + 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, + 0.019, 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, + 0.024, 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, + 0.02, 0.019, 0.022, 0.031 + ]) diff --git a/mmpose/configs/_base_/datasets/cofw.py b/mmpose/configs/_base_/datasets/cofw.py new file mode 100644 index 0000000000000000000000000000000000000000..d528bf2f2f7e63adbff3ed56e18bca8b02165e42 --- /dev/null +++ b/mmpose/configs/_base_/datasets/cofw.py @@ -0,0 +1,57 @@ +dataset_info = dict( + dataset_name='cofw', + paper_info=dict( + author='Burgos-Artizzu, Xavier P and Perona, ' + r'Pietro and Doll{\'a}r, Piotr', + title='Robust face landmark estimation under occlusion', + container='Proceedings of the IEEE international ' + 'conference on computer vision', + year='2013', + homepage='http://www.vision.caltech.edu/xpburgos/ICCV13/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-1'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-0'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-6'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-7'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-4'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-5'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-11'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-10'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-15'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-13'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-17'), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-16'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-19'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-18'), + 20: dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap=''), + 21: dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap=''), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-23'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 
0], type='', swap='kpt-22'), + 24: dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap=''), + 25: dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap=''), + 26: dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap=''), + 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='') + }, + skeleton_info={}, + joint_weights=[1.] * 29, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/crowdpose.py b/mmpose/configs/_base_/datasets/crowdpose.py new file mode 100644 index 0000000000000000000000000000000000000000..45086531a601870716eed15a32c5413c0e24b7ae --- /dev/null +++ b/mmpose/configs/_base_/datasets/crowdpose.py @@ -0,0 +1,147 @@ +dataset_info = dict( + dataset_name='crowdpose', + paper_info=dict( + author='Li, Jiefeng and Wang, Can and Zhu, Hao and ' + 'Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu', + title='CrowdPose: Efficient Crowded Scenes Pose Estimation ' + 'and A New Benchmark', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2019', + homepage='https://github.com/Jeff-sjtu/CrowdPose', + ), + keypoint_info={ + 0: + dict( + name='left_shoulder', + id=0, + color=[51, 153, 255], + type='upper', + swap='right_shoulder'), + 1: + dict( + name='right_shoulder', + id=1, + color=[51, 153, 255], + type='upper', + swap='left_shoulder'), + 2: + dict( + name='left_elbow', + id=2, + color=[51, 153, 255], + type='upper', + swap='right_elbow'), + 3: + dict( + name='right_elbow', + id=3, + color=[51, 153, 255], + type='upper', + swap='left_elbow'), + 4: + dict( + name='left_wrist', + id=4, + color=[51, 153, 255], + type='upper', + swap='right_wrist'), + 5: + dict( + name='right_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='left_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[255, 128, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_hip', + id=7, + color=[0, 255, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='left_knee', + id=8, + color=[255, 128, 0], + type='lower', + swap='right_knee'), + 9: + dict( + name='right_knee', + id=9, + color=[0, 255, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_ankle', + id=10, + color=[255, 128, 0], + type='lower', + swap='right_ankle'), + 11: + dict( + name='right_ankle', + id=11, + color=[0, 255, 0], + type='lower', + swap='left_ankle'), + 12: + dict( + name='top_head', id=12, color=[255, 128, 0], type='upper', + swap=''), + 13: + dict(name='neck', id=13, color=[0, 255, 0], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 
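+        # The remaining entries link the head and neck keypoints. Every
+        # 'link' refers to names declared in keypoint_info above, and the
+        # ids/colors here mainly drive visualization. Left/right flip pairs
+        # are not listed explicitly; they are usually derived from the
+        # 'swap' fields, roughly like (a sketch, assuming dataset_info is
+        # available as a plain dict):
+        #   flip_pairs = [(k['name'], k['swap'])
+        #                 for k in dataset_info['keypoint_info'].values()
+        #                 if k['swap']]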
12: + dict(link=('top_head', 'neck'), id=12, color=[51, 153, 255]), + 13: + dict(link=('right_shoulder', 'neck'), id=13, color=[51, 153, 255]), + 14: + dict(link=('left_shoulder', 'neck'), id=14, color=[51, 153, 255]) + }, + joint_weights=[ + 0.2, 0.2, 0.2, 1.3, 1.5, 0.2, 1.3, 1.5, 0.2, 0.2, 0.5, 0.2, 0.2, 0.5 + ], + sigmas=[ + 0.079, 0.079, 0.072, 0.072, 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, + 0.089, 0.089, 0.079, 0.079 + ]) diff --git a/mmpose/configs/_base_/datasets/deepfashion2.py b/mmpose/configs/_base_/datasets/deepfashion2.py new file mode 100644 index 0000000000000000000000000000000000000000..f65d1bb591fab8f06a79b5d595478a282acd8b3e --- /dev/null +++ b/mmpose/configs/_base_/datasets/deepfashion2.py @@ -0,0 +1,2660 @@ +colors = dict( + sss=[255, 128, 0], # short_sleeve_shirt + lss=[255, 0, 128], # long_sleeved_shirt + sso=[128, 0, 255], # short_sleeved_outwear + lso=[0, 128, 255], # long_sleeved_outwear + vest=[0, 128, 128], # vest + sling=[0, 0, 128], # sling + shorts=[128, 128, 128], # shorts + trousers=[128, 0, 128], # trousers + skirt=[64, 128, 128], # skirt + ssd=[64, 64, 128], # short_sleeved_dress + lsd=[128, 64, 0], # long_sleeved_dress + vd=[128, 64, 255], # vest_dress + sd=[128, 64, 0], # sling_dress +) +dataset_info = dict( + dataset_name='deepfashion2', + paper_info=dict( + author='Yuying Ge and Ruimao Zhang and Lingyun Wu ' + 'and Xiaogang Wang and Xiaoou Tang and Ping Luo', + title='DeepFashion2: A Versatile Benchmark for ' + 'Detection, Pose Estimation, Segmentation and ' + 'Re-Identification of Clothing Images', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2019', + homepage='https://github.com/switchablenorms/DeepFashion2', + ), + keypoint_info={ + # short_sleeved_shirt + 0: + dict(name='sss_kpt1', id=0, color=colors['sss'], type='', swap=''), + 1: + dict( + name='sss_kpt2', + id=1, + color=colors['sss'], + type='', + swap='sss_kpt6'), + 2: + dict( + name='sss_kpt3', + id=2, + color=colors['sss'], + type='', + swap='sss_kpt5'), + 3: + dict(name='sss_kpt4', id=3, color=colors['sss'], type='', swap=''), + 4: + dict( + name='sss_kpt5', + id=4, + color=colors['sss'], + type='', + swap='sss_kpt3'), + 5: + dict( + name='sss_kpt6', + id=5, + color=colors['sss'], + type='', + swap='sss_kpt2'), + 6: + dict( + name='sss_kpt7', + id=6, + color=colors['sss'], + type='', + swap='sss_kpt25'), + 7: + dict( + name='sss_kpt8', + id=7, + color=colors['sss'], + type='', + swap='sss_kpt24'), + 8: + dict( + name='sss_kpt9', + id=8, + color=colors['sss'], + type='', + swap='sss_kpt23'), + 9: + dict( + name='sss_kpt10', + id=9, + color=colors['sss'], + type='', + swap='sss_kpt22'), + 10: + dict( + name='sss_kpt11', + id=10, + color=colors['sss'], + type='', + swap='sss_kpt21'), + 11: + dict( + name='sss_kpt12', + id=11, + color=colors['sss'], + type='', + swap='sss_kpt20'), + 12: + dict( + name='sss_kpt13', + id=12, + color=colors['sss'], + type='', + swap='sss_kpt19'), + 13: + dict( + name='sss_kpt14', + id=13, + color=colors['sss'], + type='', + swap='sss_kpt18'), + 14: + dict( + name='sss_kpt15', + id=14, + color=colors['sss'], + type='', + swap='sss_kpt17'), + 15: + dict(name='sss_kpt16', id=15, color=colors['sss'], type='', swap=''), + 16: + dict( + name='sss_kpt17', + id=16, + color=colors['sss'], + type='', + swap='sss_kpt15'), + 17: + dict( + name='sss_kpt18', + id=17, + color=colors['sss'], + type='', + swap='sss_kpt14'), + 18: + dict( + name='sss_kpt19', + id=18, + color=colors['sss'], + type='', + 
swap='sss_kpt13'), + 19: + dict( + name='sss_kpt20', + id=19, + color=colors['sss'], + type='', + swap='sss_kpt12'), + 20: + dict( + name='sss_kpt21', + id=20, + color=colors['sss'], + type='', + swap='sss_kpt11'), + 21: + dict( + name='sss_kpt22', + id=21, + color=colors['sss'], + type='', + swap='sss_kpt10'), + 22: + dict( + name='sss_kpt23', + id=22, + color=colors['sss'], + type='', + swap='sss_kpt9'), + 23: + dict( + name='sss_kpt24', + id=23, + color=colors['sss'], + type='', + swap='sss_kpt8'), + 24: + dict( + name='sss_kpt25', + id=24, + color=colors['sss'], + type='', + swap='sss_kpt7'), + # long_sleeved_shirt + 25: + dict(name='lss_kpt1', id=25, color=colors['lss'], type='', swap=''), + 26: + dict( + name='lss_kpt2', + id=26, + color=colors['lss'], + type='', + swap='lss_kpt6'), + 27: + dict( + name='lss_kpt3', + id=27, + color=colors['lss'], + type='', + swap='lss_kpt5'), + 28: + dict(name='lss_kpt4', id=28, color=colors['lss'], type='', swap=''), + 29: + dict( + name='lss_kpt5', + id=29, + color=colors['lss'], + type='', + swap='lss_kpt3'), + 30: + dict( + name='lss_kpt6', + id=30, + color=colors['lss'], + type='', + swap='lss_kpt2'), + 31: + dict( + name='lss_kpt7', + id=31, + color=colors['lss'], + type='', + swap='lss_kpt33'), + 32: + dict( + name='lss_kpt8', + id=32, + color=colors['lss'], + type='', + swap='lss_kpt32'), + 33: + dict( + name='lss_kpt9', + id=33, + color=colors['lss'], + type='', + swap='lss_kpt31'), + 34: + dict( + name='lss_kpt10', + id=34, + color=colors['lss'], + type='', + swap='lss_kpt30'), + 35: + dict( + name='lss_kpt11', + id=35, + color=colors['lss'], + type='', + swap='lss_kpt29'), + 36: + dict( + name='lss_kpt12', + id=36, + color=colors['lss'], + type='', + swap='lss_kpt28'), + 37: + dict( + name='lss_kpt13', + id=37, + color=colors['lss'], + type='', + swap='lss_kpt27'), + 38: + dict( + name='lss_kpt14', + id=38, + color=colors['lss'], + type='', + swap='lss_kpt26'), + 39: + dict( + name='lss_kpt15', + id=39, + color=colors['lss'], + type='', + swap='lss_kpt25'), + 40: + dict( + name='lss_kpt16', + id=40, + color=colors['lss'], + type='', + swap='lss_kpt24'), + 41: + dict( + name='lss_kpt17', + id=41, + color=colors['lss'], + type='', + swap='lss_kpt23'), + 42: + dict( + name='lss_kpt18', + id=42, + color=colors['lss'], + type='', + swap='lss_kpt22'), + 43: + dict( + name='lss_kpt19', + id=43, + color=colors['lss'], + type='', + swap='lss_kpt21'), + 44: + dict(name='lss_kpt20', id=44, color=colors['lss'], type='', swap=''), + 45: + dict( + name='lss_kpt21', + id=45, + color=colors['lss'], + type='', + swap='lss_kpt19'), + 46: + dict( + name='lss_kpt22', + id=46, + color=colors['lss'], + type='', + swap='lss_kpt18'), + 47: + dict( + name='lss_kpt23', + id=47, + color=colors['lss'], + type='', + swap='lss_kpt17'), + 48: + dict( + name='lss_kpt24', + id=48, + color=colors['lss'], + type='', + swap='lss_kpt16'), + 49: + dict( + name='lss_kpt25', + id=49, + color=colors['lss'], + type='', + swap='lss_kpt15'), + 50: + dict( + name='lss_kpt26', + id=50, + color=colors['lss'], + type='', + swap='lss_kpt14'), + 51: + dict( + name='lss_kpt27', + id=51, + color=colors['lss'], + type='', + swap='lss_kpt13'), + 52: + dict( + name='lss_kpt28', + id=52, + color=colors['lss'], + type='', + swap='lss_kpt12'), + 53: + dict( + name='lss_kpt29', + id=53, + color=colors['lss'], + type='', + swap='lss_kpt11'), + 54: + dict( + name='lss_kpt30', + id=54, + color=colors['lss'], + type='', + swap='lss_kpt10'), + 55: + dict( + name='lss_kpt31', + id=55, + 
color=colors['lss'], + type='', + swap='lss_kpt9'), + 56: + dict( + name='lss_kpt32', + id=56, + color=colors['lss'], + type='', + swap='lss_kpt8'), + 57: + dict( + name='lss_kpt33', + id=57, + color=colors['lss'], + type='', + swap='lss_kpt7'), + # short_sleeved_outwear + 58: + dict(name='sso_kpt1', id=58, color=colors['sso'], type='', swap=''), + 59: + dict( + name='sso_kpt2', + id=59, + color=colors['sso'], + type='', + swap='sso_kpt26'), + 60: + dict( + name='sso_kpt3', + id=60, + color=colors['sso'], + type='', + swap='sso_kpt5'), + 61: + dict( + name='sso_kpt4', + id=61, + color=colors['sso'], + type='', + swap='sso_kpt6'), + 62: + dict( + name='sso_kpt5', + id=62, + color=colors['sso'], + type='', + swap='sso_kpt3'), + 63: + dict( + name='sso_kpt6', + id=63, + color=colors['sso'], + type='', + swap='sso_kpt4'), + 64: + dict( + name='sso_kpt7', + id=64, + color=colors['sso'], + type='', + swap='sso_kpt25'), + 65: + dict( + name='sso_kpt8', + id=65, + color=colors['sso'], + type='', + swap='sso_kpt24'), + 66: + dict( + name='sso_kpt9', + id=66, + color=colors['sso'], + type='', + swap='sso_kpt23'), + 67: + dict( + name='sso_kpt10', + id=67, + color=colors['sso'], + type='', + swap='sso_kpt22'), + 68: + dict( + name='sso_kpt11', + id=68, + color=colors['sso'], + type='', + swap='sso_kpt21'), + 69: + dict( + name='sso_kpt12', + id=69, + color=colors['sso'], + type='', + swap='sso_kpt20'), + 70: + dict( + name='sso_kpt13', + id=70, + color=colors['sso'], + type='', + swap='sso_kpt19'), + 71: + dict( + name='sso_kpt14', + id=71, + color=colors['sso'], + type='', + swap='sso_kpt18'), + 72: + dict( + name='sso_kpt15', + id=72, + color=colors['sso'], + type='', + swap='sso_kpt17'), + 73: + dict( + name='sso_kpt16', + id=73, + color=colors['sso'], + type='', + swap='sso_kpt29'), + 74: + dict( + name='sso_kpt17', + id=74, + color=colors['sso'], + type='', + swap='sso_kpt15'), + 75: + dict( + name='sso_kpt18', + id=75, + color=colors['sso'], + type='', + swap='sso_kpt14'), + 76: + dict( + name='sso_kpt19', + id=76, + color=colors['sso'], + type='', + swap='sso_kpt13'), + 77: + dict( + name='sso_kpt20', + id=77, + color=colors['sso'], + type='', + swap='sso_kpt12'), + 78: + dict( + name='sso_kpt21', + id=78, + color=colors['sso'], + type='', + swap='sso_kpt11'), + 79: + dict( + name='sso_kpt22', + id=79, + color=colors['sso'], + type='', + swap='sso_kpt10'), + 80: + dict( + name='sso_kpt23', + id=80, + color=colors['sso'], + type='', + swap='sso_kpt9'), + 81: + dict( + name='sso_kpt24', + id=81, + color=colors['sso'], + type='', + swap='sso_kpt8'), + 82: + dict( + name='sso_kpt25', + id=82, + color=colors['sso'], + type='', + swap='sso_kpt7'), + 83: + dict( + name='sso_kpt26', + id=83, + color=colors['sso'], + type='', + swap='sso_kpt2'), + 84: + dict( + name='sso_kpt27', + id=84, + color=colors['sso'], + type='', + swap='sso_kpt30'), + 85: + dict( + name='sso_kpt28', + id=85, + color=colors['sso'], + type='', + swap='sso_kpt31'), + 86: + dict( + name='sso_kpt29', + id=86, + color=colors['sso'], + type='', + swap='sso_kpt16'), + 87: + dict( + name='sso_kpt30', + id=87, + color=colors['sso'], + type='', + swap='sso_kpt27'), + 88: + dict( + name='sso_kpt31', + id=88, + color=colors['sso'], + type='', + swap='sso_kpt28'), + # long_sleeved_outwear + 89: + dict(name='lso_kpt1', id=89, color=colors['lso'], type='', swap=''), + 90: + dict( + name='lso_kpt2', + id=90, + color=colors['lso'], + type='', + swap='lso_kpt6'), + 91: + dict( + name='lso_kpt3', + id=91, + color=colors['lso'], + type='', + 
swap='lso_kpt5'), + 92: + dict( + name='lso_kpt4', + id=92, + color=colors['lso'], + type='', + swap='lso_kpt34'), + 93: + dict( + name='lso_kpt5', + id=93, + color=colors['lso'], + type='', + swap='lso_kpt3'), + 94: + dict( + name='lso_kpt6', + id=94, + color=colors['lso'], + type='', + swap='lso_kpt2'), + 95: + dict( + name='lso_kpt7', + id=95, + color=colors['lso'], + type='', + swap='lso_kpt33'), + 96: + dict( + name='lso_kpt8', + id=96, + color=colors['lso'], + type='', + swap='lso_kpt32'), + 97: + dict( + name='lso_kpt9', + id=97, + color=colors['lso'], + type='', + swap='lso_kpt31'), + 98: + dict( + name='lso_kpt10', + id=98, + color=colors['lso'], + type='', + swap='lso_kpt30'), + 99: + dict( + name='lso_kpt11', + id=99, + color=colors['lso'], + type='', + swap='lso_kpt29'), + 100: + dict( + name='lso_kpt12', + id=100, + color=colors['lso'], + type='', + swap='lso_kpt28'), + 101: + dict( + name='lso_kpt13', + id=101, + color=colors['lso'], + type='', + swap='lso_kpt27'), + 102: + dict( + name='lso_kpt14', + id=102, + color=colors['lso'], + type='', + swap='lso_kpt26'), + 103: + dict( + name='lso_kpt15', + id=103, + color=colors['lso'], + type='', + swap='lso_kpt25'), + 104: + dict( + name='lso_kpt16', + id=104, + color=colors['lso'], + type='', + swap='lso_kpt24'), + 105: + dict( + name='lso_kpt17', + id=105, + color=colors['lso'], + type='', + swap='lso_kpt23'), + 106: + dict( + name='lso_kpt18', + id=106, + color=colors['lso'], + type='', + swap='lso_kpt22'), + 107: + dict( + name='lso_kpt19', + id=107, + color=colors['lso'], + type='', + swap='lso_kpt21'), + 108: + dict( + name='lso_kpt20', + id=108, + color=colors['lso'], + type='', + swap='lso_kpt37'), + 109: + dict( + name='lso_kpt21', + id=109, + color=colors['lso'], + type='', + swap='lso_kpt19'), + 110: + dict( + name='lso_kpt22', + id=110, + color=colors['lso'], + type='', + swap='lso_kpt18'), + 111: + dict( + name='lso_kpt23', + id=111, + color=colors['lso'], + type='', + swap='lso_kpt17'), + 112: + dict( + name='lso_kpt24', + id=112, + color=colors['lso'], + type='', + swap='lso_kpt16'), + 113: + dict( + name='lso_kpt25', + id=113, + color=colors['lso'], + type='', + swap='lso_kpt15'), + 114: + dict( + name='lso_kpt26', + id=114, + color=colors['lso'], + type='', + swap='lso_kpt14'), + 115: + dict( + name='lso_kpt27', + id=115, + color=colors['lso'], + type='', + swap='lso_kpt13'), + 116: + dict( + name='lso_kpt28', + id=116, + color=colors['lso'], + type='', + swap='lso_kpt12'), + 117: + dict( + name='lso_kpt29', + id=117, + color=colors['lso'], + type='', + swap='lso_kpt11'), + 118: + dict( + name='lso_kpt30', + id=118, + color=colors['lso'], + type='', + swap='lso_kpt10'), + 119: + dict( + name='lso_kpt31', + id=119, + color=colors['lso'], + type='', + swap='lso_kpt9'), + 120: + dict( + name='lso_kpt32', + id=120, + color=colors['lso'], + type='', + swap='lso_kpt8'), + 121: + dict( + name='lso_kpt33', + id=121, + color=colors['lso'], + type='', + swap='lso_kpt7'), + 122: + dict( + name='lso_kpt34', + id=122, + color=colors['lso'], + type='', + swap='lso_kpt4'), + 123: + dict( + name='lso_kpt35', + id=123, + color=colors['lso'], + type='', + swap='lso_kpt38'), + 124: + dict( + name='lso_kpt36', + id=124, + color=colors['lso'], + type='', + swap='lso_kpt39'), + 125: + dict( + name='lso_kpt37', + id=125, + color=colors['lso'], + type='', + swap='lso_kpt20'), + 126: + dict( + name='lso_kpt38', + id=126, + color=colors['lso'], + type='', + swap='lso_kpt35'), + 127: + dict( + name='lso_kpt39', + id=127, + 
color=colors['lso'], + type='', + swap='lso_kpt36'), + # vest + 128: + dict(name='vest_kpt1', id=128, color=colors['vest'], type='', swap=''), + 129: + dict( + name='vest_kpt2', + id=129, + color=colors['vest'], + type='', + swap='vest_kpt6'), + 130: + dict( + name='vest_kpt3', + id=130, + color=colors['vest'], + type='', + swap='vest_kpt5'), + 131: + dict(name='vest_kpt4', id=131, color=colors['vest'], type='', swap=''), + 132: + dict( + name='vest_kpt5', + id=132, + color=colors['vest'], + type='', + swap='vest_kpt3'), + 133: + dict( + name='vest_kpt6', + id=133, + color=colors['vest'], + type='', + swap='vest_kpt2'), + 134: + dict( + name='vest_kpt7', + id=134, + color=colors['vest'], + type='', + swap='vest_kpt15'), + 135: + dict( + name='vest_kpt8', + id=135, + color=colors['vest'], + type='', + swap='vest_kpt14'), + 136: + dict( + name='vest_kpt9', + id=136, + color=colors['vest'], + type='', + swap='vest_kpt13'), + 137: + dict( + name='vest_kpt10', + id=137, + color=colors['vest'], + type='', + swap='vest_kpt12'), + 138: + dict( + name='vest_kpt11', id=138, color=colors['vest'], type='', swap=''), + 139: + dict( + name='vest_kpt12', + id=139, + color=colors['vest'], + type='', + swap='vest_kpt10'), + 140: + dict( + name='vest_kpt13', id=140, color=colors['vest'], type='', swap=''), + 141: + dict( + name='vest_kpt14', + id=141, + color=colors['vest'], + type='', + swap='vest_kpt8'), + 142: + dict( + name='vest_kpt15', + id=142, + color=colors['vest'], + type='', + swap='vest_kpt7'), + # sling + 143: + dict( + name='sling_kpt1', id=143, color=colors['sling'], type='', + swap=''), + 144: + dict( + name='sling_kpt2', + id=144, + color=colors['sling'], + type='', + swap='sling_kpt6'), + 145: + dict( + name='sling_kpt3', + id=145, + color=colors['sling'], + type='', + swap='sling_kpt5'), + 146: + dict( + name='sling_kpt4', id=146, color=colors['sling'], type='', + swap=''), + 147: + dict( + name='sling_kpt5', + id=147, + color=colors['sling'], + type='', + swap='sling_kpt3'), + 148: + dict( + name='sling_kpt6', + id=148, + color=colors['sling'], + type='', + swap='sling_kpt2'), + 149: + dict( + name='sling_kpt7', + id=149, + color=colors['sling'], + type='', + swap='sling_kpt15'), + 150: + dict( + name='sling_kpt8', + id=150, + color=colors['sling'], + type='', + swap='sling_kpt14'), + 151: + dict( + name='sling_kpt9', + id=151, + color=colors['sling'], + type='', + swap='sling_kpt13'), + 152: + dict( + name='sling_kpt10', + id=152, + color=colors['sling'], + type='', + swap='sling_kpt12'), + 153: + dict( + name='sling_kpt11', + id=153, + color=colors['sling'], + type='', + swap=''), + 154: + dict( + name='sling_kpt12', + id=154, + color=colors['sling'], + type='', + swap='sling_kpt10'), + 155: + dict( + name='sling_kpt13', + id=155, + color=colors['sling'], + type='', + swap='sling_kpt9'), + 156: + dict( + name='sling_kpt14', + id=156, + color=colors['sling'], + type='', + swap='sling_kpt8'), + 157: + dict( + name='sling_kpt15', + id=157, + color=colors['sling'], + type='', + swap='sling_kpt7'), + # shorts + 158: + dict( + name='shorts_kpt1', + id=158, + color=colors['shorts'], + type='', + swap='shorts_kpt3'), + 159: + dict( + name='shorts_kpt2', + id=159, + color=colors['shorts'], + type='', + swap=''), + 160: + dict( + name='shorts_kpt3', + id=160, + color=colors['shorts'], + type='', + swap='shorts_kpt1'), + 161: + dict( + name='shorts_kpt4', + id=161, + color=colors['shorts'], + type='', + swap='shorts_kpt10'), + 162: + dict( + name='shorts_kpt5', + id=162, + color=colors['shorts'], 
+ type='', + swap='shorts_kpt9'), + 163: + dict( + name='shorts_kpt6', + id=163, + color=colors['shorts'], + type='', + swap='shorts_kpt8'), + 164: + dict( + name='shorts_kpt7', + id=164, + color=colors['shorts'], + type='', + swap=''), + 165: + dict( + name='shorts_kpt8', + id=165, + color=colors['shorts'], + type='', + swap='shorts_kpt6'), + 166: + dict( + name='shorts_kpt9', + id=166, + color=colors['shorts'], + type='', + swap='shorts_kpt5'), + 167: + dict( + name='shorts_kpt10', + id=167, + color=colors['shorts'], + type='', + swap='shorts_kpt4'), + # trousers + 168: + dict( + name='trousers_kpt1', + id=168, + color=colors['trousers'], + type='', + swap='trousers_kpt3'), + 169: + dict( + name='trousers_kpt2', + id=169, + color=colors['trousers'], + type='', + swap=''), + 170: + dict( + name='trousers_kpt3', + id=170, + color=colors['trousers'], + type='', + swap='trousers_kpt1'), + 171: + dict( + name='trousers_kpt4', + id=171, + color=colors['trousers'], + type='', + swap='trousers_kpt14'), + 172: + dict( + name='trousers_kpt5', + id=172, + color=colors['trousers'], + type='', + swap='trousers_kpt13'), + 173: + dict( + name='trousers_kpt6', + id=173, + color=colors['trousers'], + type='', + swap='trousers_kpt12'), + 174: + dict( + name='trousers_kpt7', + id=174, + color=colors['trousers'], + type='', + swap='trousers_kpt11'), + 175: + dict( + name='trousers_kpt8', + id=175, + color=colors['trousers'], + type='', + swap='trousers_kpt10'), + 176: + dict( + name='trousers_kpt9', + id=176, + color=colors['trousers'], + type='', + swap=''), + 177: + dict( + name='trousers_kpt10', + id=177, + color=colors['trousers'], + type='', + swap='trousers_kpt8'), + 178: + dict( + name='trousers_kpt11', + id=178, + color=colors['trousers'], + type='', + swap='trousers_kpt7'), + 179: + dict( + name='trousers_kpt12', + id=179, + color=colors['trousers'], + type='', + swap='trousers_kpt6'), + 180: + dict( + name='trousers_kpt13', + id=180, + color=colors['trousers'], + type='', + swap='trousers_kpt5'), + 181: + dict( + name='trousers_kpt14', + id=181, + color=colors['trousers'], + type='', + swap='trousers_kpt4'), + # skirt + 182: + dict( + name='skirt_kpt1', + id=182, + color=colors['skirt'], + type='', + swap='skirt_kpt3'), + 183: + dict( + name='skirt_kpt2', id=183, color=colors['skirt'], type='', + swap=''), + 184: + dict( + name='skirt_kpt3', + id=184, + color=colors['skirt'], + type='', + swap='skirt_kpt1'), + 185: + dict( + name='skirt_kpt4', + id=185, + color=colors['skirt'], + type='', + swap='skirt_kpt8'), + 186: + dict( + name='skirt_kpt5', + id=186, + color=colors['skirt'], + type='', + swap='skirt_kpt7'), + 187: + dict( + name='skirt_kpt6', id=187, color=colors['skirt'], type='', + swap=''), + 188: + dict( + name='skirt_kpt7', + id=188, + color=colors['skirt'], + type='', + swap='skirt_kpt5'), + 189: + dict( + name='skirt_kpt8', + id=189, + color=colors['skirt'], + type='', + swap='skirt_kpt4'), + # short_sleeved_dress + 190: + dict(name='ssd_kpt1', id=190, color=colors['ssd'], type='', swap=''), + 191: + dict( + name='ssd_kpt2', + id=191, + color=colors['ssd'], + type='', + swap='ssd_kpt6'), + 192: + dict( + name='ssd_kpt3', + id=192, + color=colors['ssd'], + type='', + swap='ssd_kpt5'), + 193: + dict(name='ssd_kpt4', id=193, color=colors['ssd'], type='', swap=''), + 194: + dict( + name='ssd_kpt5', + id=194, + color=colors['ssd'], + type='', + swap='ssd_kpt3'), + 195: + dict( + name='ssd_kpt6', + id=195, + color=colors['ssd'], + type='', + swap='ssd_kpt2'), + 196: + dict( + 
name='ssd_kpt7', + id=196, + color=colors['ssd'], + type='', + swap='ssd_kpt29'), + 197: + dict( + name='ssd_kpt8', + id=197, + color=colors['ssd'], + type='', + swap='ssd_kpt28'), + 198: + dict( + name='ssd_kpt9', + id=198, + color=colors['ssd'], + type='', + swap='ssd_kpt27'), + 199: + dict( + name='ssd_kpt10', + id=199, + color=colors['ssd'], + type='', + swap='ssd_kpt26'), + 200: + dict( + name='ssd_kpt11', + id=200, + color=colors['ssd'], + type='', + swap='ssd_kpt25'), + 201: + dict( + name='ssd_kpt12', + id=201, + color=colors['ssd'], + type='', + swap='ssd_kpt24'), + 202: + dict( + name='ssd_kpt13', + id=202, + color=colors['ssd'], + type='', + swap='ssd_kpt23'), + 203: + dict( + name='ssd_kpt14', + id=203, + color=colors['ssd'], + type='', + swap='ssd_kpt22'), + 204: + dict( + name='ssd_kpt15', + id=204, + color=colors['ssd'], + type='', + swap='ssd_kpt21'), + 205: + dict( + name='ssd_kpt16', + id=205, + color=colors['ssd'], + type='', + swap='ssd_kpt20'), + 206: + dict( + name='ssd_kpt17', + id=206, + color=colors['ssd'], + type='', + swap='ssd_kpt19'), + 207: + dict(name='ssd_kpt18', id=207, color=colors['ssd'], type='', swap=''), + 208: + dict( + name='ssd_kpt19', + id=208, + color=colors['ssd'], + type='', + swap='ssd_kpt17'), + 209: + dict( + name='ssd_kpt20', + id=209, + color=colors['ssd'], + type='', + swap='ssd_kpt16'), + 210: + dict( + name='ssd_kpt21', + id=210, + color=colors['ssd'], + type='', + swap='ssd_kpt15'), + 211: + dict( + name='ssd_kpt22', + id=211, + color=colors['ssd'], + type='', + swap='ssd_kpt14'), + 212: + dict( + name='ssd_kpt23', + id=212, + color=colors['ssd'], + type='', + swap='ssd_kpt13'), + 213: + dict( + name='ssd_kpt24', + id=213, + color=colors['ssd'], + type='', + swap='ssd_kpt12'), + 214: + dict( + name='ssd_kpt25', + id=214, + color=colors['ssd'], + type='', + swap='ssd_kpt11'), + 215: + dict( + name='ssd_kpt26', + id=215, + color=colors['ssd'], + type='', + swap='ssd_kpt10'), + 216: + dict( + name='ssd_kpt27', + id=216, + color=colors['ssd'], + type='', + swap='ssd_kpt9'), + 217: + dict( + name='ssd_kpt28', + id=217, + color=colors['ssd'], + type='', + swap='ssd_kpt8'), + 218: + dict( + name='ssd_kpt29', + id=218, + color=colors['ssd'], + type='', + swap='ssd_kpt7'), + # long_sleeved_dress + 219: + dict(name='lsd_kpt1', id=219, color=colors['lsd'], type='', swap=''), + 220: + dict( + name='lsd_kpt2', + id=220, + color=colors['lsd'], + type='', + swap='lsd_kpt6'), + 221: + dict( + name='lsd_kpt3', + id=221, + color=colors['lsd'], + type='', + swap='lsd_kpt5'), + 222: + dict(name='lsd_kpt4', id=222, color=colors['lsd'], type='', swap=''), + 223: + dict( + name='lsd_kpt5', + id=223, + color=colors['lsd'], + type='', + swap='lsd_kpt3'), + 224: + dict( + name='lsd_kpt6', + id=224, + color=colors['lsd'], + type='', + swap='lsd_kpt2'), + 225: + dict( + name='lsd_kpt7', + id=225, + color=colors['lsd'], + type='', + swap='lsd_kpt37'), + 226: + dict( + name='lsd_kpt8', + id=226, + color=colors['lsd'], + type='', + swap='lsd_kpt36'), + 227: + dict( + name='lsd_kpt9', + id=227, + color=colors['lsd'], + type='', + swap='lsd_kpt35'), + 228: + dict( + name='lsd_kpt10', + id=228, + color=colors['lsd'], + type='', + swap='lsd_kpt34'), + 229: + dict( + name='lsd_kpt11', + id=229, + color=colors['lsd'], + type='', + swap='lsd_kpt33'), + 230: + dict( + name='lsd_kpt12', + id=230, + color=colors['lsd'], + type='', + swap='lsd_kpt32'), + 231: + dict( + name='lsd_kpt13', + id=231, + color=colors['lsd'], + type='', + swap='lsd_kpt31'), + 232: + dict( + 
name='lsd_kpt14', + id=232, + color=colors['lsd'], + type='', + swap='lsd_kpt30'), + 233: + dict( + name='lsd_kpt15', + id=233, + color=colors['lsd'], + type='', + swap='lsd_kpt29'), + 234: + dict( + name='lsd_kpt16', + id=234, + color=colors['lsd'], + type='', + swap='lsd_kpt28'), + 235: + dict( + name='lsd_kpt17', + id=235, + color=colors['lsd'], + type='', + swap='lsd_kpt27'), + 236: + dict( + name='lsd_kpt18', + id=236, + color=colors['lsd'], + type='', + swap='lsd_kpt26'), + 237: + dict( + name='lsd_kpt19', + id=237, + color=colors['lsd'], + type='', + swap='lsd_kpt25'), + 238: + dict( + name='lsd_kpt20', + id=238, + color=colors['lsd'], + type='', + swap='lsd_kpt24'), + 239: + dict( + name='lsd_kpt21', + id=239, + color=colors['lsd'], + type='', + swap='lsd_kpt23'), + 240: + dict(name='lsd_kpt22', id=240, color=colors['lsd'], type='', swap=''), + 241: + dict( + name='lsd_kpt23', + id=241, + color=colors['lsd'], + type='', + swap='lsd_kpt21'), + 242: + dict( + name='lsd_kpt24', + id=242, + color=colors['lsd'], + type='', + swap='lsd_kpt20'), + 243: + dict( + name='lsd_kpt25', + id=243, + color=colors['lsd'], + type='', + swap='lsd_kpt19'), + 244: + dict( + name='lsd_kpt26', + id=244, + color=colors['lsd'], + type='', + swap='lsd_kpt18'), + 245: + dict( + name='lsd_kpt27', + id=245, + color=colors['lsd'], + type='', + swap='lsd_kpt17'), + 246: + dict( + name='lsd_kpt28', + id=246, + color=colors['lsd'], + type='', + swap='lsd_kpt16'), + 247: + dict( + name='lsd_kpt29', + id=247, + color=colors['lsd'], + type='', + swap='lsd_kpt15'), + 248: + dict( + name='lsd_kpt30', + id=248, + color=colors['lsd'], + type='', + swap='lsd_kpt14'), + 249: + dict( + name='lsd_kpt31', + id=249, + color=colors['lsd'], + type='', + swap='lsd_kpt13'), + 250: + dict( + name='lsd_kpt32', + id=250, + color=colors['lsd'], + type='', + swap='lsd_kpt12'), + 251: + dict( + name='lsd_kpt33', + id=251, + color=colors['lsd'], + type='', + swap='lsd_kpt11'), + 252: + dict( + name='lsd_kpt34', + id=252, + color=colors['lsd'], + type='', + swap='lsd_kpt10'), + 253: + dict( + name='lsd_kpt35', + id=253, + color=colors['lsd'], + type='', + swap='lsd_kpt9'), + 254: + dict( + name='lsd_kpt36', + id=254, + color=colors['lsd'], + type='', + swap='lsd_kpt8'), + 255: + dict( + name='lsd_kpt37', + id=255, + color=colors['lsd'], + type='', + swap='lsd_kpt7'), + # vest_dress + 256: + dict(name='vd_kpt1', id=256, color=colors['vd'], type='', swap=''), + 257: + dict( + name='vd_kpt2', + id=257, + color=colors['vd'], + type='', + swap='vd_kpt6'), + 258: + dict( + name='vd_kpt3', + id=258, + color=colors['vd'], + type='', + swap='vd_kpt5'), + 259: + dict(name='vd_kpt4', id=259, color=colors['vd'], type='', swap=''), + 260: + dict( + name='vd_kpt5', + id=260, + color=colors['vd'], + type='', + swap='vd_kpt3'), + 261: + dict( + name='vd_kpt6', + id=261, + color=colors['vd'], + type='', + swap='vd_kpt2'), + 262: + dict( + name='vd_kpt7', + id=262, + color=colors['vd'], + type='', + swap='vd_kpt19'), + 263: + dict( + name='vd_kpt8', + id=263, + color=colors['vd'], + type='', + swap='vd_kpt18'), + 264: + dict( + name='vd_kpt9', + id=264, + color=colors['vd'], + type='', + swap='vd_kpt17'), + 265: + dict( + name='vd_kpt10', + id=265, + color=colors['vd'], + type='', + swap='vd_kpt16'), + 266: + dict( + name='vd_kpt11', + id=266, + color=colors['vd'], + type='', + swap='vd_kpt15'), + 267: + dict( + name='vd_kpt12', + id=267, + color=colors['vd'], + type='', + swap='vd_kpt14'), + 268: + dict(name='vd_kpt13', id=268, color=colors['vd'], 
type='', swap=''), + 269: + dict( + name='vd_kpt14', + id=269, + color=colors['vd'], + type='', + swap='vd_kpt12'), + 270: + dict( + name='vd_kpt15', + id=270, + color=colors['vd'], + type='', + swap='vd_kpt11'), + 271: + dict( + name='vd_kpt16', + id=271, + color=colors['vd'], + type='', + swap='vd_kpt10'), + 272: + dict( + name='vd_kpt17', + id=272, + color=colors['vd'], + type='', + swap='vd_kpt9'), + 273: + dict( + name='vd_kpt18', + id=273, + color=colors['vd'], + type='', + swap='vd_kpt8'), + 274: + dict( + name='vd_kpt19', + id=274, + color=colors['vd'], + type='', + swap='vd_kpt7'), + # sling_dress + 275: + dict(name='sd_kpt1', id=275, color=colors['sd'], type='', swap=''), + 276: + dict( + name='sd_kpt2', + id=276, + color=colors['sd'], + type='', + swap='sd_kpt6'), + 277: + dict( + name='sd_kpt3', + id=277, + color=colors['sd'], + type='', + swap='sd_kpt5'), + 278: + dict(name='sd_kpt4', id=278, color=colors['sd'], type='', swap=''), + 279: + dict( + name='sd_kpt5', + id=279, + color=colors['sd'], + type='', + swap='sd_kpt3'), + 280: + dict( + name='sd_kpt6', + id=280, + color=colors['sd'], + type='', + swap='sd_kpt2'), + 281: + dict( + name='sd_kpt7', + id=281, + color=colors['sd'], + type='', + swap='sd_kpt19'), + 282: + dict( + name='sd_kpt8', + id=282, + color=colors['sd'], + type='', + swap='sd_kpt18'), + 283: + dict( + name='sd_kpt9', + id=283, + color=colors['sd'], + type='', + swap='sd_kpt17'), + 284: + dict( + name='sd_kpt10', + id=284, + color=colors['sd'], + type='', + swap='sd_kpt16'), + 285: + dict( + name='sd_kpt11', + id=285, + color=colors['sd'], + type='', + swap='sd_kpt15'), + 286: + dict( + name='sd_kpt12', + id=286, + color=colors['sd'], + type='', + swap='sd_kpt14'), + 287: + dict(name='sd_kpt13', id=287, color=colors['sd'], type='', swap=''), + 288: + dict( + name='sd_kpt14', + id=288, + color=colors['sd'], + type='', + swap='sd_kpt12'), + 289: + dict( + name='sd_kpt15', + id=289, + color=colors['sd'], + type='', + swap='sd_kpt11'), + 290: + dict( + name='sd_kpt16', + id=290, + color=colors['sd'], + type='', + swap='sd_kpt10'), + 291: + dict( + name='sd_kpt17', + id=291, + color=colors['sd'], + type='', + swap='sd_kpt9'), + 292: + dict( + name='sd_kpt18', + id=292, + color=colors['sd'], + type='', + swap='sd_kpt8'), + 293: + dict( + name='sd_kpt19', + id=293, + color=colors['sd'], + type='', + swap='sd_kpt7'), + }, + skeleton_info={ + # short_sleeved_shirt + 0: + dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]), + 1: + dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]), + 2: + dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]), + 3: + dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]), + 4: + dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]), + 5: + dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]), + 6: + dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]), + 7: + dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]), + 8: + dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]), + 9: + dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]), + 10: + dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]), + 11: + dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]), + 12: + dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]), + 13: + dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]), + 14: + dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]), + 15: + 
dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]), + 16: + dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]), + 17: + dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]), + 18: + dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]), + 19: + dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]), + 20: + dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]), + 21: + dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]), + 22: + dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]), + 23: + dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]), + 24: + dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]), + 25: + dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]), + # long_sleeve_shirt + 26: + dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]), + 27: + dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]), + 28: + dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]), + 29: + dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]), + 30: + dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]), + 31: + dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]), + 32: + dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]), + 33: + dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]), + 34: + dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]), + 35: + dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]), + 36: + dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]), + 37: + dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]), + 38: + dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]), + 39: + dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]), + 40: + dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]), + 41: + dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]), + 42: + dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]), + 43: + dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]), + 44: + dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]), + 45: + dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]), + 46: + dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]), + 47: + dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]), + 48: + dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]), + 49: + dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]), + 50: + dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]), + 51: + dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]), + 52: + dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]), + 53: + dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]), + 54: + dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]), + 55: + dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]), + 56: + dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]), + 57: + dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]), + 58: + dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]), + 59: + dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]), + # short_sleeved_outwear + 60: + dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]), + 61: + dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]), + 62: + 
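+        # Each garment category below is traced as closed chains of
+        # consecutive landmarks (outer contour plus collar/placket loops).
+        # The name prefixes (sss_, lss_, sso_, lso_, ...) correspond to the
+        # categories in the `colors` dict at the top of this file, and the
+        # hard-coded link colors repeat those per-category values (e.g.
+        # [128, 0, 255] for short_sleeved_outwear).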
dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]), + 63: + dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]), + 64: + dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]), + 65: + dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]), + 66: + dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]), + 67: + dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]), + 68: + dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]), + 69: + dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]), + 70: + dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]), + 71: + dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]), + 72: + dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]), + 73: + dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]), + 74: + dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]), + 75: + dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]), + 76: + dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]), + 77: + dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]), + 78: + dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]), + 79: + dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]), + 80: + dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]), + 81: + dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]), + 82: + dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]), + 83: + dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]), + 84: + dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]), + 85: + dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]), + 86: + dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]), + 87: + dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]), + 88: + dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]), + 89: + dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]), + 90: + dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]), + 91: + dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]), + # long_sleeved_outwear + 92: + dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]), + 93: + dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]), + 94: + dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]), + 95: + dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]), + 96: + dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]), + 97: + dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]), + 98: + dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]), + 99: + dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]), + 100: + dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]), + 101: + dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]), + 102: + dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]), + 103: + dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]), + 104: + dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]), + 105: + dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]), + 106: + dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]), + 107: + dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]), + 108: + dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]), + 109: + 
dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]), + 110: + dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]), + 111: + dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]), + 112: + dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]), + 113: + dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]), + 114: + dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]), + 115: + dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]), + 116: + dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]), + 117: + dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]), + 118: + dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]), + 119: + dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]), + 120: + dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]), + 121: + dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]), + 122: + dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]), + 123: + dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]), + 124: + dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]), + 125: + dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]), + 126: + dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]), + 127: + dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]), + 128: + dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]), + 129: + dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]), + 130: + dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]), + 131: + dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]), + # vest + 132: + dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]), + 133: + dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]), + 134: + dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]), + 135: + dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]), + 136: + dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]), + 137: + dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]), + 138: + dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]), + 139: + dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]), + 140: + dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]), + 141: + dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]), + 142: + dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]), + 143: + dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]), + 144: + dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]), + 145: + dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]), + 146: + dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]), + 147: + dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]), + # sling + 148: + dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]), + 149: + dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]), + 150: + dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]), + 151: + dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]), + 152: + dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]), + 153: + dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]), + 154: + dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 
128]), + 155: + dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]), + 156: + dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]), + 157: + dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]), + 158: + dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]), + 159: + dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]), + 160: + dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]), + 161: + dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]), + 162: + dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]), + 163: + dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]), + # shorts + 164: + dict( + link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, + 128]), + 165: + dict( + link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, + 128]), + 166: + dict( + link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, + 128]), + 167: + dict( + link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, + 128]), + 168: + dict( + link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, + 128]), + 169: + dict( + link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, + 128]), + 170: + dict( + link=('shorts_kpt9', 'shorts_kpt10'), + id=170, + color=[128, 128, 128]), + 171: + dict( + link=('shorts_kpt10', 'shorts_kpt3'), + id=171, + color=[128, 128, 128]), + 172: + dict( + link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, + 128]), + 173: + dict( + link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, + 128]), + # trousers + 174: + dict( + link=('trousers_kpt1', 'trousers_kpt4'), + id=174, + color=[128, 0, 128]), + 175: + dict( + link=('trousers_kpt4', 'trousers_kpt5'), + id=175, + color=[128, 0, 128]), + 176: + dict( + link=('trousers_kpt5', 'trousers_kpt6'), + id=176, + color=[128, 0, 128]), + 177: + dict( + link=('trousers_kpt6', 'trousers_kpt7'), + id=177, + color=[128, 0, 128]), + 178: + dict( + link=('trousers_kpt7', 'trousers_kpt8'), + id=178, + color=[128, 0, 128]), + 179: + dict( + link=('trousers_kpt8', 'trousers_kpt9'), + id=179, + color=[128, 0, 128]), + 180: + dict( + link=('trousers_kpt9', 'trousers_kpt10'), + id=180, + color=[128, 0, 128]), + 181: + dict( + link=('trousers_kpt10', 'trousers_kpt11'), + id=181, + color=[128, 0, 128]), + 182: + dict( + link=('trousers_kpt11', 'trousers_kpt12'), + id=182, + color=[128, 0, 128]), + 183: + dict( + link=('trousers_kpt12', 'trousers_kpt13'), + id=183, + color=[128, 0, 128]), + 184: + dict( + link=('trousers_kpt13', 'trousers_kpt14'), + id=184, + color=[128, 0, 128]), + 185: + dict( + link=('trousers_kpt14', 'trousers_kpt3'), + id=185, + color=[128, 0, 128]), + 186: + dict( + link=('trousers_kpt3', 'trousers_kpt2'), + id=186, + color=[128, 0, 128]), + 187: + dict( + link=('trousers_kpt2', 'trousers_kpt1'), + id=187, + color=[128, 0, 128]), + # skirt + 188: + dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]), + 189: + dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]), + 190: + dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]), + 191: + dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]), + 192: + dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]), + 193: + dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]), + 194: + dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]), + 195: + dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 
128]), + # short_sleeved_dress + 196: + dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]), + 197: + dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]), + 198: + dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]), + 199: + dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]), + 200: + dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]), + 201: + dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]), + 202: + dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]), + 203: + dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]), + 204: + dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]), + 205: + dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]), + 206: + dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]), + 207: + dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]), + 208: + dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]), + 209: + dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]), + 210: + dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]), + 211: + dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]), + 212: + dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]), + 213: + dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]), + 214: + dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]), + 215: + dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]), + 216: + dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]), + 217: + dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]), + 218: + dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]), + 219: + dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]), + 220: + dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]), + 221: + dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]), + 222: + dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]), + 223: + dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]), + 224: + dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]), + 225: + dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]), + # long_sleeved_dress + 226: + dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]), + 227: + dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]), + 228: + dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]), + 229: + dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]), + 230: + dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]), + 231: + dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]), + 232: + dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]), + 233: + dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]), + 234: + dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]), + 235: + dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]), + 236: + dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]), + 237: + dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]), + 238: + dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]), + 239: + dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]), + 240: + dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]), + 241: + dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]), + 
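+        # This module is pure metadata: a dataset config typically pulls it
+        # in via something like
+        #   metainfo = dict(from_file='configs/_base_/datasets/deepfashion2.py')
+        # so that keypoint names, swap pairs, link colors, joint_weights and
+        # sigmas get attached to the dataset (a sketch of typical MMPose 1.x
+        # usage; the exact relative path depends on the training config).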
242: + dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]), + 243: + dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]), + 244: + dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]), + 245: + dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]), + 246: + dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]), + 247: + dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]), + 248: + dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]), + 249: + dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]), + 250: + dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]), + 251: + dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]), + 252: + dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]), + 253: + dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]), + 254: + dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]), + 255: + dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]), + 256: + dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]), + 257: + dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]), + 258: + dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]), + 259: + dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]), + 260: + dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]), + 261: + dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]), + 262: + dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]), + 263: + dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]), + # vest_dress + 264: + dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]), + 265: + dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]), + 266: + dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]), + 267: + dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]), + 268: + dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]), + 269: + dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]), + 270: + dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]), + 271: + dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]), + 272: + dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]), + 273: + dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]), + 274: + dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]), + 275: + dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]), + 276: + dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]), + 277: + dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]), + 278: + dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]), + 279: + dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]), + 280: + dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]), + 281: + dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]), + 282: + dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]), + 283: + dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]), + # sling_dress + 284: + dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]), + 285: + dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]), + 286: + dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]), + 287: + dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]), + 288: + dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]), + 
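+        # After the sling_dress links the file ends with
+        # joint_weights=[1.] * 294 (uniform loss weights over the 294
+        # concatenated landmarks) and sigmas=[]; without OKS sigmas this
+        # dataset is normally evaluated with distance-based metrics such as
+        # PCK / AUC / EPE rather than COCO-style AP.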
289: + dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]), + 290: + dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]), + 291: + dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]), + 292: + dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]), + 293: + dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]), + 294: + dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]), + 295: + dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]), + 296: + dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]), + 297: + dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]), + 298: + dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]), + 299: + dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]), + 300: + dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]), + 301: + dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]), + 302: + dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]), + 303: + dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0]), + }, + joint_weights=[1.] * 294, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/deepfashion_full.py b/mmpose/configs/_base_/datasets/deepfashion_full.py new file mode 100644 index 0000000000000000000000000000000000000000..4d989069ee7253d3a5b5f01c81135b1a472cd4b2 --- /dev/null +++ b/mmpose/configs/_base_/datasets/deepfashion_full.py @@ -0,0 +1,74 @@ +dataset_info = dict( + dataset_name='deepfashion_full', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left collar', + id=0, + color=[255, 255, 255], + type='', + swap='right collar'), + 1: + dict( + name='right collar', + id=1, + color=[255, 255, 255], + type='', + swap='left collar'), + 2: + dict( + name='left sleeve', + id=2, + color=[255, 255, 255], + type='', + swap='right sleeve'), + 3: + dict( + name='right sleeve', + id=3, + color=[255, 255, 255], + type='', + swap='left sleeve'), + 4: + dict( + name='left waistline', + id=0, + color=[255, 255, 255], + type='', + swap='right waistline'), + 5: + dict( + name='right waistline', + id=1, + color=[255, 255, 255], + type='', + swap='left waistline'), + 6: + dict( + name='left hem', + id=2, + color=[255, 255, 255], + type='', + swap='right hem'), + 7: + dict( + name='right hem', + id=3, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] 
* 8, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/deepfashion_lower.py b/mmpose/configs/_base_/datasets/deepfashion_lower.py new file mode 100644 index 0000000000000000000000000000000000000000..db014a1747ca618f93a7d092d29027015b48ae3c --- /dev/null +++ b/mmpose/configs/_base_/datasets/deepfashion_lower.py @@ -0,0 +1,46 @@ +dataset_info = dict( + dataset_name='deepfashion_lower', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left waistline', + id=0, + color=[255, 255, 255], + type='', + swap='right waistline'), + 1: + dict( + name='right waistline', + id=1, + color=[255, 255, 255], + type='', + swap='left waistline'), + 2: + dict( + name='left hem', + id=2, + color=[255, 255, 255], + type='', + swap='right hem'), + 3: + dict( + name='right hem', + id=3, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] * 4, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/deepfashion_upper.py b/mmpose/configs/_base_/datasets/deepfashion_upper.py new file mode 100644 index 0000000000000000000000000000000000000000..f0b012fd37bee1ba5ed956a7a5465a8623bf0894 --- /dev/null +++ b/mmpose/configs/_base_/datasets/deepfashion_upper.py @@ -0,0 +1,60 @@ +dataset_info = dict( + dataset_name='deepfashion_upper', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left collar', + id=0, + color=[255, 255, 255], + type='', + swap='right collar'), + 1: + dict( + name='right collar', + id=1, + color=[255, 255, 255], + type='', + swap='left collar'), + 2: + dict( + name='left sleeve', + id=2, + color=[255, 255, 255], + type='', + swap='right sleeve'), + 3: + dict( + name='right sleeve', + id=3, + color=[255, 255, 255], + type='', + swap='left sleeve'), + 4: + dict( + name='left hem', + id=4, + color=[255, 255, 255], + type='', + swap='right hem'), + 5: + dict( + name='right hem', + id=5, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] 
* 6, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/exlpose.py b/mmpose/configs/_base_/datasets/exlpose.py new file mode 100644 index 0000000000000000000000000000000000000000..29b758aa21117bb71766373d3eabc8633b372354 --- /dev/null +++ b/mmpose/configs/_base_/datasets/exlpose.py @@ -0,0 +1,125 @@ +dataset_info = dict( + dataset_name='exlpose', + paper_info=dict( + author='Sohyun Lee, Jaesung Rim, Boseung Jeong, Geonu Kim,' + 'ByungJu Woo, Haechan Lee, Sunghyun Cho, Suha Kwak', + title='Human Pose Estimation in Extremely Low-Light Conditions', + container='arXiv', + year='2023', + homepage='https://arxiv.org/abs/2303.15410', + ), + keypoint_info={ + 0: + dict( + name='left_shoulder', + id=0, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 1: + dict( + name='right_shoulder', + id=1, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 2: + dict( + name='left_elbow', + id=2, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='left_wrist', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 5: + dict( + name='right_wrist', + id=5, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_hip', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='left_knee', + id=8, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 9: + dict( + name='right_knee', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_ankle', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 11: + dict( + name='right_ankle', + id=11, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 12: + dict(name='head', id=12, color=[51, 153, 255], type='upper', swap=''), + 13: + dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: dict(link=('head', 'neck'), id=0, color=[51, 153, 255]), + 1: dict(link=('neck', 'left_shoulder'), id=1, color=[51, 153, 255]), + 2: dict(link=('neck', 'right_shoulder'), id=2, color=[51, 153, 255]), + 3: dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), + 4: dict(link=('left_elbow', 'left_wrist'), id=4, color=[0, 255, 0]), + 5: dict( + link=('right_shoulder', 'right_elbow'), id=5, color=[255, 128, 0]), + 6: + dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), + 7: dict(link=('neck', 'right_hip'), id=7, color=[51, 153, 255]), + 8: dict(link=('neck', 'left_hip'), id=8, color=[51, 153, 255]), + 9: dict(link=('right_hip', 'right_knee'), id=9, color=[255, 128, 0]), + 10: + dict(link=('right_knee', 'right_ankle'), id=10, color=[255, 128, 0]), + 11: dict(link=('left_hip', 'left_knee'), id=11, color=[0, 255, 0]), + 12: dict(link=('left_knee', 'left_ankle'), id=12, color=[0, 255, 0]), + }, + joint_weights=[ + 0.2, 0.2, 0.2, 1.3, 1.5, 0.2, 1.3, 1.5, 0.2, 0.2, 0.5, 0.2, 0.2, 0.5 + ], + sigmas=[ + 0.079, 0.079, 0.072, 0.072, 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, + 0.089, 0.089, 0.079, 0.079 + ]) diff --git a/mmpose/configs/_base_/datasets/fly.py b/mmpose/configs/_base_/datasets/fly.py new file mode 100644 index 0000000000000000000000000000000000000000..5f94ff57ca93d8f562b6a61b9a67198abdcde217 --- /dev/null +++ b/mmpose/configs/_base_/datasets/fly.py @@ -0,0 +1,237 @@ +dataset_info = dict( + dataset_name='fly', 
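Aside (illustrative only, not part of the patch): the sigmas lists in these configs, such as the 14 values in the ExLPose file above, are the per-keypoint falloff constants used by COCO-style Object Keypoint Similarity during evaluation. A minimal NumPy sketch of that standard formula follows (this is the generic OKS definition, not code from this repository; area is the object scale term):

import numpy as np

def object_keypoint_similarity(pred, gt, visible, sigmas, area):
    # pred, gt: (K, 2) keypoint arrays; visible: (K,) bools; area: object area in px^2.
    pred, gt = np.asarray(pred, float), np.asarray(gt, float)
    v = np.asarray(visible, bool)
    k = 2.0 * np.asarray(sigmas, float)             # COCO convention: k_i = 2 * sigma_i
    d2 = np.sum((pred - gt) ** 2, axis=1)           # squared pixel distances
    e = d2 / (2.0 * area * k ** 2 + np.spacing(1))  # normalise by object scale and k_i
    return float(np.exp(-e)[v].mean()) if v.any() else 0.0

With the ExLPose values, a given pixel error on a hip (sigma 0.107) is penalised less than the same error on a wrist (sigma 0.062), which is how the metric encodes per-keypoint localisation tolerance.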
+ paper_info=dict( + author='Pereira, Talmo D and Aldarondo, Diego E and ' + 'Willmore, Lindsay and Kislin, Mikhail and ' + 'Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W', + title='Fast animal pose estimation using deep neural networks', + container='Nature methods', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='eyeL', id=1, color=[255, 255, 255], type='', swap='eyeR'), + 2: + dict(name='eyeR', id=2, color=[255, 255, 255], type='', swap='eyeL'), + 3: + dict(name='neck', id=3, color=[255, 255, 255], type='', swap=''), + 4: + dict(name='thorax', id=4, color=[255, 255, 255], type='', swap=''), + 5: + dict(name='abdomen', id=5, color=[255, 255, 255], type='', swap=''), + 6: + dict( + name='forelegR1', + id=6, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 7: + dict( + name='forelegR2', + id=7, + color=[255, 255, 255], + type='', + swap='forelegL2'), + 8: + dict( + name='forelegR3', + id=8, + color=[255, 255, 255], + type='', + swap='forelegL3'), + 9: + dict( + name='forelegR4', + id=9, + color=[255, 255, 255], + type='', + swap='forelegL4'), + 10: + dict( + name='midlegR1', + id=10, + color=[255, 255, 255], + type='', + swap='midlegL1'), + 11: + dict( + name='midlegR2', + id=11, + color=[255, 255, 255], + type='', + swap='midlegL2'), + 12: + dict( + name='midlegR3', + id=12, + color=[255, 255, 255], + type='', + swap='midlegL3'), + 13: + dict( + name='midlegR4', + id=13, + color=[255, 255, 255], + type='', + swap='midlegL4'), + 14: + dict( + name='hindlegR1', + id=14, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 15: + dict( + name='hindlegR2', + id=15, + color=[255, 255, 255], + type='', + swap='hindlegL2'), + 16: + dict( + name='hindlegR3', + id=16, + color=[255, 255, 255], + type='', + swap='hindlegL3'), + 17: + dict( + name='hindlegR4', + id=17, + color=[255, 255, 255], + type='', + swap='hindlegL4'), + 18: + dict( + name='forelegL1', + id=18, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 19: + dict( + name='forelegL2', + id=19, + color=[255, 255, 255], + type='', + swap='forelegR2'), + 20: + dict( + name='forelegL3', + id=20, + color=[255, 255, 255], + type='', + swap='forelegR3'), + 21: + dict( + name='forelegL4', + id=21, + color=[255, 255, 255], + type='', + swap='forelegR4'), + 22: + dict( + name='midlegL1', + id=22, + color=[255, 255, 255], + type='', + swap='midlegR1'), + 23: + dict( + name='midlegL2', + id=23, + color=[255, 255, 255], + type='', + swap='midlegR2'), + 24: + dict( + name='midlegL3', + id=24, + color=[255, 255, 255], + type='', + swap='midlegR3'), + 25: + dict( + name='midlegL4', + id=25, + color=[255, 255, 255], + type='', + swap='midlegR4'), + 26: + dict( + name='hindlegL1', + id=26, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 27: + dict( + name='hindlegL2', + id=27, + color=[255, 255, 255], + type='', + swap='hindlegR2'), + 28: + dict( + name='hindlegL3', + id=28, + color=[255, 255, 255], + type='', + swap='hindlegR3'), + 29: + dict( + name='hindlegL4', + id=29, + color=[255, 255, 255], + type='', + swap='hindlegR4'), + 30: + dict( + name='wingL', id=30, color=[255, 255, 255], type='', swap='wingR'), + 31: + dict( + name='wingR', id=31, color=[255, 255, 255], type='', swap='wingL'), + }, + skeleton_info={ + 0: dict(link=('eyeL', 'head'), id=0, color=[255, 255, 255]), + 1: dict(link=('eyeR', 'head'), id=1, color=[255, 255, 255]), + 2: dict(link=('neck', 'head'), 
id=2, color=[255, 255, 255]), + 3: dict(link=('thorax', 'neck'), id=3, color=[255, 255, 255]), + 4: dict(link=('abdomen', 'thorax'), id=4, color=[255, 255, 255]), + 5: dict(link=('forelegR2', 'forelegR1'), id=5, color=[255, 255, 255]), + 6: dict(link=('forelegR3', 'forelegR2'), id=6, color=[255, 255, 255]), + 7: dict(link=('forelegR4', 'forelegR3'), id=7, color=[255, 255, 255]), + 8: dict(link=('midlegR2', 'midlegR1'), id=8, color=[255, 255, 255]), + 9: dict(link=('midlegR3', 'midlegR2'), id=9, color=[255, 255, 255]), + 10: dict(link=('midlegR4', 'midlegR3'), id=10, color=[255, 255, 255]), + 11: + dict(link=('hindlegR2', 'hindlegR1'), id=11, color=[255, 255, 255]), + 12: + dict(link=('hindlegR3', 'hindlegR2'), id=12, color=[255, 255, 255]), + 13: + dict(link=('hindlegR4', 'hindlegR3'), id=13, color=[255, 255, 255]), + 14: + dict(link=('forelegL2', 'forelegL1'), id=14, color=[255, 255, 255]), + 15: + dict(link=('forelegL3', 'forelegL2'), id=15, color=[255, 255, 255]), + 16: + dict(link=('forelegL4', 'forelegL3'), id=16, color=[255, 255, 255]), + 17: dict(link=('midlegL2', 'midlegL1'), id=17, color=[255, 255, 255]), + 18: dict(link=('midlegL3', 'midlegL2'), id=18, color=[255, 255, 255]), + 19: dict(link=('midlegL4', 'midlegL3'), id=19, color=[255, 255, 255]), + 20: + dict(link=('hindlegL2', 'hindlegL1'), id=20, color=[255, 255, 255]), + 21: + dict(link=('hindlegL3', 'hindlegL2'), id=21, color=[255, 255, 255]), + 22: + dict(link=('hindlegL4', 'hindlegL3'), id=22, color=[255, 255, 255]), + 23: dict(link=('wingL', 'neck'), id=23, color=[255, 255, 255]), + 24: dict(link=('wingR', 'neck'), id=24, color=[255, 255, 255]) + }, + joint_weights=[1.] * 32, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/freihand2d.py b/mmpose/configs/_base_/datasets/freihand2d.py new file mode 100644 index 0000000000000000000000000000000000000000..8b960d10f3538801531dbccdd67aeac6e73ac572 --- /dev/null +++ b/mmpose/configs/_base_/datasets/freihand2d.py @@ -0,0 +1,144 @@ +dataset_info = dict( + dataset_name='freihand', + paper_info=dict( + author='Zimmermann, Christian and Ceylan, Duygu and ' + 'Yang, Jimei and Russell, Bryan and ' + 'Argus, Max and Brox, Thomas', + title='Freihand: A dataset for markerless capture of hand pose ' + 'and shape from single rgb images', + container='Proceedings of the IEEE International ' + 'Conference on Computer Vision', + year='2019', + homepage='https://lmb.informatik.uni-freiburg.de/projects/freihand/', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + 
swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 21, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/h36m.py b/mmpose/configs/_base_/datasets/h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..00a719d8b19f9ff3c5ef98476d73216055bf9186 --- /dev/null +++ b/mmpose/configs/_base_/datasets/h36m.py @@ -0,0 +1,152 @@ +dataset_info = dict( + dataset_name='h36m', + paper_info=dict( + author='Ionescu, Catalin and Papava, Dragos and ' + 'Olaru, Vlad and Sminchisescu, Cristian', + title='Human3.6M: Large Scale Datasets and Predictive ' + 'Methods for 3D Human Sensing in Natural Environments', + container='IEEE Transactions on Pattern Analysis and ' + 'Machine Intelligence', + year='2014', + homepage='http://vision.imar.ro/human3.6m/description.php', + ), + keypoint_info={ + 0: + dict(name='root', id=0, color=[51, 153, 255], type='lower', swap=''), + 1: + dict( + name='right_hip', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 2: + dict( + name='right_knee', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 3: + dict( + name='right_foot', + id=3, + color=[255, 128, 0], + type='lower', + swap='left_foot'), + 4: + dict( + name='left_hip', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 5: + dict( + name='left_knee', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 6: + dict( + name='left_foot', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_foot'), + 7: + dict(name='spine', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict(name='thorax', id=8, color=[51, 153, 255], type='upper', swap=''), + 9: + dict( + name='neck_base', + id=9, + color=[51, 153, 255], + type='upper', + swap=''), + 10: + dict(name='head', id=10, color=[51, 153, 255], type='upper', swap=''), + 11: + dict( + name='left_shoulder', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 12: + dict( + name='left_elbow', + id=12, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 13: + dict( + name='left_wrist', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 14: + dict( + name='right_shoulder', + id=14, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 15: + dict( + name='right_elbow', + id=15, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 16: + dict( + name='right_wrist', + id=16, + color=[255, 128, 0], + type='upper', + swap='left_wrist') + }, + skeleton_info={ + 0: + dict(link=('root', 'left_hip'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_hip', 'left_knee'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_knee', 'left_foot'), id=2, color=[0, 255, 0]), + 3: + dict(link=('root', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_hip', 'right_knee'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_knee', 'right_foot'), id=5, color=[255, 128, 0]), + 6: + dict(link=('root', 'spine'), id=6, color=[51, 153, 255]), + 7: + dict(link=('spine', 'thorax'), id=7, color=[51, 153, 255]), + 8: + dict(link=('thorax', 'neck_base'), id=8, color=[51, 153, 255]), + 9: + dict(link=('neck_base', 'head'), id=9, color=[51, 153, 255]), + 10: + dict(link=('thorax', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict(link=('left_shoulder', 'left_elbow'), id=11, color=[0, 255, 0]), + 12: + dict(link=('left_elbow', 'left_wrist'), id=12, color=[0, 255, 0]), + 13: + dict(link=('thorax', 'right_shoulder'), id=13, color=[255, 128, 0]), + 14: + dict( + link=('right_shoulder', 'right_elbow'), id=14, color=[255, 128, + 0]), + 15: + dict(link=('right_elbow', 
'right_wrist'), id=15, color=[255, 128, 0]) + }, + joint_weights=[1.] * 17, + sigmas=[], + stats_info=dict(bbox_center=(528., 427.), bbox_scale=400.)) diff --git a/mmpose/configs/_base_/datasets/h3wb.py b/mmpose/configs/_base_/datasets/h3wb.py new file mode 100644 index 0000000000000000000000000000000000000000..bb47a1b3f5809d7b2c6429e0c8520f7141e4ca3d --- /dev/null +++ b/mmpose/configs/_base_/datasets/h3wb.py @@ -0,0 +1,1151 @@ +dataset_info = dict( + dataset_name='h3wb', + paper_info=dict( + author='Yue Zhu, Nermin Samet, David Picard', + title='H3WB: Human3.6M 3D WholeBody Dataset and Benchmark', + container='International Conf. on Computer Vision (ICCV)', + year='2023', + homepage='https://github.com/wholebody3d/wholebody3d', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='left_big_toe', + id=17, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 18: + dict( + name='left_small_toe', + id=18, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 19: + dict( + name='left_heel', + id=19, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 20: + dict( + name='right_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 21: + dict( + name='right_small_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 22: + dict( + name='right_heel', + id=22, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 23: + dict( + name='face-0', + id=23, + color=[255, 255, 255], + type='', + swap='face-16'), + 24: + dict( + name='face-1', + id=24, + color=[255, 255, 255], + type='', + swap='face-15'), + 25: + dict( + name='face-2', + id=25, + color=[255, 255, 255], + type='', + swap='face-14'), + 26: + dict( + name='face-3', + id=26, + color=[255, 255, 255], + type='', + swap='face-13'), + 27: + dict( + 
name='face-4', + id=27, + color=[255, 255, 255], + type='', + swap='face-12'), + 28: + dict( + name='face-5', + id=28, + color=[255, 255, 255], + type='', + swap='face-11'), + 29: + dict( + name='face-6', + id=29, + color=[255, 255, 255], + type='', + swap='face-10'), + 30: + dict( + name='face-7', + id=30, + color=[255, 255, 255], + type='', + swap='face-9'), + 31: + dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), + 32: + dict( + name='face-9', + id=32, + color=[255, 255, 255], + type='', + swap='face-7'), + 33: + dict( + name='face-10', + id=33, + color=[255, 255, 255], + type='', + swap='face-6'), + 34: + dict( + name='face-11', + id=34, + color=[255, 255, 255], + type='', + swap='face-5'), + 35: + dict( + name='face-12', + id=35, + color=[255, 255, 255], + type='', + swap='face-4'), + 36: + dict( + name='face-13', + id=36, + color=[255, 255, 255], + type='', + swap='face-3'), + 37: + dict( + name='face-14', + id=37, + color=[255, 255, 255], + type='', + swap='face-2'), + 38: + dict( + name='face-15', + id=38, + color=[255, 255, 255], + type='', + swap='face-1'), + 39: + dict( + name='face-16', + id=39, + color=[255, 255, 255], + type='', + swap='face-0'), + 40: + dict( + name='face-17', + id=40, + color=[255, 255, 255], + type='', + swap='face-26'), + 41: + dict( + name='face-18', + id=41, + color=[255, 255, 255], + type='', + swap='face-25'), + 42: + dict( + name='face-19', + id=42, + color=[255, 255, 255], + type='', + swap='face-24'), + 43: + dict( + name='face-20', + id=43, + color=[255, 255, 255], + type='', + swap='face-23'), + 44: + dict( + name='face-21', + id=44, + color=[255, 255, 255], + type='', + swap='face-22'), + 45: + dict( + name='face-22', + id=45, + color=[255, 255, 255], + type='', + swap='face-21'), + 46: + dict( + name='face-23', + id=46, + color=[255, 255, 255], + type='', + swap='face-20'), + 47: + dict( + name='face-24', + id=47, + color=[255, 255, 255], + type='', + swap='face-19'), + 48: + dict( + name='face-25', + id=48, + color=[255, 255, 255], + type='', + swap='face-18'), + 49: + dict( + name='face-26', + id=49, + color=[255, 255, 255], + type='', + swap='face-17'), + 50: + dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), + 51: + dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), + 52: + dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), + 53: + dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict( + name='face-31', + id=54, + color=[255, 255, 255], + type='', + swap='face-35'), + 55: + dict( + name='face-32', + id=55, + color=[255, 255, 255], + type='', + swap='face-34'), + 56: + dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-34', + id=57, + color=[255, 255, 255], + type='', + swap='face-32'), + 58: + dict( + name='face-35', + id=58, + color=[255, 255, 255], + type='', + swap='face-31'), + 59: + dict( + name='face-36', + id=59, + color=[255, 255, 255], + type='', + swap='face-45'), + 60: + dict( + name='face-37', + id=60, + color=[255, 255, 255], + type='', + swap='face-44'), + 61: + dict( + name='face-38', + id=61, + color=[255, 255, 255], + type='', + swap='face-43'), + 62: + dict( + name='face-39', + id=62, + color=[255, 255, 255], + type='', + swap='face-42'), + 63: + dict( + name='face-40', + id=63, + color=[255, 255, 255], + type='', + swap='face-47'), + 64: + dict( + name='face-41', + id=64, + color=[255, 255, 255], + type='', + swap='face-46'), + 65: + dict( + name='face-42', + id=65, + 
color=[255, 255, 255], + type='', + swap='face-39'), + 66: + dict( + name='face-43', + id=66, + color=[255, 255, 255], + type='', + swap='face-38'), + 67: + dict( + name='face-44', + id=67, + color=[255, 255, 255], + type='', + swap='face-37'), + 68: + dict( + name='face-45', + id=68, + color=[255, 255, 255], + type='', + swap='face-36'), + 69: + dict( + name='face-46', + id=69, + color=[255, 255, 255], + type='', + swap='face-41'), + 70: + dict( + name='face-47', + id=70, + color=[255, 255, 255], + type='', + swap='face-40'), + 71: + dict( + name='face-48', + id=71, + color=[255, 255, 255], + type='', + swap='face-54'), + 72: + dict( + name='face-49', + id=72, + color=[255, 255, 255], + type='', + swap='face-53'), + 73: + dict( + name='face-50', + id=73, + color=[255, 255, 255], + type='', + swap='face-52'), + 74: + dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), + 75: + dict( + name='face-52', + id=75, + color=[255, 255, 255], + type='', + swap='face-50'), + 76: + dict( + name='face-53', + id=76, + color=[255, 255, 255], + type='', + swap='face-49'), + 77: + dict( + name='face-54', + id=77, + color=[255, 255, 255], + type='', + swap='face-48'), + 78: + dict( + name='face-55', + id=78, + color=[255, 255, 255], + type='', + swap='face-59'), + 79: + dict( + name='face-56', + id=79, + color=[255, 255, 255], + type='', + swap='face-58'), + 80: + dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), + 81: + dict( + name='face-58', + id=81, + color=[255, 255, 255], + type='', + swap='face-56'), + 82: + dict( + name='face-59', + id=82, + color=[255, 255, 255], + type='', + swap='face-55'), + 83: + dict( + name='face-60', + id=83, + color=[255, 255, 255], + type='', + swap='face-64'), + 84: + dict( + name='face-61', + id=84, + color=[255, 255, 255], + type='', + swap='face-63'), + 85: + dict(name='face-62', id=85, color=[255, 255, 255], type='', swap=''), + 86: + dict( + name='face-63', + id=86, + color=[255, 255, 255], + type='', + swap='face-61'), + 87: + dict( + name='face-64', + id=87, + color=[255, 255, 255], + type='', + swap='face-60'), + 88: + dict( + name='face-65', + id=88, + color=[255, 255, 255], + type='', + swap='face-67'), + 89: + dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), + 90: + dict( + name='face-67', + id=90, + color=[255, 255, 255], + type='', + swap='face-65'), + 91: + dict( + name='left_hand_root', + id=91, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 92: + dict( + name='left_thumb1', + id=92, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 93: + dict( + name='left_thumb2', + id=93, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 94: + dict( + name='left_thumb3', + id=94, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 95: + dict( + name='left_thumb4', + id=95, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 96: + dict( + name='left_forefinger1', + id=96, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 97: + dict( + name='left_forefinger2', + id=97, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 98: + dict( + name='left_forefinger3', + id=98, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 99: + dict( + name='left_forefinger4', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 100: + dict( + name='left_middle_finger1', + id=100, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 101: + dict( + name='left_middle_finger2', + id=101, + 
color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 102: + dict( + name='left_middle_finger3', + id=102, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 103: + dict( + name='left_middle_finger4', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 104: + dict( + name='left_ring_finger1', + id=104, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 105: + dict( + name='left_ring_finger2', + id=105, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 106: + dict( + name='left_ring_finger3', + id=106, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 107: + dict( + name='left_ring_finger4', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 108: + dict( + name='left_pinky_finger1', + id=108, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 109: + dict( + name='left_pinky_finger2', + id=109, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 110: + dict( + name='left_pinky_finger3', + id=110, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 111: + dict( + name='left_pinky_finger4', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 112: + dict( + name='right_hand_root', + id=112, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 113: + dict( + name='right_thumb1', + id=113, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 114: + dict( + name='right_thumb2', + id=114, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 115: + dict( + name='right_thumb3', + id=115, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 116: + dict( + name='right_thumb4', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 117: + dict( + name='right_forefinger1', + id=117, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 118: + dict( + name='right_forefinger2', + id=118, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 119: + dict( + name='right_forefinger3', + id=119, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 120: + dict( + name='right_forefinger4', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 121: + dict( + name='right_middle_finger1', + id=121, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 122: + dict( + name='right_middle_finger2', + id=122, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 123: + dict( + name='right_middle_finger3', + id=123, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 124: + dict( + name='right_middle_finger4', + id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 125: + dict( + name='right_ring_finger1', + id=125, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 126: + dict( + name='right_ring_finger2', + id=126, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 127: + dict( + name='right_ring_finger3', + id=127, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 128: + dict( + name='right_ring_finger4', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 129: + dict( + name='right_pinky_finger1', + id=129, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 130: + dict( + name='right_pinky_finger2', + id=130, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 131: + dict( + name='right_pinky_finger3', + id=131, + color=[0, 255, 0], + type='', + 
swap='left_pinky_finger3'), + 132: + dict( + name='right_pinky_finger4', + id=132, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ankle', 'left_big_toe'), id=19, color=[0, 255, 0]), + 20: + dict(link=('left_ankle', 'left_small_toe'), id=20, color=[0, 255, 0]), + 21: + dict(link=('left_ankle', 'left_heel'), id=21, color=[0, 255, 0]), + 22: + dict( + link=('right_ankle', 'right_big_toe'), id=22, color=[255, 128, 0]), + 23: + dict( + link=('right_ankle', 'right_small_toe'), + id=23, + color=[255, 128, 0]), + 24: + dict(link=('right_ankle', 'right_heel'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('left_hand_root', 'left_thumb1'), id=25, color=[255, 128, + 0]), + 26: + dict(link=('left_thumb1', 'left_thumb2'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_thumb2', 'left_thumb3'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb3', 'left_thumb4'), id=28, color=[255, 128, 0]), + 29: + dict( + link=('left_hand_root', 'left_forefinger1'), + id=29, + color=[255, 153, 255]), + 30: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=30, + color=[255, 153, 255]), + 31: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=31, + color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=32, + color=[255, 153, 255]), + 33: + dict( + link=('left_hand_root', 'left_middle_finger1'), + id=33, + color=[102, 178, 255]), + 34: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=34, + color=[102, 178, 255]), + 35: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_hand_root', 'left_ring_finger1'), + id=37, + color=[255, 51, 51]), + 38: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=38, + color=[255, 51, 51]), + 39: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=39, + color=[255, 51, 51]), + 40: + dict( + 
link=('left_ring_finger3', 'left_ring_finger4'), + id=40, + color=[255, 51, 51]), + 41: + dict( + link=('left_hand_root', 'left_pinky_finger1'), + id=41, + color=[0, 255, 0]), + 42: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=42, + color=[0, 255, 0]), + 43: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('right_hand_root', 'right_thumb1'), + id=45, + color=[255, 128, 0]), + 46: + dict( + link=('right_thumb1', 'right_thumb2'), id=46, color=[255, 128, 0]), + 47: + dict( + link=('right_thumb2', 'right_thumb3'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb3', 'right_thumb4'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_hand_root', 'right_forefinger1'), + id=49, + color=[255, 153, 255]), + 50: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=50, + color=[255, 153, 255]), + 51: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_hand_root', 'right_middle_finger1'), + id=53, + color=[102, 178, 255]), + 54: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=54, + color=[102, 178, 255]), + 55: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_hand_root', 'right_ring_finger1'), + id=57, + color=[255, 51, 51]), + 58: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=58, + color=[255, 51, 51]), + 59: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_hand_root', 'right_pinky_finger1'), + id=61, + color=[0, 255, 0]), + 62: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=62, + color=[0, 255, 0]), + 63: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=64, + color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 133, + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L175' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, 0.066, + 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, + 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, + 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, + 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, + 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, + 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, + 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, + 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, + 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, + 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, + 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, + 0.019, 0.022, 0.031 + ]) diff --git a/mmpose/configs/_base_/datasets/halpe.py b/mmpose/configs/_base_/datasets/halpe.py new file mode 100644 index 0000000000000000000000000000000000000000..1385fe81dc2190684f2142449c0f288f2cb74c1a --- /dev/null +++ b/mmpose/configs/_base_/datasets/halpe.py @@ -0,0 +1,1157 @@ +dataset_info = dict( + dataset_name='halpe', + paper_info=dict( + author='Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie' + ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' + ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', + title='PaStaNet: Toward Human Activity Knowledge Engine', + container='CVPR', + year='2020', + homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict(name='head', id=17, 
color=[255, 128, 0], type='upper', swap=''), + 18: + dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), + 19: + dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), + 20: + dict( + name='left_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 21: + dict( + name='right_big_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 22: + dict( + name='left_small_toe', + id=22, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 23: + dict( + name='right_small_toe', + id=23, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 24: + dict( + name='left_heel', + id=24, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 25: + dict( + name='right_heel', + id=25, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 26: + dict( + name='face-0', + id=26, + color=[255, 255, 255], + type='', + swap='face-16'), + 27: + dict( + name='face-1', + id=27, + color=[255, 255, 255], + type='', + swap='face-15'), + 28: + dict( + name='face-2', + id=28, + color=[255, 255, 255], + type='', + swap='face-14'), + 29: + dict( + name='face-3', + id=29, + color=[255, 255, 255], + type='', + swap='face-13'), + 30: + dict( + name='face-4', + id=30, + color=[255, 255, 255], + type='', + swap='face-12'), + 31: + dict( + name='face-5', + id=31, + color=[255, 255, 255], + type='', + swap='face-11'), + 32: + dict( + name='face-6', + id=32, + color=[255, 255, 255], + type='', + swap='face-10'), + 33: + dict( + name='face-7', + id=33, + color=[255, 255, 255], + type='', + swap='face-9'), + 34: + dict(name='face-8', id=34, color=[255, 255, 255], type='', swap=''), + 35: + dict( + name='face-9', + id=35, + color=[255, 255, 255], + type='', + swap='face-7'), + 36: + dict( + name='face-10', + id=36, + color=[255, 255, 255], + type='', + swap='face-6'), + 37: + dict( + name='face-11', + id=37, + color=[255, 255, 255], + type='', + swap='face-5'), + 38: + dict( + name='face-12', + id=38, + color=[255, 255, 255], + type='', + swap='face-4'), + 39: + dict( + name='face-13', + id=39, + color=[255, 255, 255], + type='', + swap='face-3'), + 40: + dict( + name='face-14', + id=40, + color=[255, 255, 255], + type='', + swap='face-2'), + 41: + dict( + name='face-15', + id=41, + color=[255, 255, 255], + type='', + swap='face-1'), + 42: + dict( + name='face-16', + id=42, + color=[255, 255, 255], + type='', + swap='face-0'), + 43: + dict( + name='face-17', + id=43, + color=[255, 255, 255], + type='', + swap='face-26'), + 44: + dict( + name='face-18', + id=44, + color=[255, 255, 255], + type='', + swap='face-25'), + 45: + dict( + name='face-19', + id=45, + color=[255, 255, 255], + type='', + swap='face-24'), + 46: + dict( + name='face-20', + id=46, + color=[255, 255, 255], + type='', + swap='face-23'), + 47: + dict( + name='face-21', + id=47, + color=[255, 255, 255], + type='', + swap='face-22'), + 48: + dict( + name='face-22', + id=48, + color=[255, 255, 255], + type='', + swap='face-21'), + 49: + dict( + name='face-23', + id=49, + color=[255, 255, 255], + type='', + swap='face-20'), + 50: + dict( + name='face-24', + id=50, + color=[255, 255, 255], + type='', + swap='face-19'), + 51: + dict( + name='face-25', + id=51, + color=[255, 255, 255], + type='', + swap='face-18'), + 52: + dict( + name='face-26', + id=52, + color=[255, 255, 255], + type='', + swap='face-17'), + 53: + dict(name='face-27', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict(name='face-28', id=54, color=[255, 255, 255], 
type='', swap=''), + 55: + dict(name='face-29', id=55, color=[255, 255, 255], type='', swap=''), + 56: + dict(name='face-30', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-31', + id=57, + color=[255, 255, 255], + type='', + swap='face-35'), + 58: + dict( + name='face-32', + id=58, + color=[255, 255, 255], + type='', + swap='face-34'), + 59: + dict(name='face-33', id=59, color=[255, 255, 255], type='', swap=''), + 60: + dict( + name='face-34', + id=60, + color=[255, 255, 255], + type='', + swap='face-32'), + 61: + dict( + name='face-35', + id=61, + color=[255, 255, 255], + type='', + swap='face-31'), + 62: + dict( + name='face-36', + id=62, + color=[255, 255, 255], + type='', + swap='face-45'), + 63: + dict( + name='face-37', + id=63, + color=[255, 255, 255], + type='', + swap='face-44'), + 64: + dict( + name='face-38', + id=64, + color=[255, 255, 255], + type='', + swap='face-43'), + 65: + dict( + name='face-39', + id=65, + color=[255, 255, 255], + type='', + swap='face-42'), + 66: + dict( + name='face-40', + id=66, + color=[255, 255, 255], + type='', + swap='face-47'), + 67: + dict( + name='face-41', + id=67, + color=[255, 255, 255], + type='', + swap='face-46'), + 68: + dict( + name='face-42', + id=68, + color=[255, 255, 255], + type='', + swap='face-39'), + 69: + dict( + name='face-43', + id=69, + color=[255, 255, 255], + type='', + swap='face-38'), + 70: + dict( + name='face-44', + id=70, + color=[255, 255, 255], + type='', + swap='face-37'), + 71: + dict( + name='face-45', + id=71, + color=[255, 255, 255], + type='', + swap='face-36'), + 72: + dict( + name='face-46', + id=72, + color=[255, 255, 255], + type='', + swap='face-41'), + 73: + dict( + name='face-47', + id=73, + color=[255, 255, 255], + type='', + swap='face-40'), + 74: + dict( + name='face-48', + id=74, + color=[255, 255, 255], + type='', + swap='face-54'), + 75: + dict( + name='face-49', + id=75, + color=[255, 255, 255], + type='', + swap='face-53'), + 76: + dict( + name='face-50', + id=76, + color=[255, 255, 255], + type='', + swap='face-52'), + 77: + dict(name='face-51', id=77, color=[255, 255, 255], type='', swap=''), + 78: + dict( + name='face-52', + id=78, + color=[255, 255, 255], + type='', + swap='face-50'), + 79: + dict( + name='face-53', + id=79, + color=[255, 255, 255], + type='', + swap='face-49'), + 80: + dict( + name='face-54', + id=80, + color=[255, 255, 255], + type='', + swap='face-48'), + 81: + dict( + name='face-55', + id=81, + color=[255, 255, 255], + type='', + swap='face-59'), + 82: + dict( + name='face-56', + id=82, + color=[255, 255, 255], + type='', + swap='face-58'), + 83: + dict(name='face-57', id=83, color=[255, 255, 255], type='', swap=''), + 84: + dict( + name='face-58', + id=84, + color=[255, 255, 255], + type='', + swap='face-56'), + 85: + dict( + name='face-59', + id=85, + color=[255, 255, 255], + type='', + swap='face-55'), + 86: + dict( + name='face-60', + id=86, + color=[255, 255, 255], + type='', + swap='face-64'), + 87: + dict( + name='face-61', + id=87, + color=[255, 255, 255], + type='', + swap='face-63'), + 88: + dict(name='face-62', id=88, color=[255, 255, 255], type='', swap=''), + 89: + dict( + name='face-63', + id=89, + color=[255, 255, 255], + type='', + swap='face-61'), + 90: + dict( + name='face-64', + id=90, + color=[255, 255, 255], + type='', + swap='face-60'), + 91: + dict( + name='face-65', + id=91, + color=[255, 255, 255], + type='', + swap='face-67'), + 92: + dict(name='face-66', id=92, color=[255, 255, 255], type='', swap=''), + 93: + dict( 
+ name='face-67', + id=93, + color=[255, 255, 255], + type='', + swap='face-65'), + 94: + dict( + name='left_hand_root', + id=94, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 95: + dict( + name='left_thumb1', + id=95, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 96: + dict( + name='left_thumb2', + id=96, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 97: + dict( + name='left_thumb3', + id=97, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 98: + dict( + name='left_thumb4', + id=98, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 99: + dict( + name='left_forefinger1', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 100: + dict( + name='left_forefinger2', + id=100, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 101: + dict( + name='left_forefinger3', + id=101, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 102: + dict( + name='left_forefinger4', + id=102, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 103: + dict( + name='left_middle_finger1', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 104: + dict( + name='left_middle_finger2', + id=104, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 105: + dict( + name='left_middle_finger3', + id=105, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 106: + dict( + name='left_middle_finger4', + id=106, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 107: + dict( + name='left_ring_finger1', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 108: + dict( + name='left_ring_finger2', + id=108, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 109: + dict( + name='left_ring_finger3', + id=109, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 110: + dict( + name='left_ring_finger4', + id=110, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 111: + dict( + name='left_pinky_finger1', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 112: + dict( + name='left_pinky_finger2', + id=112, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 113: + dict( + name='left_pinky_finger3', + id=113, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 114: + dict( + name='left_pinky_finger4', + id=114, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 115: + dict( + name='right_hand_root', + id=115, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 116: + dict( + name='right_thumb1', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 117: + dict( + name='right_thumb2', + id=117, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 118: + dict( + name='right_thumb3', + id=118, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 119: + dict( + name='right_thumb4', + id=119, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 120: + dict( + name='right_forefinger1', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 121: + dict( + name='right_forefinger2', + id=121, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 122: + dict( + name='right_forefinger3', + id=122, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 123: + dict( + name='right_forefinger4', + id=123, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 124: + dict( + name='right_middle_finger1', + 
id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 125: + dict( + name='right_middle_finger2', + id=125, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 126: + dict( + name='right_middle_finger3', + id=126, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 127: + dict( + name='right_middle_finger4', + id=127, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 128: + dict( + name='right_ring_finger1', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 129: + dict( + name='right_ring_finger2', + id=129, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 130: + dict( + name='right_ring_finger3', + id=130, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 131: + dict( + name='right_ring_finger4', + id=131, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 132: + dict( + name='right_pinky_finger1', + id=132, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 133: + dict( + name='right_pinky_finger2', + id=133, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 134: + dict( + name='right_pinky_finger3', + id=134, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 135: + dict( + name='right_pinky_finger4', + id=135, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), + 3: + dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), + 6: + dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), + 7: + dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]), + 9: + dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), + 12: + dict( + link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, + 0]), + 13: + dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), + 14: + dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), + 16: + dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), + 18: + dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), + 21: + dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), + 22: + dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), + 23: + dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), + 24: + dict( + link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('right_ankle', 'right_small_toe'), + id=25, + color=[255, 128, 0]), + 26: + dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_wrist', 'left_thumb1'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb1', 'left_thumb2'), 
id=28, color=[255, 128, 0]), + 29: + dict(link=('left_thumb2', 'left_thumb3'), id=29, color=[255, 128, 0]), + 30: + dict(link=('left_thumb3', 'left_thumb4'), id=30, color=[255, 128, 0]), + 31: + dict( + link=('left_wrist', 'left_forefinger1'), + id=31, + color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=32, + color=[255, 153, 255]), + 33: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=33, + color=[255, 153, 255]), + 34: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=34, + color=[255, 153, 255]), + 35: + dict( + link=('left_wrist', 'left_middle_finger1'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=37, + color=[102, 178, 255]), + 38: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=38, + color=[102, 178, 255]), + 39: + dict( + link=('left_wrist', 'left_ring_finger1'), + id=39, + color=[255, 51, 51]), + 40: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=40, + color=[255, 51, 51]), + 41: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=41, + color=[255, 51, 51]), + 42: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=42, + color=[255, 51, 51]), + 43: + dict( + link=('left_wrist', 'left_pinky_finger1'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=45, + color=[0, 255, 0]), + 46: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=46, + color=[0, 255, 0]), + 47: + dict(link=('right_wrist', 'right_thumb1'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb1', 'right_thumb2'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_thumb2', 'right_thumb3'), id=49, color=[255, 128, 0]), + 50: + dict( + link=('right_thumb3', 'right_thumb4'), id=50, color=[255, 128, 0]), + 51: + dict( + link=('right_wrist', 'right_forefinger1'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=53, + color=[255, 153, 255]), + 54: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=54, + color=[255, 153, 255]), + 55: + dict( + link=('right_wrist', 'right_middle_finger1'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=57, + color=[102, 178, 255]), + 58: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=58, + color=[102, 178, 255]), + 59: + dict( + link=('right_wrist', 'right_ring_finger1'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=61, + color=[255, 51, 51]), + 62: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=62, + color=[255, 51, 51]), + 63: + dict( + link=('right_wrist', 'right_pinky_finger1'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=64, + color=[0, 255, 0]), + 65: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=65, + 
color=[0, 255, 0]), + 66: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=66, + color=[0, 255, 0]) + }, + joint_weights=[1.] * 136, + + # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' + # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.08, 0.08, 0.08, + 0.089, 0.089, 0.089, 0.089, 0.089, 0.089, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015 + ]) diff --git a/mmpose/configs/_base_/datasets/halpe26.py b/mmpose/configs/_base_/datasets/halpe26.py new file mode 100644 index 0000000000000000000000000000000000000000..cb4df83874c08ee7169aed251b266a03e411ccc9 --- /dev/null +++ b/mmpose/configs/_base_/datasets/halpe26.py @@ -0,0 +1,274 @@ +dataset_info = dict( + dataset_name='halpe26', + paper_info=dict( + author='Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie' + ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' + ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', + title='PaStaNet: Toward Human Activity Knowledge Engine', + container='CVPR', + year='2020', + homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 
255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''), + 18: + dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), + 19: + dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), + 20: + dict( + name='left_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 21: + dict( + name='right_big_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 22: + dict( + name='left_small_toe', + id=22, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 23: + dict( + name='right_small_toe', + id=23, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 24: + dict( + name='left_heel', + id=24, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 25: + dict( + name='right_heel', + id=25, + color=[255, 128, 0], + type='lower', + swap='left_heel') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), + 3: + dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), + 6: + dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), + 7: + dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]), + 9: + dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), + 12: + dict( + link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, + 0]), + 13: + dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), + 14: + dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), + 16: + dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), + 18: + dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), + 21: + dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), + 22: + dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), + 23: + dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), + 24: + dict( + link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('right_ankle', 'right_small_toe'), + id=25, + color=[255, 128, 0]), + 26: + dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), + }, + # the joint_weights is modified by MMPose Team + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ] + [1., 1., 1.2] + [1.5] * 6, + + # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' + # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' + sigmas=[ + 0.026, + 0.025, + 0.025, + 0.035, + 0.035, + 0.079, + 0.079, + 0.072, + 0.072, + 0.062, + 0.062, + 0.107, + 0.107, + 0.087, + 0.087, + 0.089, 
+ 0.089, + 0.026, + 0.026, + 0.066, + 0.079, + 0.079, + 0.079, + 0.079, + 0.079, + 0.079, + ]) diff --git a/mmpose/configs/_base_/datasets/horse10.py b/mmpose/configs/_base_/datasets/horse10.py new file mode 100644 index 0000000000000000000000000000000000000000..a485bf191bc151b0d76e48f3e55eb8e2dda6c506 --- /dev/null +++ b/mmpose/configs/_base_/datasets/horse10.py @@ -0,0 +1,201 @@ +dataset_info = dict( + dataset_name='horse10', + paper_info=dict( + author='Mathis, Alexander and Biasi, Thomas and ' + 'Schneider, Steffen and ' + 'Yuksekgonul, Mert and Rogers, Byron and ' + 'Bethge, Matthias and ' + 'Mathis, Mackenzie W', + title='Pretraining boosts out-of-domain robustness ' + 'for pose estimation', + container='Proceedings of the IEEE/CVF Winter Conference on ' + 'Applications of Computer Vision', + year='2021', + homepage='http://www.mackenziemathislab.org/horse10', + ), + keypoint_info={ + 0: + dict(name='Nose', id=0, color=[255, 153, 255], type='upper', swap=''), + 1: + dict(name='Eye', id=1, color=[255, 153, 255], type='upper', swap=''), + 2: + dict( + name='Nearknee', + id=2, + color=[255, 102, 255], + type='upper', + swap=''), + 3: + dict( + name='Nearfrontfetlock', + id=3, + color=[255, 102, 255], + type='upper', + swap=''), + 4: + dict( + name='Nearfrontfoot', + id=4, + color=[255, 102, 255], + type='upper', + swap=''), + 5: + dict( + name='Offknee', id=5, color=[255, 102, 255], type='upper', + swap=''), + 6: + dict( + name='Offfrontfetlock', + id=6, + color=[255, 102, 255], + type='upper', + swap=''), + 7: + dict( + name='Offfrontfoot', + id=7, + color=[255, 102, 255], + type='upper', + swap=''), + 8: + dict( + name='Shoulder', + id=8, + color=[255, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='Midshoulder', + id=9, + color=[255, 153, 255], + type='upper', + swap=''), + 10: + dict( + name='Elbow', id=10, color=[255, 153, 255], type='upper', swap=''), + 11: + dict( + name='Girth', id=11, color=[255, 153, 255], type='upper', swap=''), + 12: + dict( + name='Wither', id=12, color=[255, 153, 255], type='upper', + swap=''), + 13: + dict( + name='Nearhindhock', + id=13, + color=[255, 51, 255], + type='lower', + swap=''), + 14: + dict( + name='Nearhindfetlock', + id=14, + color=[255, 51, 255], + type='lower', + swap=''), + 15: + dict( + name='Nearhindfoot', + id=15, + color=[255, 51, 255], + type='lower', + swap=''), + 16: + dict(name='Hip', id=16, color=[255, 153, 255], type='lower', swap=''), + 17: + dict( + name='Stifle', id=17, color=[255, 153, 255], type='lower', + swap=''), + 18: + dict( + name='Offhindhock', + id=18, + color=[255, 51, 255], + type='lower', + swap=''), + 19: + dict( + name='Offhindfetlock', + id=19, + color=[255, 51, 255], + type='lower', + swap=''), + 20: + dict( + name='Offhindfoot', + id=20, + color=[255, 51, 255], + type='lower', + swap=''), + 21: + dict( + name='Ischium', + id=21, + color=[255, 153, 255], + type='lower', + swap='') + }, + skeleton_info={ + 0: + dict(link=('Nose', 'Eye'), id=0, color=[255, 153, 255]), + 1: + dict(link=('Eye', 'Wither'), id=1, color=[255, 153, 255]), + 2: + dict(link=('Wither', 'Hip'), id=2, color=[255, 153, 255]), + 3: + dict(link=('Hip', 'Ischium'), id=3, color=[255, 153, 255]), + 4: + dict(link=('Ischium', 'Stifle'), id=4, color=[255, 153, 255]), + 5: + dict(link=('Stifle', 'Girth'), id=5, color=[255, 153, 255]), + 6: + dict(link=('Girth', 'Elbow'), id=6, color=[255, 153, 255]), + 7: + dict(link=('Elbow', 'Shoulder'), id=7, color=[255, 153, 255]), + 8: + dict(link=('Shoulder', 'Midshoulder'), id=8, color=[255, 
153, 255]), + 9: + dict(link=('Midshoulder', 'Wither'), id=9, color=[255, 153, 255]), + 10: + dict( + link=('Nearknee', 'Nearfrontfetlock'), + id=10, + color=[255, 102, 255]), + 11: + dict( + link=('Nearfrontfetlock', 'Nearfrontfoot'), + id=11, + color=[255, 102, 255]), + 12: + dict( + link=('Offknee', 'Offfrontfetlock'), id=12, color=[255, 102, 255]), + 13: + dict( + link=('Offfrontfetlock', 'Offfrontfoot'), + id=13, + color=[255, 102, 255]), + 14: + dict( + link=('Nearhindhock', 'Nearhindfetlock'), + id=14, + color=[255, 51, 255]), + 15: + dict( + link=('Nearhindfetlock', 'Nearhindfoot'), + id=15, + color=[255, 51, 255]), + 16: + dict( + link=('Offhindhock', 'Offhindfetlock'), + id=16, + color=[255, 51, 255]), + 17: + dict( + link=('Offhindfetlock', 'Offhindfoot'), + id=17, + color=[255, 51, 255]) + }, + joint_weights=[1.] * 22, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/humanart.py b/mmpose/configs/_base_/datasets/humanart.py new file mode 100644 index 0000000000000000000000000000000000000000..b549269b692682f5dd0350e6f557edeb25730126 --- /dev/null +++ b/mmpose/configs/_base_/datasets/humanart.py @@ -0,0 +1,181 @@ +dataset_info = dict( + dataset_name='Human-Art', + paper_info=dict( + author='Ju, Xuan and Zeng, Ailing and ' + 'Wang, Jianan and Xu, Qiang and Zhang, Lei', + title='Human-Art: A Versatile Human-Centric Dataset ' + 'Bridging Natural and Artificial Scenes', + container='Proceedings of the IEEE/CVF Conference on ' + 'Computer Vision and Pattern Recognition', + year='2023', + homepage='https://idea-research.github.io/HumanArt/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, 
color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/humanart21.py b/mmpose/configs/_base_/datasets/humanart21.py new file mode 100644 index 0000000000000000000000000000000000000000..e6d935d1a97e00aac7830a01f587840321b68625 --- /dev/null +++ b/mmpose/configs/_base_/datasets/humanart21.py @@ -0,0 +1,218 @@ +dataset_info = dict( + dataset_name='Human-Art', + paper_info=dict( + author='Ju, Xuan and Zeng, Ailing and ' + 'Wang, Jianan and Xu, Qiang and Zhang, Lei', + title='Human-Art: A Versatile Human-Centric Dataset ' + 'Bridging Natural and Artificial Scenes', + container='Proceedings of the IEEE/CVF Conference on ' + 'Computer Vision and Pattern Recognition', + year='2023', + homepage='https://idea-research.github.io/HumanArt/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + 
swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='left_finger', + id=17, + color=[0, 255, 0], + type='lower', + swap='right_finger'), + 18: + dict( + name='right_finger', + id=18, + color=[255, 128, 0], + type='lower', + swap='left_finger'), + 19: + dict( + name='left_toe', + id=19, + color=[0, 255, 0], + type='lower', + swap='right_toe'), + 20: + dict( + name='right_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='left_toe'), + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ankle', 'left_toe'), id=19, color=[0, 255, 0]), + 20: + dict(link=('right_ankle', 'right_toe'), id=20, color=[255, 128, 0]), + 21: + dict(link=('left_wrist', 'left_finger'), id=21, color=[0, 255, 0]), + 22: + dict(link=('right_wrist', 'right_finger'), id=22, color=[255, 128, 0]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1., 1., 1., 1. 
+ ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.089, 0.089, 0.089, + 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/humanart_aic.py b/mmpose/configs/_base_/datasets/humanart_aic.py new file mode 100644 index 0000000000000000000000000000000000000000..e99942753606346d24d19ece5a52b55dff72840f --- /dev/null +++ b/mmpose/configs/_base_/datasets/humanart_aic.py @@ -0,0 +1,205 @@ +dataset_info = dict( + dataset_name='humanart', + paper_info=[ + dict( + author='Ju, Xuan and Zeng, Ailing and ' + 'Wang, Jianan and Xu, Qiang and Zhang, ' + 'Lei', + title='Human-Art: A Versatile Human-Centric Dataset ' + 'Bridging Natural and Artificial Scenes', + container='CVPR', + year='2023', + homepage='https://idea-research.github.io/HumanArt/', + ), + dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + ], + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='head_top', + id=17, + color=[51, 153, 255], + type='upper', + swap=''), + 18: + dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + 
dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.026, 0.026 + ]) diff --git a/mmpose/configs/_base_/datasets/interhand2d.py b/mmpose/configs/_base_/datasets/interhand2d.py new file mode 100644 index 0000000000000000000000000000000000000000..0134f07de5bf536eaffbf71155a7e6eb33b24f0a --- /dev/null +++ b/mmpose/configs/_base_/datasets/interhand2d.py @@ -0,0 +1,142 @@ +dataset_info = dict( + dataset_name='interhand2d', + paper_info=dict( + author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' + 'Shiratori, Takaaki and Lee, Kyoung Mu', + title='InterHand2.6M: A dataset and baseline for 3D ' + 'interacting hand pose estimation from a single RGB image', + container='arXiv', + year='2020', + homepage='https://mks0601.github.io/InterHand2.6M/', + ), + keypoint_info={ + 0: + dict(name='thumb4', id=0, color=[255, 128, 0], type='', swap=''), + 1: + dict(name='thumb3', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb1', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict( + name='forefinger4', id=4, color=[255, 153, 255], type='', swap=''), + 5: + dict( + name='forefinger3', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger1', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='middle_finger4', + id=8, + color=[102, 178, 255], + type='', + swap=''), + 9: + dict( + name='middle_finger3', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger1', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='ring_finger4', id=12, color=[255, 51, 51], type='', swap=''), + 13: + dict( + name='ring_finger3', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger1', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict(name='pinky_finger4', id=16, color=[0, 255, 0], type='', swap=''), + 17: + 
dict(name='pinky_finger3', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger1', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='wrist', id=20, color=[255, 255, 255], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 21, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/interhand3d.py b/mmpose/configs/_base_/datasets/interhand3d.py new file mode 100644 index 0000000000000000000000000000000000000000..e2bd8121c281c741ec9b980c7570ebef8a632993 --- /dev/null +++ b/mmpose/configs/_base_/datasets/interhand3d.py @@ -0,0 +1,487 @@ +dataset_info = dict( + dataset_name='interhand3d', + paper_info=dict( + author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' + 'Shiratori, Takaaki and Lee, Kyoung Mu', + title='InterHand2.6M: A dataset and baseline for 3D ' + 'interacting hand pose estimation from a single RGB image', + container='arXiv', + year='2020', + homepage='https://mks0601.github.io/InterHand2.6M/', + ), + keypoint_info={ + 0: + dict( + name='right_thumb4', + id=0, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 1: + dict( + name='right_thumb3', + id=1, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 2: + dict( + name='right_thumb2', + id=2, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 3: + dict( + name='right_thumb1', + id=3, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 4: + dict( + name='right_forefinger4', + id=4, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 5: + dict( + name='right_forefinger3', + id=5, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 6: + dict( + name='right_forefinger2', + id=6, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 7: + dict( + name='right_forefinger1', + id=7, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 8: + dict( + name='right_middle_finger4', + id=8, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 9: + dict( + name='right_middle_finger3', + id=9, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 10: + dict( + name='right_middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 11: + dict( + name='right_middle_finger1', + id=11, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 12: + dict( + name='right_ring_finger4', + id=12, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 13: + dict( + name='right_ring_finger3', + id=13, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 14: + dict( + name='right_ring_finger2', + id=14, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 15: + dict( + name='right_ring_finger1', + id=15, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 16: + dict( + name='right_pinky_finger4', + id=16, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4'), + 17: + dict( + name='right_pinky_finger3', + id=17, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 18: + dict( + name='right_pinky_finger2', + id=18, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 19: + dict( + name='right_pinky_finger1', + id=19, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 20: + dict( + name='right_wrist', + id=20, + color=[255, 255, 255], + type='', + swap='left_wrist'), + 21: + dict( + name='left_thumb4', + id=21, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 22: + dict( + name='left_thumb3', + id=22, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 23: + dict( + name='left_thumb2', + id=23, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 24: + dict( + name='left_thumb1', + id=24, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 25: + dict( + name='left_forefinger4', + id=25, + 
color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 26: + dict( + name='left_forefinger3', + id=26, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 27: + dict( + name='left_forefinger2', + id=27, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 28: + dict( + name='left_forefinger1', + id=28, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 29: + dict( + name='left_middle_finger4', + id=29, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 30: + dict( + name='left_middle_finger3', + id=30, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 31: + dict( + name='left_middle_finger2', + id=31, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 32: + dict( + name='left_middle_finger1', + id=32, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 33: + dict( + name='left_ring_finger4', + id=33, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 34: + dict( + name='left_ring_finger3', + id=34, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 35: + dict( + name='left_ring_finger2', + id=35, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 36: + dict( + name='left_ring_finger1', + id=36, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 37: + dict( + name='left_pinky_finger4', + id=37, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 38: + dict( + name='left_pinky_finger3', + id=38, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 39: + dict( + name='left_pinky_finger2', + id=39, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 40: + dict( + name='left_pinky_finger1', + id=40, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 41: + dict( + name='left_wrist', + id=41, + color=[255, 255, 255], + type='', + swap='right_wrist'), + }, + skeleton_info={ + 0: + dict(link=('right_wrist', 'right_thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_thumb1', 'right_thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_thumb2', 'right_thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_thumb3', 'right_thumb4'), id=3, color=[255, 128, 0]), + 4: + dict( + link=('right_wrist', 'right_forefinger1'), + id=4, + color=[255, 153, 255]), + 5: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=5, + color=[255, 153, 255]), + 6: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=6, + color=[255, 153, 255]), + 7: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=7, + color=[255, 153, 255]), + 8: + dict( + link=('right_wrist', 'right_middle_finger1'), + id=8, + color=[102, 178, 255]), + 9: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict( + link=('right_wrist', 'right_ring_finger1'), + id=12, + color=[255, 51, 51]), + 13: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=13, + color=[255, 51, 51]), + 14: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=14, + color=[255, 51, 51]), + 15: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=15, + color=[255, 51, 51]), + 16: + dict( + link=('right_wrist', 'right_pinky_finger1'), + id=16, + color=[0, 255, 0]), + 17: + dict( + 
link=('right_pinky_finger1', 'right_pinky_finger2'), + id=17, + color=[0, 255, 0]), + 18: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=18, + color=[0, 255, 0]), + 19: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=19, + color=[0, 255, 0]), + 20: + dict(link=('left_wrist', 'left_thumb1'), id=20, color=[255, 128, 0]), + 21: + dict(link=('left_thumb1', 'left_thumb2'), id=21, color=[255, 128, 0]), + 22: + dict(link=('left_thumb2', 'left_thumb3'), id=22, color=[255, 128, 0]), + 23: + dict(link=('left_thumb3', 'left_thumb4'), id=23, color=[255, 128, 0]), + 24: + dict( + link=('left_wrist', 'left_forefinger1'), + id=24, + color=[255, 153, 255]), + 25: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=25, + color=[255, 153, 255]), + 26: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=26, + color=[255, 153, 255]), + 27: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=27, + color=[255, 153, 255]), + 28: + dict( + link=('left_wrist', 'left_middle_finger1'), + id=28, + color=[102, 178, 255]), + 29: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=29, + color=[102, 178, 255]), + 30: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=30, + color=[102, 178, 255]), + 31: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=31, + color=[102, 178, 255]), + 32: + dict( + link=('left_wrist', 'left_ring_finger1'), + id=32, + color=[255, 51, 51]), + 33: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=33, + color=[255, 51, 51]), + 34: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=34, + color=[255, 51, 51]), + 35: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=35, + color=[255, 51, 51]), + 36: + dict( + link=('left_wrist', 'left_pinky_finger1'), + id=36, + color=[0, 255, 0]), + 37: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=37, + color=[0, 255, 0]), + 38: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=38, + color=[0, 255, 0]), + 39: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=39, + color=[0, 255, 0]), + }, + joint_weights=[1.] * 42, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/jhmdb.py b/mmpose/configs/_base_/datasets/jhmdb.py new file mode 100644 index 0000000000000000000000000000000000000000..1b37488498a2bade1fa6f2ff6532fcd219071803 --- /dev/null +++ b/mmpose/configs/_base_/datasets/jhmdb.py @@ -0,0 +1,129 @@ +dataset_info = dict( + dataset_name='jhmdb', + paper_info=dict( + author='H. Jhuang and J. Gall and S. Zuffi and ' + 'C. Schmid and M. J. Black', + title='Towards understanding action recognition', + container='International Conf. 
on Computer Vision (ICCV)', + year='2013', + homepage='http://jhmdb.is.tue.mpg.de/dataset', + ), + keypoint_info={ + 0: + dict(name='neck', id=0, color=[255, 128, 0], type='upper', swap=''), + 1: + dict(name='belly', id=1, color=[255, 128, 0], type='upper', swap=''), + 2: + dict(name='head', id=2, color=[255, 128, 0], type='upper', swap=''), + 3: + dict( + name='right_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='left_shoulder'), + 4: + dict( + name='left_shoulder', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 5: + dict( + name='right_hip', + id=5, + color=[0, 255, 0], + type='lower', + swap='left_hip'), + 6: + dict( + name='left_hip', + id=6, + color=[51, 153, 255], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_elbow', + id=7, + color=[51, 153, 255], + type='upper', + swap='left_elbow'), + 8: + dict( + name='left_elbow', + id=8, + color=[51, 153, 255], + type='upper', + swap='right_elbow'), + 9: + dict( + name='right_knee', + id=9, + color=[51, 153, 255], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_knee', + id=10, + color=[255, 128, 0], + type='lower', + swap='right_knee'), + 11: + dict( + name='right_wrist', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 12: + dict( + name='left_wrist', + id=12, + color=[255, 128, 0], + type='upper', + swap='right_wrist'), + 13: + dict( + name='right_ankle', + id=13, + color=[0, 255, 0], + type='lower', + swap='left_ankle'), + 14: + dict( + name='left_ankle', + id=14, + color=[0, 255, 0], + type='lower', + swap='right_ankle') + }, + skeleton_info={ + 0: dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: dict(link=('right_hip', 'belly'), id=2, color=[255, 128, 0]), + 3: dict(link=('belly', 'left_hip'), id=3, color=[0, 255, 0]), + 4: dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: dict(link=('belly', 'neck'), id=6, color=[51, 153, 255]), + 7: dict(link=('neck', 'head'), id=7, color=[51, 153, 255]), + 8: dict(link=('neck', 'right_shoulder'), id=8, color=[255, 128, 0]), + 9: dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('right_elbow', 'right_wrist'), id=10, color=[255, 128, 0]), + 11: dict(link=('neck', 'left_shoulder'), id=11, color=[0, 255, 0]), + 12: + dict(link=('left_shoulder', 'left_elbow'), id=12, color=[0, 255, 0]), + 13: dict(link=('left_elbow', 'left_wrist'), id=13, color=[0, 255, 0]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, 1.5, 1.5, 1.5, 1.5 + ], + # Adapted from COCO dataset. 
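+    # sigmas are the per-keypoint standard deviations used for OKS-style
+    # evaluation: OKS averages exp(-d_i^2 / (2 * s^2 * k_i^2)) over labeled
+    # keypoints, with k_i = 2 * sigma_i, d_i the prediction error and s the
+    # object scale, so a larger sigma means more tolerance for that keypoint.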
+ sigmas=[ + 0.025, 0.107, 0.025, 0.079, 0.079, 0.107, 0.107, 0.072, 0.072, 0.087, + 0.087, 0.062, 0.062, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/lapa.py b/mmpose/configs/_base_/datasets/lapa.py new file mode 100644 index 0000000000000000000000000000000000000000..3929edd90ed94b8717fdac62017277d1435a74f2 --- /dev/null +++ b/mmpose/configs/_base_/datasets/lapa.py @@ -0,0 +1,246 @@ +dataset_info = dict( + dataset_name='lapa', + paper_info=dict( + author='Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, ' + 'Yue and Wang, Xiaobo and Mei, Tao', + title='A New Dataset and Boundary-Attention Semantic ' + 'Segmentation for Face Parsing.', + container='Proceedings of the AAAI Conference on ' + 'Artificial Intelligence 2020', + year='2020', + homepage='https://github.com/JDAI-CV/lapa-dataset', + ), + keypoint_info={ + 0: + dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-32'), + 1: + dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-31'), + 2: + dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-30'), + 3: + dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-29'), + 4: + dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-28'), + 5: + dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-27'), + 6: + dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-26'), + 7: + dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-25'), + 8: + dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-24'), + 9: + dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-23'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-22'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-21'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-20'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-19'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-18'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-14'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-13'), + 20: + dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-12'), + 21: + dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-11'), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-10'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-9'), + 24: + dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-8'), + 25: + dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-7'), + 26: + dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-6'), + 27: + dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap='kpt-5'), + 28: + dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='kpt-4'), + 29: + dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap='kpt-3'), + 30: + dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap='kpt-2'), + 31: + dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-1'), + 32: + dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-0'), + 33: + dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap='kpt-46'), + 34: + dict(name='kpt-34', id=34, color=[255, 0, 0], type='', 
swap='kpt-45'), + 35: + dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-44'), + 36: + dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-43'), + 37: + dict(name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-42'), + 38: + dict(name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-50'), + 39: + dict(name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-49'), + 40: + dict(name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-48'), + 41: + dict(name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-47'), + 42: + dict(name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-37'), + 43: + dict(name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-36'), + 44: + dict(name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-35'), + 45: + dict(name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-34'), + 46: + dict(name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-33'), + 47: + dict(name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-41'), + 48: + dict(name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-40'), + 49: + dict(name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-39'), + 50: + dict(name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-38'), + 51: + dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: + dict(name='kpt-52', id=52, color=[255, 0, 0], type='', swap=''), + 53: + dict(name='kpt-53', id=53, color=[255, 0, 0], type='', swap=''), + 54: + dict(name='kpt-54', id=54, color=[255, 0, 0], type='', swap=''), + 55: + dict(name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-65'), + 56: + dict(name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-64'), + 57: + dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap='kpt-63'), + 58: + dict(name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-62'), + 59: + dict(name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-61'), + 60: + dict(name='kpt-60', id=60, color=[255, 0, 0], type='', swap=''), + 61: + dict(name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-59'), + 62: + dict(name='kpt-62', id=62, color=[255, 0, 0], type='', swap='kpt-58'), + 63: + dict(name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-57'), + 64: + dict(name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-56'), + 65: + dict(name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-55'), + 66: + dict(name='kpt-66', id=66, color=[255, 0, 0], type='', swap='kpt-79'), + 67: + dict(name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-78'), + 68: + dict(name='kpt-68', id=68, color=[255, 0, 0], type='', swap='kpt-77'), + 69: + dict(name='kpt-69', id=69, color=[255, 0, 0], type='', swap='kpt-76'), + 70: + dict(name='kpt-70', id=70, color=[255, 0, 0], type='', swap='kpt-75'), + 71: + dict(name='kpt-71', id=71, color=[255, 0, 0], type='', swap='kpt-82'), + 72: + dict(name='kpt-72', id=72, color=[255, 0, 0], type='', swap='kpt-81'), + 73: + dict(name='kpt-73', id=73, color=[255, 0, 0], type='', swap='kpt-80'), + 74: + dict(name='kpt-74', id=74, color=[255, 0, 0], type='', swap='kpt-83'), + 75: + dict(name='kpt-75', id=75, color=[255, 0, 0], type='', swap='kpt-70'), + 76: + dict(name='kpt-76', id=76, color=[255, 0, 0], type='', swap='kpt-69'), + 77: + dict(name='kpt-77', id=77, color=[255, 0, 0], type='', swap='kpt-68'), + 78: + dict(name='kpt-78', id=78, color=[255, 0, 0], type='', swap='kpt-67'), + 79: + dict(name='kpt-79', id=79, color=[255, 0, 0], type='', swap='kpt-66'), + 80: + 
dict(name='kpt-80', id=80, color=[255, 0, 0], type='', swap='kpt-73'), + 81: + dict(name='kpt-81', id=81, color=[255, 0, 0], type='', swap='kpt-72'), + 82: + dict(name='kpt-82', id=82, color=[255, 0, 0], type='', swap='kpt-71'), + 83: + dict(name='kpt-83', id=83, color=[255, 0, 0], type='', swap='kpt-74'), + 84: + dict(name='kpt-84', id=84, color=[255, 0, 0], type='', swap='kpt-90'), + 85: + dict(name='kpt-85', id=85, color=[255, 0, 0], type='', swap='kpt-89'), + 86: + dict(name='kpt-86', id=86, color=[255, 0, 0], type='', swap='kpt-88'), + 87: + dict(name='kpt-87', id=87, color=[255, 0, 0], type='', swap=''), + 88: + dict(name='kpt-88', id=88, color=[255, 0, 0], type='', swap='kpt-86'), + 89: + dict(name='kpt-89', id=89, color=[255, 0, 0], type='', swap='kpt-85'), + 90: + dict(name='kpt-90', id=90, color=[255, 0, 0], type='', swap='kpt-84'), + 91: + dict(name='kpt-91', id=91, color=[255, 0, 0], type='', swap='kpt-95'), + 92: + dict(name='kpt-92', id=92, color=[255, 0, 0], type='', swap='kpt-94'), + 93: + dict(name='kpt-93', id=93, color=[255, 0, 0], type='', swap=''), + 94: + dict(name='kpt-94', id=94, color=[255, 0, 0], type='', swap='kpt-92'), + 95: + dict(name='kpt-95', id=95, color=[255, 0, 0], type='', swap='kpt-91'), + 96: + dict(name='kpt-96', id=96, color=[255, 0, 0], type='', swap='kpt-100'), + 97: + dict(name='kpt-97', id=97, color=[255, 0, 0], type='', swap='kpt-99'), + 98: + dict(name='kpt-98', id=98, color=[255, 0, 0], type='', swap=''), + 99: + dict(name='kpt-99', id=99, color=[255, 0, 0], type='', swap='kpt-97'), + 100: + dict( + name='kpt-100', id=100, color=[255, 0, 0], type='', swap='kpt-96'), + 101: + dict( + name='kpt-101', id=101, color=[255, 0, 0], type='', + swap='kpt-103'), + 102: + dict(name='kpt-102', id=102, color=[255, 0, 0], type='', swap=''), + 103: + dict( + name='kpt-103', id=103, color=[255, 0, 0], type='', + swap='kpt-101'), + 104: + dict( + name='kpt-104', id=104, color=[255, 0, 0], type='', + swap='kpt-105'), + 105: + dict( + name='kpt-105', id=105, color=[255, 0, 0], type='', swap='kpt-104') + }, + skeleton_info={}, + joint_weights=[ + 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, + 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, + 0.8, 0.8, 0.8, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, + 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, + 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, + 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.0, 1.0 + ], + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/locust.py b/mmpose/configs/_base_/datasets/locust.py new file mode 100644 index 0000000000000000000000000000000000000000..db3fa15aa060b5806faae7a21f65460f77be2745 --- /dev/null +++ b/mmpose/configs/_base_/datasets/locust.py @@ -0,0 +1,263 @@ +dataset_info = dict( + dataset_name='locust', + paper_info=dict( + author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' + 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' + 'Couzin, Iain D', + title='DeepPoseKit, a software toolkit for fast and robust ' + 'animal pose estimation using deep learning', + container='Elife', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='neck', id=1, color=[255, 255, 255], type='', swap=''), + 2: + 
dict(name='thorax', id=2, color=[255, 255, 255], type='', swap=''), + 3: + dict(name='abdomen1', id=3, color=[255, 255, 255], type='', swap=''), + 4: + dict(name='abdomen2', id=4, color=[255, 255, 255], type='', swap=''), + 5: + dict( + name='anttipL', + id=5, + color=[255, 255, 255], + type='', + swap='anttipR'), + 6: + dict( + name='antbaseL', + id=6, + color=[255, 255, 255], + type='', + swap='antbaseR'), + 7: + dict(name='eyeL', id=7, color=[255, 255, 255], type='', swap='eyeR'), + 8: + dict( + name='forelegL1', + id=8, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 9: + dict( + name='forelegL2', + id=9, + color=[255, 255, 255], + type='', + swap='forelegR2'), + 10: + dict( + name='forelegL3', + id=10, + color=[255, 255, 255], + type='', + swap='forelegR3'), + 11: + dict( + name='forelegL4', + id=11, + color=[255, 255, 255], + type='', + swap='forelegR4'), + 12: + dict( + name='midlegL1', + id=12, + color=[255, 255, 255], + type='', + swap='midlegR1'), + 13: + dict( + name='midlegL2', + id=13, + color=[255, 255, 255], + type='', + swap='midlegR2'), + 14: + dict( + name='midlegL3', + id=14, + color=[255, 255, 255], + type='', + swap='midlegR3'), + 15: + dict( + name='midlegL4', + id=15, + color=[255, 255, 255], + type='', + swap='midlegR4'), + 16: + dict( + name='hindlegL1', + id=16, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 17: + dict( + name='hindlegL2', + id=17, + color=[255, 255, 255], + type='', + swap='hindlegR2'), + 18: + dict( + name='hindlegL3', + id=18, + color=[255, 255, 255], + type='', + swap='hindlegR3'), + 19: + dict( + name='hindlegL4', + id=19, + color=[255, 255, 255], + type='', + swap='hindlegR4'), + 20: + dict( + name='anttipR', + id=20, + color=[255, 255, 255], + type='', + swap='anttipL'), + 21: + dict( + name='antbaseR', + id=21, + color=[255, 255, 255], + type='', + swap='antbaseL'), + 22: + dict(name='eyeR', id=22, color=[255, 255, 255], type='', swap='eyeL'), + 23: + dict( + name='forelegR1', + id=23, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 24: + dict( + name='forelegR2', + id=24, + color=[255, 255, 255], + type='', + swap='forelegL2'), + 25: + dict( + name='forelegR3', + id=25, + color=[255, 255, 255], + type='', + swap='forelegL3'), + 26: + dict( + name='forelegR4', + id=26, + color=[255, 255, 255], + type='', + swap='forelegL4'), + 27: + dict( + name='midlegR1', + id=27, + color=[255, 255, 255], + type='', + swap='midlegL1'), + 28: + dict( + name='midlegR2', + id=28, + color=[255, 255, 255], + type='', + swap='midlegL2'), + 29: + dict( + name='midlegR3', + id=29, + color=[255, 255, 255], + type='', + swap='midlegL3'), + 30: + dict( + name='midlegR4', + id=30, + color=[255, 255, 255], + type='', + swap='midlegL4'), + 31: + dict( + name='hindlegR1', + id=31, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 32: + dict( + name='hindlegR2', + id=32, + color=[255, 255, 255], + type='', + swap='hindlegL2'), + 33: + dict( + name='hindlegR3', + id=33, + color=[255, 255, 255], + type='', + swap='hindlegL3'), + 34: + dict( + name='hindlegR4', + id=34, + color=[255, 255, 255], + type='', + swap='hindlegL4') + }, + skeleton_info={ + 0: dict(link=('neck', 'head'), id=0, color=[255, 255, 255]), + 1: dict(link=('thorax', 'neck'), id=1, color=[255, 255, 255]), + 2: dict(link=('abdomen1', 'thorax'), id=2, color=[255, 255, 255]), + 3: dict(link=('abdomen2', 'abdomen1'), id=3, color=[255, 255, 255]), + 4: dict(link=('antbaseL', 'anttipL'), id=4, color=[255, 255, 255]), + 5: dict(link=('eyeL', 'antbaseL'), id=5, 
color=[255, 255, 255]), + 6: dict(link=('forelegL2', 'forelegL1'), id=6, color=[255, 255, 255]), + 7: dict(link=('forelegL3', 'forelegL2'), id=7, color=[255, 255, 255]), + 8: dict(link=('forelegL4', 'forelegL3'), id=8, color=[255, 255, 255]), + 9: dict(link=('midlegL2', 'midlegL1'), id=9, color=[255, 255, 255]), + 10: dict(link=('midlegL3', 'midlegL2'), id=10, color=[255, 255, 255]), + 11: dict(link=('midlegL4', 'midlegL3'), id=11, color=[255, 255, 255]), + 12: + dict(link=('hindlegL2', 'hindlegL1'), id=12, color=[255, 255, 255]), + 13: + dict(link=('hindlegL3', 'hindlegL2'), id=13, color=[255, 255, 255]), + 14: + dict(link=('hindlegL4', 'hindlegL3'), id=14, color=[255, 255, 255]), + 15: dict(link=('antbaseR', 'anttipR'), id=15, color=[255, 255, 255]), + 16: dict(link=('eyeR', 'antbaseR'), id=16, color=[255, 255, 255]), + 17: + dict(link=('forelegR2', 'forelegR1'), id=17, color=[255, 255, 255]), + 18: + dict(link=('forelegR3', 'forelegR2'), id=18, color=[255, 255, 255]), + 19: + dict(link=('forelegR4', 'forelegR3'), id=19, color=[255, 255, 255]), + 20: dict(link=('midlegR2', 'midlegR1'), id=20, color=[255, 255, 255]), + 21: dict(link=('midlegR3', 'midlegR2'), id=21, color=[255, 255, 255]), + 22: dict(link=('midlegR4', 'midlegR3'), id=22, color=[255, 255, 255]), + 23: + dict(link=('hindlegR2', 'hindlegR1'), id=23, color=[255, 255, 255]), + 24: + dict(link=('hindlegR3', 'hindlegR2'), id=24, color=[255, 255, 255]), + 25: + dict(link=('hindlegR4', 'hindlegR3'), id=25, color=[255, 255, 255]) + }, + joint_weights=[1.] * 35, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/macaque.py b/mmpose/configs/_base_/datasets/macaque.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8dac297ea2f0e36dabccccc021d953216a6ac8 --- /dev/null +++ b/mmpose/configs/_base_/datasets/macaque.py @@ -0,0 +1,183 @@ +dataset_info = dict( + dataset_name='macaque', + paper_info=dict( + author='Labuguen, Rollyn and Matsumoto, Jumpei and ' + 'Negrete, Salvador and Nishimaru, Hiroshi and ' + 'Nishijo, Hisao and Takada, Masahiko and ' + 'Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro', + title='MacaquePose: A novel "in the wild" macaque monkey pose dataset ' + 'for markerless motion capture', + container='bioRxiv', + year='2020', + homepage='http://www.pri.kyoto-u.ac.jp/datasets/' + 'macaquepose/index.html', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + 
type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII.py b/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII.py new file mode 100644 index 0000000000000000000000000000000000000000..acfff665d2ee53890be41ee73deb8a4e939ea15d --- /dev/null +++ b/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII.py @@ -0,0 +1,250 @@ +dataset_info = dict( + dataset_name='merged_COCO_AIC_MPII', + paper_info=dict( + author='Miroslav Purkrabek', + title='Merged Pose Estimation Dataset', + container='', + year='2024', + homepage='', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', 
+ id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='thorax_mpii', + id=17, + color=[255, 128, 0], + type='upper', + swap=''), + 18: + dict( + name='neck_mpii', + id=18, + color=[255, 128, 0], + type='upper', + swap=''), + 19: + dict( + name='neck_aic', + id=19, + color=[255, 128, 0], + type='upper', + swap=''), + 20: + dict( + name='top_head', + id=20, + color=[255, 128, 0], + type='upper', + swap=''), + 21: + dict( + name='pelvis', + id=21, + color=[255, 128, 0], + type='lower', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + # 4: + # dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 4: + dict(link=('left_hip', 'pelvis'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + # 7: + # dict( + # link=('left_shoulder', 'right_shoulder'), + # id=7, + # color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'thorax_mpii'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('pelvis', 'right_hip'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_shoulder', 'thorax_mpii'), + id=20, + color=[51, 153, 255]), + 21: + dict( + link=('thorax_mpii', 'neck_mpii'), + id=21, + color=[51, 153, 255]), + 22: + dict( + link=('thorax_mpii', 'neck_aic'), + id=22, + color=[51, 153, 255]), + 23: + dict( + link=('left_ear', 'top_head'), + id=23, + color=[51, 153, 255]), + 24: + dict( + 
link=('right_ear', 'top_head'), + id=24, + color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1., 1., 1., 1., 1., 1. + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, + 0.079, 0.079, 0.079, # Thorax and neck has the same as shoulders + 0.035, # Top of head has the same as ears + 0.107, # Pelvis has the same as hips + ]) diff --git a/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII_21.py b/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII_21.py new file mode 100644 index 0000000000000000000000000000000000000000..5ede684d16ab557f7917a9bfa3f90c8a016cc06c --- /dev/null +++ b/mmpose/configs/_base_/datasets/merged_COCO_AIC_MPII_21.py @@ -0,0 +1,238 @@ +dataset_info = dict( + dataset_name='merged_COCO_AIC_MPII_21', + paper_info=dict( + author='Miroslav Purkrabek', + title='Merged Pose Estimation Dataset', + container='', + year='2024', + homepage='', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='thorax_mpii', + id=17, + color=[255, 128, 0], + type='upper', + swap=''), + 18: + dict( + name='neck_mpii', + id=18, + color=[255, 128, 0], + type='upper', + swap=''), + 19: + dict( + name='neck_aic', + id=19, + color=[255, 128, 0], + type='upper', + swap=''), + 20: + dict( + name='top_head', + id=20, + color=[255, 128, 0], + type='upper', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + 
dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + # 7: + # dict( + # link=('left_shoulder', 'right_shoulder'), + # id=7, + # color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'thorax_mpii'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict( + link=('right_shoulder', 'thorax_mpii'), + id=19, + color=[51, 153, 255]), + 20: + dict( + link=('thorax_mpii', 'neck_mpii'), + id=20, + color=[51, 153, 255]), + 21: + dict( + link=('thorax_mpii', 'neck_aic'), + id=21, + color=[51, 153, 255]), + 22: + dict( + link=('left_ear', 'top_head'), + id=22, + color=[51, 153, 255]), + 23: + dict( + link=('right_ear', 'top_head'), + id=23, + color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1., 1., 1., 1., 1., 1. + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, + 0.079, 0.079, 0.079, # Thorax and neck has the same as shoulders + 0.035, # Top of head has the same as ears + ]) diff --git a/mmpose/configs/_base_/datasets/mhp.py b/mmpose/configs/_base_/datasets/mhp.py new file mode 100644 index 0000000000000000000000000000000000000000..e16e37c79cb63c4352c48bb4e45602b8408f534b --- /dev/null +++ b/mmpose/configs/_base_/datasets/mhp.py @@ -0,0 +1,156 @@ +dataset_info = dict( + dataset_name='mhp', + paper_info=dict( + author='Zhao, Jian and Li, Jianshu and Cheng, Yu and ' + 'Sim, Terence and Yan, Shuicheng and Feng, Jiashi', + title='Understanding humans in crowded scenes: ' + 'Deep nested adversarial learning and a ' + 'new benchmark for multi-human parsing', + container='Proceedings of the 26th ACM ' + 'international conference on Multimedia', + year='2018', + homepage='https://lv-mhp.github.io/dataset', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), + 7: + dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='upper_neck', + id=8, + 
color=[51, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='head_top', id=9, color=[51, 153, 255], type='upper', + swap=''), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='right_elbow', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 12: + dict( + name='right_shoulder', + id=12, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 13: + dict( + name='left_shoulder', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 14: + dict( + name='left_elbow', + id=14, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 15: + dict( + name='left_wrist', + id=15, + color=[0, 255, 0], + type='upper', + swap='right_wrist') + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), + 3: + dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: + dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: + dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), + 7: + dict(link=('thorax', 'upper_neck'), id=7, color=[51, 153, 255]), + 8: + dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), + 9: + dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict( + link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, + 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), + 14: + dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # Adapted from COCO dataset. 
+ sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) diff --git a/mmpose/configs/_base_/datasets/mpi_inf_3dhp.py b/mmpose/configs/_base_/datasets/mpi_inf_3dhp.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd0a70297b24456ea38566ac205bb585aa47e5d --- /dev/null +++ b/mmpose/configs/_base_/datasets/mpi_inf_3dhp.py @@ -0,0 +1,132 @@ +dataset_info = dict( + dataset_name='mpi_inf_3dhp', + paper_info=dict( + author='Mehta, Dushyant and Rhodin, Helge and Casas, Dan and ' + 'Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and ' + 'Theobalt, Christian', + title='Monocular 3D Human Pose Estimation In The Wild Using Improved ' + 'CNN Supervision', + container='2017 international conference on 3D vision (3DV)', + year='2017', + homepage='http://gvv.mpi-inf.mpg.de/3dhp-dataset', + ), + keypoint_info={ + 0: + dict( + name='head_top', id=0, color=[51, 153, 255], type='upper', + swap=''), + 1: + dict(name='neck', id=1, color=[51, 153, 255], type='upper', swap=''), + 2: + dict( + name='right_shoulder', + id=2, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='right_wrist', + id=4, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_elbow', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 7: + dict( + name='left_wrist', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 8: + dict( + name='right_hip', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 9: + dict( + name='right_knee', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='right_ankle', + id=10, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='left_knee', + id=12, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 13: + dict( + name='left_ankle', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 14: + dict(name='root', id=14, color=[51, 153, 255], type='lower', swap=''), + 15: + dict(name='spine', id=15, color=[51, 153, 255], type='upper', swap=''), + 16: + dict(name='head', id=16, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: dict(link=('neck', 'right_shoulder'), id=0, color=[255, 128, 0]), + 1: dict( + link=('right_shoulder', 'right_elbow'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_elbow', 'right_wrist'), id=2, color=[255, 128, 0]), + 3: dict(link=('neck', 'left_shoulder'), id=3, color=[0, 255, 0]), + 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: dict(link=('root', 'right_hip'), id=6, color=[255, 128, 0]), + 7: dict(link=('right_hip', 'right_knee'), id=7, color=[255, 128, 0]), + 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[255, 128, 0]), + 9: dict(link=('root', 'left_hip'), id=9, color=[0, 255, 0]), + 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), + 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 255, 0]), + 12: dict(link=('head_top', 'head'), id=12, color=[51, 153, 255]), + 13: dict(link=('head', 'neck'), id=13, color=[51,
153, 255]), + 14: dict(link=('neck', 'spine'), id=14, color=[51, 153, 255]), + 15: dict(link=('spine', 'root'), id=15, color=[51, 153, 255]) + }, + joint_weights=[1.] * 17, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/mpii.py b/mmpose/configs/_base_/datasets/mpii.py new file mode 100644 index 0000000000000000000000000000000000000000..6c2a491c7b58bc3eaa5c0056d3d7184bdd1d1cc7 --- /dev/null +++ b/mmpose/configs/_base_/datasets/mpii.py @@ -0,0 +1,155 @@ +dataset_info = dict( + dataset_name='mpii', + paper_info=dict( + author='Mykhaylo Andriluka and Leonid Pishchulin and ' + 'Peter Gehler and Schiele, Bernt', + title='2D Human Pose Estimation: New Benchmark and ' + 'State of the Art Analysis', + container='IEEE Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://human-pose.mpi-inf.mpg.de/', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), + 7: + dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='upper_neck', + id=8, + color=[51, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='head_top', id=9, color=[51, 153, 255], type='upper', + swap=''), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='right_elbow', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 12: + dict( + name='right_shoulder', + id=12, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 13: + dict( + name='left_shoulder', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 14: + dict( + name='left_elbow', + id=14, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 15: + dict( + name='left_wrist', + id=15, + color=[0, 255, 0], + type='upper', + swap='right_wrist') + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), + 3: + dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: + dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: + dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), + 7: + dict(link=('thorax', 'upper_neck'), id=7, color=[51, 153, 255]), + 8: + dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), + 9: + dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict( + link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, + 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), + 14: + 
dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # Adapted from COCO dataset. + sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) diff --git a/mmpose/configs/_base_/datasets/mpii_trb.py b/mmpose/configs/_base_/datasets/mpii_trb.py new file mode 100644 index 0000000000000000000000000000000000000000..73940d4b4827f8e08343c3b517360db788e4820d --- /dev/null +++ b/mmpose/configs/_base_/datasets/mpii_trb.py @@ -0,0 +1,380 @@ +dataset_info = dict( + dataset_name='mpii_trb', + paper_info=dict( + author='Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and ' + 'Liu, Wentao and Qian, Chen and Ouyang, Wanli', + title='TRB: A Novel Triplet Representation for ' + 'Understanding 2D Human Body', + container='Proceedings of the IEEE International ' + 'Conference on Computer Vision', + year='2019', + homepage='https://github.com/kennymckormick/' + 'Triplet-Representation-of-human-Body', + ), + keypoint_info={ + 0: + dict( + name='left_shoulder', + id=0, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 1: + dict( + name='right_shoulder', + id=1, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 2: + dict( + name='left_elbow', + id=2, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='left_wrist', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 5: + dict( + name='right_wrist', + id=5, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_hip', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='left_knee', + id=8, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 9: + dict( + name='right_knee', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_ankle', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 11: + dict( + name='right_ankle', + id=11, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 12: + dict(name='head', id=12, color=[51, 153, 255], type='upper', swap=''), + 13: + dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap=''), + 14: + dict( + name='right_neck', + id=14, + color=[255, 255, 255], + type='upper', + swap='left_neck'), + 15: + dict( + name='left_neck', + id=15, + color=[255, 255, 255], + type='upper', + swap='right_neck'), + 16: + dict( + name='medial_right_shoulder', + id=16, + color=[255, 255, 255], + type='upper', + swap='medial_left_shoulder'), + 17: + dict( + name='lateral_right_shoulder', + id=17, + color=[255, 255, 255], + type='upper', + swap='lateral_left_shoulder'), + 18: + dict( + name='medial_right_bow', + id=18, + color=[255, 255, 255], + type='upper', + swap='medial_left_bow'), + 19: + dict( + name='lateral_right_bow', + id=19, + color=[255, 255, 255], + type='upper', + swap='lateral_left_bow'), + 20: + dict( + name='medial_right_wrist', + id=20, + color=[255, 255, 255], + type='upper', + swap='medial_left_wrist'), + 21: + dict( + name='lateral_right_wrist', + id=21, + color=[255, 255, 255], + type='upper', + swap='lateral_left_wrist'), + 22: + dict( + name='medial_left_shoulder', + id=22, + color=[255, 255, 255], + type='upper', + 
swap='medial_right_shoulder'), + 23: + dict( + name='lateral_left_shoulder', + id=23, + color=[255, 255, 255], + type='upper', + swap='lateral_right_shoulder'), + 24: + dict( + name='medial_left_bow', + id=24, + color=[255, 255, 255], + type='upper', + swap='medial_right_bow'), + 25: + dict( + name='lateral_left_bow', + id=25, + color=[255, 255, 255], + type='upper', + swap='lateral_right_bow'), + 26: + dict( + name='medial_left_wrist', + id=26, + color=[255, 255, 255], + type='upper', + swap='medial_right_wrist'), + 27: + dict( + name='lateral_left_wrist', + id=27, + color=[255, 255, 255], + type='upper', + swap='lateral_right_wrist'), + 28: + dict( + name='medial_right_hip', + id=28, + color=[255, 255, 255], + type='lower', + swap='medial_left_hip'), + 29: + dict( + name='lateral_right_hip', + id=29, + color=[255, 255, 255], + type='lower', + swap='lateral_left_hip'), + 30: + dict( + name='medial_right_knee', + id=30, + color=[255, 255, 255], + type='lower', + swap='medial_left_knee'), + 31: + dict( + name='lateral_right_knee', + id=31, + color=[255, 255, 255], + type='lower', + swap='lateral_left_knee'), + 32: + dict( + name='medial_right_ankle', + id=32, + color=[255, 255, 255], + type='lower', + swap='medial_left_ankle'), + 33: + dict( + name='lateral_right_ankle', + id=33, + color=[255, 255, 255], + type='lower', + swap='lateral_left_ankle'), + 34: + dict( + name='medial_left_hip', + id=34, + color=[255, 255, 255], + type='lower', + swap='medial_right_hip'), + 35: + dict( + name='lateral_left_hip', + id=35, + color=[255, 255, 255], + type='lower', + swap='lateral_right_hip'), + 36: + dict( + name='medial_left_knee', + id=36, + color=[255, 255, 255], + type='lower', + swap='medial_right_knee'), + 37: + dict( + name='lateral_left_knee', + id=37, + color=[255, 255, 255], + type='lower', + swap='lateral_right_knee'), + 38: + dict( + name='medial_left_ankle', + id=38, + color=[255, 255, 255], + type='lower', + swap='medial_right_ankle'), + 39: + dict( + name='lateral_left_ankle', + id=39, + color=[255, 255, 255], + type='lower', + swap='lateral_right_ankle'), + }, + skeleton_info={ + 0: + dict(link=('head', 'neck'), id=0, color=[51, 153, 255]), + 1: + dict(link=('neck', 'left_shoulder'), id=1, color=[51, 153, 255]), + 2: + dict(link=('neck', 'right_shoulder'), id=2, color=[51, 153, 255]), + 3: + dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), + 4: + dict( + link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), + 5: + dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: + dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('right_shoulder', 'right_hip'), id=8, color=[51, 153, 255]), + 9: + dict(link=('left_hip', 'right_hip'), id=9, color=[51, 153, 255]), + 10: + dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_hip', 'right_knee'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_knee', 'left_ankle'), id=12, color=[0, 255, 0]), + 13: + dict(link=('right_knee', 'right_ankle'), id=13, color=[255, 128, 0]), + 14: + dict(link=('right_neck', 'left_neck'), id=14, color=[255, 255, 255]), + 15: + dict( + link=('medial_right_shoulder', 'lateral_right_shoulder'), + id=15, + color=[255, 255, 255]), + 16: + dict( + link=('medial_right_bow', 'lateral_right_bow'), + id=16, + color=[255, 255, 255]), + 17: + dict( + link=('medial_right_wrist', 'lateral_right_wrist'), + id=17, + color=[255, 255, 
255]), + 18: + dict( + link=('medial_left_shoulder', 'lateral_left_shoulder'), + id=18, + color=[255, 255, 255]), + 19: + dict( + link=('medial_left_bow', 'lateral_left_bow'), + id=19, + color=[255, 255, 255]), + 20: + dict( + link=('medial_left_wrist', 'lateral_left_wrist'), + id=20, + color=[255, 255, 255]), + 21: + dict( + link=('medial_right_hip', 'lateral_right_hip'), + id=21, + color=[255, 255, 255]), + 22: + dict( + link=('medial_right_knee', 'lateral_right_knee'), + id=22, + color=[255, 255, 255]), + 23: + dict( + link=('medial_right_ankle', 'lateral_right_ankle'), + id=23, + color=[255, 255, 255]), + 24: + dict( + link=('medial_left_hip', 'lateral_left_hip'), + id=24, + color=[255, 255, 255]), + 25: + dict( + link=('medial_left_knee', 'lateral_left_knee'), + id=25, + color=[255, 255, 255]), + 26: + dict( + link=('medial_left_ankle', 'lateral_left_ankle'), + id=26, + color=[255, 255, 255]) + }, + joint_weights=[1.] * 40, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/ochuman.py b/mmpose/configs/_base_/datasets/ochuman.py new file mode 100644 index 0000000000000000000000000000000000000000..2ef20838fe583fde133a97e688d30e91ae562746 --- /dev/null +++ b/mmpose/configs/_base_/datasets/ochuman.py @@ -0,0 +1,181 @@ +dataset_info = dict( + dataset_name='ochuman', + paper_info=dict( + author='Zhang, Song-Hai and Li, Ruilong and Dong, Xin and ' + 'Rosin, Paul and Cai, Zixi and Han, Xi and ' + 'Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min', + title='Pose2seg: Detection free human instance segmentation', + container='Proceedings of the IEEE conference on computer ' + 'vision and pattern recognition', + year='2019', + homepage='https://github.com/liruilong940607/OCHumanApi', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 
255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/onehand10k.py b/mmpose/configs/_base_/datasets/onehand10k.py new file mode 100644 index 0000000000000000000000000000000000000000..016770f14f3075dfa7d59389524a0c11a4feb802 --- /dev/null +++ b/mmpose/configs/_base_/datasets/onehand10k.py @@ -0,0 +1,142 @@ +dataset_info = dict( + dataset_name='onehand10k', + paper_info=dict( + author='Wang, Yangang and Peng, Cong and Liu, Yebin', + title='Mask-pose cascaded cnn for 2d hand pose estimation ' + 'from single color image', + container='IEEE Transactions on Circuits and Systems ' + 'for Video Technology', + year='2018', + homepage='https://www.yangangwang.com/papers/WANG-MCC-2018-10.html', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, 
color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 21, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/panoptic_body3d.py b/mmpose/configs/_base_/datasets/panoptic_body3d.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b19ac462415a840ca2e0b9e214bdb35d91b5e4 --- /dev/null +++ b/mmpose/configs/_base_/datasets/panoptic_body3d.py @@ -0,0 +1,160 @@ +dataset_info = dict( + dataset_name='panoptic_pose_3d', + paper_info=dict( + author='Joo, Hanbyul and Simon, Tomas and Li, Xulong' + 'and Liu, Hao and Tan, Lei and Gui, Lin and Banerjee, Sean' + 'and Godisart, Timothy and Nabbe, Bart and Matthews, Iain' + 'and Kanade, Takeo and Nobuhara, Shohei and Sheikh, Yaser', + title='Panoptic Studio: A Massively Multiview System ' + 'for Interaction Motion Capture', + container='IEEE Transactions on Pattern Analysis' + ' and Machine Intelligence', + year='2017', + homepage='http://domedb.perception.cs.cmu.edu', + ), + keypoint_info={ + 0: + dict(name='neck', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict(name='nose', id=1, color=[51, 153, 255], type='upper', swap=''), + 2: + dict(name='mid_hip', id=2, color=[0, 255, 0], type='lower', swap=''), + 3: + dict( + name='left_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 4: + dict( + name='left_elbow', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 5: + dict( + name='left_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='left_knee', + id=7, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 8: + dict( + name='left_ankle', + id=8, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 9: + dict( + name='right_shoulder', + id=9, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 10: + dict( + name='right_elbow', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 11: + dict( + name='right_wrist', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='right_knee', + id=13, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 14: + dict( + name='right_ankle', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 15: + dict( + name='left_eye', + id=15, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 16: + dict( + name='left_ear', + id=16, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 17: + dict( + name='right_eye', + id=17, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 18: + dict( + name='right_ear', + id=18, + color=[51, 153, 255], + type='upper', + swap='left_ear') + }, + skeleton_info={ + 0: dict(link=('nose', 'neck'), id=0, color=[51, 153, 255]), + 1: dict(link=('neck', 'left_shoulder'), id=1, color=[0, 255, 0]), + 2: dict(link=('neck', 'right_shoulder'), id=2, color=[255, 128, 0]), + 3: dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), + 4: dict( + link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: + dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), + 7: dict(link=('left_ankle', 'left_knee'), id=7, color=[0, 255, 0]), + 8: dict(link=('left_knee', 'left_hip'), id=8, color=[0, 255, 0]), + 9: dict(link=('right_ankle', 'right_knee'), id=9, color=[255, 128, 0]), + 10: 
dict(link=('right_knee', 'right_hip'), id=10, color=[255, 128, 0]), + 11: dict(link=('mid_hip', 'left_hip'), id=11, color=[0, 255, 0]), + 12: dict(link=('mid_hip', 'right_hip'), id=12, color=[255, 128, 0]), + 13: dict(link=('mid_hip', 'neck'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.0, 1.0, 1.0, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, + 1.5, 1.0, 1.0, 1.0, 1.0 + ], + sigmas=[ + 0.026, 0.026, 0.107, 0.079, 0.072, 0.062, 0.107, 0.087, 0.089, 0.079, + 0.072, 0.062, 0.107, 0.087, 0.089, 0.025, 0.035, 0.025, 0.035 + ]) diff --git a/mmpose/configs/_base_/datasets/panoptic_hand2d.py b/mmpose/configs/_base_/datasets/panoptic_hand2d.py new file mode 100644 index 0000000000000000000000000000000000000000..7a65731ba87b155beb1b40591fd9acb232c2afc6 --- /dev/null +++ b/mmpose/configs/_base_/datasets/panoptic_hand2d.py @@ -0,0 +1,143 @@ +dataset_info = dict( + dataset_name='panoptic_hand2d', + paper_info=dict( + author='Simon, Tomas and Joo, Hanbyul and ' + 'Matthews, Iain and Sheikh, Yaser', + title='Hand keypoint detection in single images using ' + 'multiview bootstrapping', + container='Proceedings of the IEEE conference on ' + 'Computer Vision and Pattern Recognition', + year='2017', + homepage='http://domedb.perception.cs.cmu.edu/handdb.html', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 
153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/posetrack18.py b/mmpose/configs/_base_/datasets/posetrack18.py new file mode 100644 index 0000000000000000000000000000000000000000..5aefd1c97fe083df35ee88bebab4f99134c27971 --- /dev/null +++ b/mmpose/configs/_base_/datasets/posetrack18.py @@ -0,0 +1,176 @@ +dataset_info = dict( + dataset_name='posetrack18', + paper_info=dict( + author='Andriluka, Mykhaylo and Iqbal, Umar and ' + 'Insafutdinov, Eldar and Pishchulin, Leonid and ' + 'Milan, Anton and Gall, Juergen and Schiele, Bernt', + title='Posetrack: A benchmark for human pose estimation and tracking', + container='Proceedings of the IEEE Conference on ' + 'Computer Vision and Pattern Recognition', + year='2018', + homepage='https://posetrack.net/users/download.php', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='head_bottom', + id=1, + color=[51, 153, 255], + type='upper', + swap=''), + 2: + dict( + name='head_top', id=2, color=[51, 153, 255], type='upper', + swap=''), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( 
+ name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('nose', 'head_bottom'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'head_top'), id=13, color=[51, 153, 255]), + 14: + dict( + link=('head_bottom', 'left_shoulder'), id=14, color=[51, 153, + 255]), + 15: + dict( + link=('head_bottom', 'right_shoulder'), + id=15, + color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/mmpose/configs/_base_/datasets/rhd2d.py b/mmpose/configs/_base_/datasets/rhd2d.py new file mode 100644 index 0000000000000000000000000000000000000000..4631ccd03814155b06687e0b1ba2b83404c837fc --- /dev/null +++ b/mmpose/configs/_base_/datasets/rhd2d.py @@ -0,0 +1,151 @@ +dataset_info = dict( + dataset_name='rhd2d', + paper_info=dict( + author='Christian Zimmermann and Thomas Brox', + title='Learning to Estimate 3D Hand Pose from Single RGB Images', + container='arXiv', + year='2017', + homepage='https://lmb.informatik.uni-freiburg.de/resources/' + 'datasets/RenderedHandposeDataset.en.html', + ), + # In RHD, keypoints 1-4 are the left thumb ordered from tip to palm, and + # the other fingers follow the same tip-to-palm order. Please refer to + # `https://lmb.informatik.uni-freiburg.de/resources/datasets/ + # RenderedHandpose/README` for details of the keypoint definition. + # However, COCO-WholeBody-Hand, FreiHand and CMU Panoptic HandDB annotate + # the fingers in the inverse order. Pay attention to this if you want to + # combine RHD with other hand datasets to train a single model. + # Also, note that 'keypoint_info' does not directly affect the order of + # the keypoints in the dataset. It is mostly used for visualization and + # for storing the flip_pairs information.
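+ # Illustrative sketch only, not part of the upstream config: if the
+ # `keypoint_info` mapping defined below were available as a standalone
+ # dict (here it is only a keyword argument, so the names are hypothetical),
+ # the symmetric flip pairs could be derived from its 'swap' fields roughly as
+ #   name_to_id = {v['name']: i for i, v in keypoint_info.items()}
+ #   flip_pairs = sorted({tuple(sorted((i, name_to_id[v['swap']])))
+ #                        for i, v in keypoint_info.items() if v['swap']})
+ # For RHD every 'swap' field is empty, so the derived list would be empty;
+ # body datasets such as COCO yield the usual left/right keypoint pairs.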
+ keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb4', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb3', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb2', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb1', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger4', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger3', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger2', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger1', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger4', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger3', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger2', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger1', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger4', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger3', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger2', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger1', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger4', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger3', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger2', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger1', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 21, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/shelf.py b/mmpose/configs/_base_/datasets/shelf.py new file mode 100644 index 0000000000000000000000000000000000000000..5fe6e42b3b44e3f65947284efd9ffac58d41d43f --- /dev/null +++ b/mmpose/configs/_base_/datasets/shelf.py @@ -0,0 +1,151 @@ +dataset_info = dict( + dataset_name='shelf', + paper_info=dict( + author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' + 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', + title='3D Pictorial Structures for Multiple Human Pose Estimation', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://campar.in.tum.de/Chair/MultiHumanPose', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict( + name='right_wrist', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 7: + dict( + name='right_elbow', + id=7, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 8: + dict( + name='right_shoulder', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 9: + dict( + name='left_shoulder', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 10: + dict( + name='left_elbow', + id=10, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 11: + dict( + name='left_wrist', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 12: + dict( + name='bottom_head', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict( + name='top_head', + id=13, + color=[51, 153, 255], + type='upper', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), + 3: + dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), + 4: + dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), + 6: + dict( + link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_elbow'), id=7, color=[0, 255, 0]), + 8: + dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), + 9: + dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict( + link=('right_shoulder', 'bottom_head'), id=11, color=[255, 128, + 0]), + 12: + dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), + 13: + dict(link=('bottom_head', 'top_head'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 + ], + sigmas=[ + 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, + 0.072, 0.062, 0.026, 0.026 + ]) diff --git 
a/mmpose/configs/_base_/datasets/ubody2d.py b/mmpose/configs/_base_/datasets/ubody2d.py new file mode 100644 index 0000000000000000000000000000000000000000..8486db05ab3cf961da15eb5e15ed570d27c3cb09 --- /dev/null +++ b/mmpose/configs/_base_/datasets/ubody2d.py @@ -0,0 +1,1153 @@ +dataset_info = dict( + dataset_name='ubody2d', + paper_info=dict( + author='Jing Lin, Ailing Zeng, Haoqian Wang, Lei Zhang, Yu Li', + title='One-Stage 3D Whole-Body Mesh Recovery with Component Aware' + 'Transformer', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2023', + homepage='https://github.com/IDEA-Research/OSX', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='left_big_toe', + id=17, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 18: + dict( + name='left_small_toe', + id=18, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 19: + dict( + name='left_heel', + id=19, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 20: + dict( + name='right_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 21: + dict( + name='right_small_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 22: + dict( + name='right_heel', + id=22, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 23: + dict( + name='face-0', + id=23, + color=[255, 255, 255], + type='', + swap='face-16'), + 24: + dict( + name='face-1', + id=24, + color=[255, 255, 255], + type='', + swap='face-15'), + 25: + dict( + name='face-2', + id=25, + color=[255, 255, 255], + type='', + swap='face-14'), + 26: + dict( + name='face-3', + id=26, + color=[255, 255, 255], + type='', + swap='face-13'), + 27: + dict( + name='face-4', + id=27, + color=[255, 255, 255], + type='', + swap='face-12'), + 
28: + dict( + name='face-5', + id=28, + color=[255, 255, 255], + type='', + swap='face-11'), + 29: + dict( + name='face-6', + id=29, + color=[255, 255, 255], + type='', + swap='face-10'), + 30: + dict( + name='face-7', + id=30, + color=[255, 255, 255], + type='', + swap='face-9'), + 31: + dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), + 32: + dict( + name='face-9', + id=32, + color=[255, 255, 255], + type='', + swap='face-7'), + 33: + dict( + name='face-10', + id=33, + color=[255, 255, 255], + type='', + swap='face-6'), + 34: + dict( + name='face-11', + id=34, + color=[255, 255, 255], + type='', + swap='face-5'), + 35: + dict( + name='face-12', + id=35, + color=[255, 255, 255], + type='', + swap='face-4'), + 36: + dict( + name='face-13', + id=36, + color=[255, 255, 255], + type='', + swap='face-3'), + 37: + dict( + name='face-14', + id=37, + color=[255, 255, 255], + type='', + swap='face-2'), + 38: + dict( + name='face-15', + id=38, + color=[255, 255, 255], + type='', + swap='face-1'), + 39: + dict( + name='face-16', + id=39, + color=[255, 255, 255], + type='', + swap='face-0'), + 40: + dict( + name='face-17', + id=40, + color=[255, 255, 255], + type='', + swap='face-26'), + 41: + dict( + name='face-18', + id=41, + color=[255, 255, 255], + type='', + swap='face-25'), + 42: + dict( + name='face-19', + id=42, + color=[255, 255, 255], + type='', + swap='face-24'), + 43: + dict( + name='face-20', + id=43, + color=[255, 255, 255], + type='', + swap='face-23'), + 44: + dict( + name='face-21', + id=44, + color=[255, 255, 255], + type='', + swap='face-22'), + 45: + dict( + name='face-22', + id=45, + color=[255, 255, 255], + type='', + swap='face-21'), + 46: + dict( + name='face-23', + id=46, + color=[255, 255, 255], + type='', + swap='face-20'), + 47: + dict( + name='face-24', + id=47, + color=[255, 255, 255], + type='', + swap='face-19'), + 48: + dict( + name='face-25', + id=48, + color=[255, 255, 255], + type='', + swap='face-18'), + 49: + dict( + name='face-26', + id=49, + color=[255, 255, 255], + type='', + swap='face-17'), + 50: + dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), + 51: + dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), + 52: + dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), + 53: + dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict( + name='face-31', + id=54, + color=[255, 255, 255], + type='', + swap='face-35'), + 55: + dict( + name='face-32', + id=55, + color=[255, 255, 255], + type='', + swap='face-34'), + 56: + dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-34', + id=57, + color=[255, 255, 255], + type='', + swap='face-32'), + 58: + dict( + name='face-35', + id=58, + color=[255, 255, 255], + type='', + swap='face-31'), + 59: + dict( + name='face-36', + id=59, + color=[255, 255, 255], + type='', + swap='face-45'), + 60: + dict( + name='face-37', + id=60, + color=[255, 255, 255], + type='', + swap='face-44'), + 61: + dict( + name='face-38', + id=61, + color=[255, 255, 255], + type='', + swap='face-43'), + 62: + dict( + name='face-39', + id=62, + color=[255, 255, 255], + type='', + swap='face-42'), + 63: + dict( + name='face-40', + id=63, + color=[255, 255, 255], + type='', + swap='face-47'), + 64: + dict( + name='face-41', + id=64, + color=[255, 255, 255], + type='', + swap='face-46'), + 65: + dict( + name='face-42', + id=65, + color=[255, 255, 255], + type='', + swap='face-39'), + 66: + dict( + 
name='face-43', + id=66, + color=[255, 255, 255], + type='', + swap='face-38'), + 67: + dict( + name='face-44', + id=67, + color=[255, 255, 255], + type='', + swap='face-37'), + 68: + dict( + name='face-45', + id=68, + color=[255, 255, 255], + type='', + swap='face-36'), + 69: + dict( + name='face-46', + id=69, + color=[255, 255, 255], + type='', + swap='face-41'), + 70: + dict( + name='face-47', + id=70, + color=[255, 255, 255], + type='', + swap='face-40'), + 71: + dict( + name='face-48', + id=71, + color=[255, 255, 255], + type='', + swap='face-54'), + 72: + dict( + name='face-49', + id=72, + color=[255, 255, 255], + type='', + swap='face-53'), + 73: + dict( + name='face-50', + id=73, + color=[255, 255, 255], + type='', + swap='face-52'), + 74: + dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), + 75: + dict( + name='face-52', + id=75, + color=[255, 255, 255], + type='', + swap='face-50'), + 76: + dict( + name='face-53', + id=76, + color=[255, 255, 255], + type='', + swap='face-49'), + 77: + dict( + name='face-54', + id=77, + color=[255, 255, 255], + type='', + swap='face-48'), + 78: + dict( + name='face-55', + id=78, + color=[255, 255, 255], + type='', + swap='face-59'), + 79: + dict( + name='face-56', + id=79, + color=[255, 255, 255], + type='', + swap='face-58'), + 80: + dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), + 81: + dict( + name='face-58', + id=81, + color=[255, 255, 255], + type='', + swap='face-56'), + 82: + dict( + name='face-59', + id=82, + color=[255, 255, 255], + type='', + swap='face-55'), + 83: + dict( + name='face-60', + id=83, + color=[255, 255, 255], + type='', + swap='face-64'), + 84: + dict( + name='face-61', + id=84, + color=[255, 255, 255], + type='', + swap='face-63'), + 85: + dict(name='face-62', id=85, color=[255, 255, 255], type='', swap=''), + 86: + dict( + name='face-63', + id=86, + color=[255, 255, 255], + type='', + swap='face-61'), + 87: + dict( + name='face-64', + id=87, + color=[255, 255, 255], + type='', + swap='face-60'), + 88: + dict( + name='face-65', + id=88, + color=[255, 255, 255], + type='', + swap='face-67'), + 89: + dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), + 90: + dict( + name='face-67', + id=90, + color=[255, 255, 255], + type='', + swap='face-65'), + 91: + dict( + name='left_hand_root', + id=91, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 92: + dict( + name='left_thumb1', + id=92, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 93: + dict( + name='left_thumb2', + id=93, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 94: + dict( + name='left_thumb3', + id=94, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 95: + dict( + name='left_thumb4', + id=95, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 96: + dict( + name='left_forefinger1', + id=96, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 97: + dict( + name='left_forefinger2', + id=97, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 98: + dict( + name='left_forefinger3', + id=98, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 99: + dict( + name='left_forefinger4', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 100: + dict( + name='left_middle_finger1', + id=100, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 101: + dict( + name='left_middle_finger2', + id=101, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 102: + 
dict( + name='left_middle_finger3', + id=102, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 103: + dict( + name='left_middle_finger4', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 104: + dict( + name='left_ring_finger1', + id=104, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 105: + dict( + name='left_ring_finger2', + id=105, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 106: + dict( + name='left_ring_finger3', + id=106, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 107: + dict( + name='left_ring_finger4', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 108: + dict( + name='left_pinky_finger1', + id=108, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 109: + dict( + name='left_pinky_finger2', + id=109, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 110: + dict( + name='left_pinky_finger3', + id=110, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 111: + dict( + name='left_pinky_finger4', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 112: + dict( + name='right_hand_root', + id=112, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 113: + dict( + name='right_thumb1', + id=113, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 114: + dict( + name='right_thumb2', + id=114, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 115: + dict( + name='right_thumb3', + id=115, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 116: + dict( + name='right_thumb4', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 117: + dict( + name='right_forefinger1', + id=117, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 118: + dict( + name='right_forefinger2', + id=118, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 119: + dict( + name='right_forefinger3', + id=119, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 120: + dict( + name='right_forefinger4', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 121: + dict( + name='right_middle_finger1', + id=121, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 122: + dict( + name='right_middle_finger2', + id=122, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 123: + dict( + name='right_middle_finger3', + id=123, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 124: + dict( + name='right_middle_finger4', + id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 125: + dict( + name='right_ring_finger1', + id=125, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 126: + dict( + name='right_ring_finger2', + id=126, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 127: + dict( + name='right_ring_finger3', + id=127, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 128: + dict( + name='right_ring_finger4', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 129: + dict( + name='right_pinky_finger1', + id=129, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 130: + dict( + name='right_pinky_finger2', + id=130, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 131: + dict( + name='right_pinky_finger3', + id=131, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 132: + dict( + name='right_pinky_finger4', + id=132, + color=[0, 
255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ankle', 'left_big_toe'), id=19, color=[0, 255, 0]), + 20: + dict(link=('left_ankle', 'left_small_toe'), id=20, color=[0, 255, 0]), + 21: + dict(link=('left_ankle', 'left_heel'), id=21, color=[0, 255, 0]), + 22: + dict( + link=('right_ankle', 'right_big_toe'), id=22, color=[255, 128, 0]), + 23: + dict( + link=('right_ankle', 'right_small_toe'), + id=23, + color=[255, 128, 0]), + 24: + dict(link=('right_ankle', 'right_heel'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('left_hand_root', 'left_thumb1'), id=25, color=[255, 128, + 0]), + 26: + dict(link=('left_thumb1', 'left_thumb2'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_thumb2', 'left_thumb3'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb3', 'left_thumb4'), id=28, color=[255, 128, 0]), + 29: + dict( + link=('left_hand_root', 'left_forefinger1'), + id=29, + color=[255, 153, 255]), + 30: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=30, + color=[255, 153, 255]), + 31: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=31, + color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=32, + color=[255, 153, 255]), + 33: + dict( + link=('left_hand_root', 'left_middle_finger1'), + id=33, + color=[102, 178, 255]), + 34: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=34, + color=[102, 178, 255]), + 35: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_hand_root', 'left_ring_finger1'), + id=37, + color=[255, 51, 51]), + 38: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=38, + color=[255, 51, 51]), + 39: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=39, + color=[255, 51, 51]), + 40: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=40, + color=[255, 51, 51]), + 41: + dict( + 
link=('left_hand_root', 'left_pinky_finger1'), + id=41, + color=[0, 255, 0]), + 42: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=42, + color=[0, 255, 0]), + 43: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('right_hand_root', 'right_thumb1'), + id=45, + color=[255, 128, 0]), + 46: + dict( + link=('right_thumb1', 'right_thumb2'), id=46, color=[255, 128, 0]), + 47: + dict( + link=('right_thumb2', 'right_thumb3'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb3', 'right_thumb4'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_hand_root', 'right_forefinger1'), + id=49, + color=[255, 153, 255]), + 50: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=50, + color=[255, 153, 255]), + 51: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_hand_root', 'right_middle_finger1'), + id=53, + color=[102, 178, 255]), + 54: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=54, + color=[102, 178, 255]), + 55: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_hand_root', 'right_ring_finger1'), + id=57, + color=[255, 51, 51]), + 58: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=58, + color=[255, 51, 51]), + 59: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_hand_root', 'right_pinky_finger1'), + id=61, + color=[0, 255, 0]), + 62: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=62, + color=[0, 255, 0]), + 63: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=64, + color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 133, + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L175' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, 0.066, + 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, + 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, + 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, + 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, + 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, + 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, + 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, + 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, + 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, + 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, + 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, + 0.019, 0.022, 0.031 + ]) diff --git a/mmpose/configs/_base_/datasets/ubody3d.py b/mmpose/configs/_base_/datasets/ubody3d.py new file mode 100644 index 0000000000000000000000000000000000000000..9242559ea1fd85291d0f9136b65fdd5cb66664fb --- /dev/null +++ b/mmpose/configs/_base_/datasets/ubody3d.py @@ -0,0 +1,958 @@ +dataset_info = dict( + dataset_name='ubody3d', + paper_info=dict( + author='Jing Lin, Ailing Zeng, Haoqian Wang, Lei Zhang, Yu Li', + title='One-Stage 3D Whole-Body Mesh Recovery with Component Aware' + 'Transformer', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2023', + homepage='https://github.com/IDEA-Research/OSX', + ), + keypoint_info={ + 0: + dict(name='Pelvis', id=0, color=[0, 255, 0], type='', swap=''), + 1: + dict( + name='L_Hip', id=1, color=[0, 255, 0], type='lower', swap='R_Hip'), + 2: + dict( + name='R_Hip', id=2, color=[0, 255, 0], type='lower', swap='L_Hip'), + 3: + dict( + name='L_Knee', + id=3, + color=[0, 255, 0], + type='lower', + swap='R_Knee'), + 4: + dict( + name='R_Knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='L_Knee'), + 5: + dict( + name='L_Ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='R_Ankle'), + 6: + dict( + name='R_Ankle', + id=6, + color=[0, 255, 0], + type='lower', + swap='L_Ankle'), + 7: + dict(name='Neck', id=7, color=[0, 255, 0], type='upper', swap=''), + 8: + dict( + name='L_Shoulder', + id=8, + color=[0, 255, 0], + type='upper', + swap='R_Shoulder'), + 9: + dict( + name='R_Shoulder', + id=9, + color=[0, 255, 0], + type='upper', + swap='L_Shoulder'), + 10: + dict( + name='L_Elbow', + id=10, + color=[0, 255, 0], + type='upper', + swap='R_Elbow'), + 11: + dict( + name='R_Elbow', + id=11, + color=[0, 255, 0], + type='upper', + swap='L_Elbow'), + 12: + dict( + name='L_Wrist', + id=12, + color=[0, 255, 0], + type='upper', + swap='R_Wrist'), + 13: + dict( + name='R_Wrist', + id=13, + color=[0, 255, 0], + type='upper', + swap='L_Wrist'), + 14: + dict( + name='L_Big_toe', + id=14, + color=[0, 255, 0], + type='lower', + swap='R_Big_toe'), + 15: + dict( + name='L_Small_toe', + id=15, + color=[0, 255, 0], + type='lower', + swap='R_Small_toe'), + 16: + dict( + name='L_Heel', + id=16, + color=[0, 255, 0], + type='lower', + swap='R_Heel'), + 17: + dict( + name='R_Big_toe', + id=17, + color=[0, 255, 0], + type='lower', + swap='L_Big_toe'), + 18: + dict( + name='R_Small_toe', + id=18, + color=[0, 255, 0], + type='lower', + 
swap='L_Small_toe'), + 19: + dict( + name='R_Heel', + id=19, + color=[0, 255, 0], + type='lower', + swap='L_Heel'), + 20: + dict( + name='L_Ear', id=20, color=[0, 255, 0], type='upper', + swap='R_Ear'), + 21: + dict( + name='R_Ear', id=21, color=[0, 255, 0], type='upper', + swap='L_Ear'), + 22: + dict(name='L_Eye', id=22, color=[0, 255, 0], type='', swap='R_Eye'), + 23: + dict(name='R_Eye', id=23, color=[0, 255, 0], type='', swap='L_Eye'), + 24: + dict(name='Nose', id=24, color=[0, 255, 0], type='upper', swap=''), + 25: + dict( + name='L_Thumb_1', + id=25, + color=[255, 128, 0], + type='', + swap='R_Thumb_1'), + 26: + dict( + name='L_Thumb_2', + id=26, + color=[255, 128, 0], + type='', + swap='R_Thumb_2'), + 27: + dict( + name='L_Thumb_3', + id=27, + color=[255, 128, 0], + type='', + swap='R_Thumb_3'), + 28: + dict( + name='L_Thumb_4', + id=28, + color=[255, 128, 0], + type='', + swap='R_Thumb_4'), + 29: + dict( + name='L_Index_1', + id=29, + color=[255, 128, 0], + type='', + swap='R_Index_1'), + 30: + dict( + name='L_Index_2', + id=30, + color=[255, 128, 0], + type='', + swap='R_Index_2'), + 31: + dict( + name='L_Index_3', + id=31, + color=[255, 128, 0], + type='', + swap='R_Index_3'), + 32: + dict( + name='L_Index_4', + id=32, + color=[255, 128, 0], + type='', + swap='R_Index_4'), + 33: + dict( + name='L_Middle_1', + id=33, + color=[255, 128, 0], + type='', + swap='R_Middle_1'), + 34: + dict( + name='L_Middle_2', + id=34, + color=[255, 128, 0], + type='', + swap='R_Middle_2'), + 35: + dict( + name='L_Middle_3', + id=35, + color=[255, 128, 0], + type='', + swap='R_Middle_3'), + 36: + dict( + name='L_Middle_4', + id=36, + color=[255, 128, 0], + type='', + swap='R_Middle_4'), + 37: + dict( + name='L_Ring_1', + id=37, + color=[255, 128, 0], + type='', + swap='R_Ring_1'), + 38: + dict( + name='L_Ring_2', + id=38, + color=[255, 128, 0], + type='', + swap='R_Ring_2'), + 39: + dict( + name='L_Ring_3', + id=39, + color=[255, 128, 0], + type='', + swap='R_Ring_3'), + 40: + dict( + name='L_Ring_4', + id=40, + color=[255, 128, 0], + type='', + swap='R_Ring_4'), + 41: + dict( + name='L_Pinky_1', + id=41, + color=[255, 128, 0], + type='', + swap='R_Pinky_1'), + 42: + dict( + name='L_Pinky_2', + id=42, + color=[255, 128, 0], + type='', + swap='R_Pinky_2'), + 43: + dict( + name='L_Pinky_3', + id=43, + color=[255, 128, 0], + type='', + swap='R_Pinky_3'), + 44: + dict( + name='L_Pinky_4', + id=44, + color=[255, 128, 0], + type='', + swap='R_Pinky_4'), + 45: + dict( + name='R_Thumb_1', + id=45, + color=[255, 128, 0], + type='', + swap='L_Thumb_1'), + 46: + dict( + name='R_Thumb_2', + id=46, + color=[255, 128, 0], + type='', + swap='L_Thumb_2'), + 47: + dict( + name='R_Thumb_3', + id=47, + color=[255, 128, 0], + type='', + swap='L_Thumb_3'), + 48: + dict( + name='R_Thumb_4', + id=48, + color=[255, 128, 0], + type='', + swap='L_Thumb_4'), + 49: + dict( + name='R_Index_1', + id=49, + color=[255, 128, 0], + type='', + swap='L_Index_1'), + 50: + dict( + name='R_Index_2', + id=50, + color=[255, 128, 0], + type='', + swap='L_Index_2'), + 51: + dict( + name='R_Index_3', + id=51, + color=[255, 128, 0], + type='', + swap='L_Index_3'), + 52: + dict( + name='R_Index_4', + id=52, + color=[255, 128, 0], + type='', + swap='L_Index_4'), + 53: + dict( + name='R_Middle_1', + id=53, + color=[255, 128, 0], + type='', + swap='L_Middle_1'), + 54: + dict( + name='R_Middle_2', + id=54, + color=[255, 128, 0], + type='', + swap='L_Middle_2'), + 55: + dict( + name='R_Middle_3', + id=55, + color=[255, 128, 0], + type='', + 
swap='L_Middle_3'), + 56: + dict( + name='R_Middle_4', + id=56, + color=[255, 128, 0], + type='', + swap='L_Middle_4'), + 57: + dict( + name='R_Ring_1', + id=57, + color=[255, 128, 0], + type='', + swap='L_Ring_1'), + 58: + dict( + name='R_Ring_2', + id=58, + color=[255, 128, 0], + type='', + swap='L_Ring_2'), + 59: + dict( + name='R_Ring_3', + id=59, + color=[255, 128, 0], + type='', + swap='L_Ring_3'), + 60: + dict( + name='R_Ring_4', + id=60, + color=[255, 128, 0], + type='', + swap='L_Ring_4'), + 61: + dict( + name='R_Pinky_1', + id=61, + color=[255, 128, 0], + type='', + swap='L_Pinky_1'), + 62: + dict( + name='R_Pinky_2', + id=62, + color=[255, 128, 0], + type='', + swap='L_Pinky_2'), + 63: + dict( + name='R_Pinky_3', + id=63, + color=[255, 128, 0], + type='', + swap='L_Pinky_3'), + 64: + dict( + name='R_Pinky_4', + id=64, + color=[255, 128, 0], + type='', + swap='L_Pinky_4'), + 65: + dict(name='Face_1', id=65, color=[255, 255, 255], type='', swap=''), + 66: + dict(name='Face_2', id=66, color=[255, 255, 255], type='', swap=''), + 67: + dict( + name='Face_3', + id=67, + color=[255, 255, 255], + type='', + swap='Face_4'), + 68: + dict( + name='Face_4', + id=68, + color=[255, 255, 255], + type='', + swap='Face_3'), + 69: + dict( + name='Face_5', + id=69, + color=[255, 255, 255], + type='', + swap='Face_14'), + 70: + dict( + name='Face_6', + id=70, + color=[255, 255, 255], + type='', + swap='Face_13'), + 71: + dict( + name='Face_7', + id=71, + color=[255, 255, 255], + type='', + swap='Face_12'), + 72: + dict( + name='Face_8', + id=72, + color=[255, 255, 255], + type='', + swap='Face_11'), + 73: + dict( + name='Face_9', + id=73, + color=[255, 255, 255], + type='', + swap='Face_10'), + 74: + dict( + name='Face_10', + id=74, + color=[255, 255, 255], + type='', + swap='Face_9'), + 75: + dict( + name='Face_11', + id=75, + color=[255, 255, 255], + type='', + swap='Face_8'), + 76: + dict( + name='Face_12', + id=76, + color=[255, 255, 255], + type='', + swap='Face_7'), + 77: + dict( + name='Face_13', + id=77, + color=[255, 255, 255], + type='', + swap='Face_6'), + 78: + dict( + name='Face_14', + id=78, + color=[255, 255, 255], + type='', + swap='Face_5'), + 79: + dict(name='Face_15', id=79, color=[255, 255, 255], type='', swap=''), + 80: + dict(name='Face_16', id=80, color=[255, 255, 255], type='', swap=''), + 81: + dict(name='Face_17', id=81, color=[255, 255, 255], type='', swap=''), + 82: + dict(name='Face_18', id=82, color=[255, 255, 255], type='', swap=''), + 83: + dict( + name='Face_19', + id=83, + color=[255, 255, 255], + type='', + swap='Face_23'), + 84: + dict( + name='Face_20', + id=84, + color=[255, 255, 255], + type='', + swap='Face_22'), + 85: + dict(name='Face_21', id=85, color=[255, 255, 255], type='', swap=''), + 86: + dict( + name='Face_22', + id=86, + color=[255, 255, 255], + type='', + swap='Face_20'), + 87: + dict( + name='Face_23', + id=87, + color=[255, 255, 255], + type='', + swap='Face_19'), + 88: + dict( + name='Face_24', + id=88, + color=[255, 255, 255], + type='', + swap='Face_33'), + 89: + dict( + name='Face_25', + id=89, + color=[255, 255, 255], + type='', + swap='Face_32'), + 90: + dict( + name='Face_26', + id=90, + color=[255, 255, 255], + type='', + swap='Face_31'), + 91: + dict( + name='Face_27', + id=91, + color=[255, 255, 255], + type='', + swap='Face_30'), + 92: + dict( + name='Face_28', + id=92, + color=[255, 255, 255], + type='', + swap='Face_35'), + 93: + dict( + name='Face_29', + id=93, + color=[255, 255, 255], + type='', + swap='Face_34'), + 94: + dict( + 
name='Face_30', + id=94, + color=[255, 255, 255], + type='', + swap='Face_27'), + 95: + dict( + name='Face_31', + id=95, + color=[255, 255, 255], + type='', + swap='Face_26'), + 96: + dict( + name='Face_32', + id=96, + color=[255, 255, 255], + type='', + swap='Face_25'), + 97: + dict( + name='Face_33', + id=97, + color=[255, 255, 255], + type='', + swap='Face_24'), + 98: + dict( + name='Face_34', + id=98, + color=[255, 255, 255], + type='', + swap='Face_29'), + 99: + dict( + name='Face_35', + id=99, + color=[255, 255, 255], + type='', + swap='Face_28'), + 100: + dict( + name='Face_36', + id=100, + color=[255, 255, 255], + type='', + swap='Face_42'), + 101: + dict( + name='Face_37', + id=101, + color=[255, 255, 255], + type='', + swap='Face_41'), + 102: + dict( + name='Face_38', + id=102, + color=[255, 255, 255], + type='', + swap='Face_40'), + 103: + dict(name='Face_39', id=103, color=[255, 255, 255], type='', swap=''), + 104: + dict( + name='Face_40', + id=104, + color=[255, 255, 255], + type='', + swap='Face_38'), + 105: + dict( + name='Face_41', + id=105, + color=[255, 255, 255], + type='', + swap='Face_37'), + 106: + dict( + name='Face_42', + id=106, + color=[255, 255, 255], + type='', + swap='Face_36'), + 107: + dict( + name='Face_43', + id=107, + color=[255, 255, 255], + type='', + swap='Face_47'), + 108: + dict( + name='Face_44', + id=108, + color=[255, 255, 255], + type='', + swap='Face_46'), + 109: + dict(name='Face_45', id=109, color=[255, 255, 255], type='', swap=''), + 110: + dict( + name='Face_46', + id=110, + color=[255, 255, 255], + type='', + swap='Face_44'), + 111: + dict( + name='Face_47', + id=111, + color=[255, 255, 255], + type='', + swap='Face_43'), + 112: + dict( + name='Face_48', + id=112, + color=[255, 255, 255], + type='', + swap='Face_52'), + 113: + dict( + name='Face_49', + id=113, + color=[255, 255, 255], + type='', + swap='Face_51'), + 114: + dict(name='Face_50', id=114, color=[255, 255, 255], type='', swap=''), + 115: + dict( + name='Face_51', + id=115, + color=[255, 255, 255], + type='', + swap='Face_49'), + 116: + dict( + name='Face_52', + id=116, + color=[255, 255, 255], + type='', + swap='Face_48'), + 117: + dict( + name='Face_53', + id=117, + color=[255, 255, 255], + type='', + swap='Face_55'), + 118: + dict(name='Face_54', id=118, color=[255, 255, 255], type='', swap=''), + 119: + dict( + name='Face_55', + id=119, + color=[255, 255, 255], + type='', + swap='Face_53'), + 120: + dict( + name='Face_56', + id=120, + color=[255, 255, 255], + type='', + swap='Face_72'), + 121: + dict( + name='Face_57', + id=121, + color=[255, 255, 255], + type='', + swap='Face_71'), + 122: + dict( + name='Face_58', + id=122, + color=[255, 255, 255], + type='', + swap='Face_70'), + 123: + dict( + name='Face_59', + id=123, + color=[255, 255, 255], + type='', + swap='Face_69'), + 124: + dict( + name='Face_60', + id=124, + color=[255, 255, 255], + type='', + swap='Face_68'), + 125: + dict( + name='Face_61', + id=125, + color=[255, 255, 255], + type='', + swap='Face_67'), + 126: + dict( + name='Face_62', + id=126, + color=[255, 255, 255], + type='', + swap='Face_66'), + 127: + dict( + name='Face_63', + id=127, + color=[255, 255, 255], + type='', + swap='Face_65'), + 128: + dict(name='Face_64', id=128, color=[255, 255, 255], type='', swap=''), + 129: + dict( + name='Face_65', + id=129, + color=[255, 255, 255], + type='', + swap='Face_63'), + 130: + dict( + name='Face_66', + id=130, + color=[255, 255, 255], + type='', + swap='Face_62'), + 131: + dict( + name='Face_67', + id=131, + 
color=[255, 255, 255], + type='', + swap='Face_61'), + 132: + dict( + name='Face_68', + id=132, + color=[255, 255, 255], + type='', + swap='Face_60'), + 133: + dict( + name='Face_69', + id=133, + color=[255, 255, 255], + type='', + swap='Face_59'), + 134: + dict( + name='Face_70', + id=134, + color=[255, 255, 255], + type='', + swap='Face_58'), + 135: + dict( + name='Face_71', + id=135, + color=[255, 255, 255], + type='', + swap='Face_57'), + 136: + dict( + name='Face_72', + id=136, + color=[255, 255, 255], + type='', + swap='Face_56'), + }, + skeleton_info={ + 0: dict(link=('L_Ankle', 'L_Knee'), id=0, color=[0, 255, 0]), + 1: dict(link=('L_Knee', 'L_Hip'), id=1, color=[0, 255, 0]), + 2: dict(link=('R_Ankle', 'R_Knee'), id=2, color=[0, 255, 0]), + 3: dict(link=('R_Knee', 'R_Hip'), id=3, color=[0, 255, 0]), + 4: dict(link=('L_Hip', 'R_Hip'), id=4, color=[0, 255, 0]), + 5: dict(link=('L_Shoulder', 'L_Hip'), id=5, color=[0, 255, 0]), + 6: dict(link=('R_Shoulder', 'R_Hip'), id=6, color=[0, 255, 0]), + 7: dict(link=('L_Shoulder', 'R_Shoulder'), id=7, color=[0, 255, 0]), + 8: dict(link=('L_Shoulder', 'L_Elbow'), id=8, color=[0, 255, 0]), + 9: dict(link=('R_Shoulder', 'R_Elbow'), id=9, color=[0, 255, 0]), + 10: dict(link=('L_Elbow', 'L_Wrist'), id=10, color=[0, 255, 0]), + 11: dict(link=('R_Elbow', 'R_Wrist'), id=11, color=[255, 128, 0]), + 12: dict(link=('L_Eye', 'R_Eye'), id=12, color=[255, 128, 0]), + 13: dict(link=('Nose', 'L_Eye'), id=13, color=[255, 128, 0]), + 14: dict(link=('Nose', 'R_Eye'), id=14, color=[255, 128, 0]), + 15: dict(link=('L_Eye', 'L_Ear'), id=15, color=[255, 128, 0]), + 16: dict(link=('R_Eye', 'R_Ear'), id=16, color=[255, 128, 0]), + 17: dict(link=('L_Ear', 'L_Shoulder'), id=17, color=[255, 128, 0]), + 18: dict(link=('R_Ear', 'R_Shoulder'), id=18, color=[255, 128, 0]), + 19: dict(link=('L_Ankle', 'L_Big_toe'), id=19, color=[255, 128, 0]), + 20: dict(link=('L_Ankle', 'L_Small_toe'), id=20, color=[255, 128, 0]), + 21: dict(link=('L_Ankle', 'L_Heel'), id=21, color=[255, 128, 0]), + 22: dict(link=('R_Ankle', 'R_Big_toe'), id=22, color=[255, 128, 0]), + 23: dict(link=('R_Ankle', 'R_Small_toe'), id=23, color=[255, 128, 0]), + 24: dict(link=('R_Ankle', 'R_Heel'), id=24, color=[255, 128, 0]), + 25: dict(link=('L_Wrist', 'L_Thumb_1'), id=25, color=[255, 128, 0]), + 26: dict(link=('L_Thumb_1', 'L_Thumb_2'), id=26, color=[255, 128, 0]), + 27: dict(link=('L_Thumb_2', 'L_Thumb_3'), id=27, color=[255, 128, 0]), + 28: dict(link=('L_Thumb_3', 'L_Thumb_4'), id=28, color=[255, 128, 0]), + 29: dict(link=('L_Wrist', 'L_Index_1'), id=29, color=[255, 128, 0]), + 30: dict(link=('L_Index_1', 'L_Index_2'), id=30, color=[255, 128, 0]), + 31: + dict(link=('L_Index_2', 'L_Index_3'), id=31, color=[255, 255, 255]), + 32: + dict(link=('L_Index_3', 'L_Index_4'), id=32, color=[255, 255, 255]), + 33: dict(link=('L_Wrist', 'L_Middle_1'), id=33, color=[255, 255, 255]), + 34: + dict(link=('L_Middle_1', 'L_Middle_2'), id=34, color=[255, 255, 255]), + 35: + dict(link=('L_Middle_2', 'L_Middle_3'), id=35, color=[255, 255, 255]), + 36: + dict(link=('L_Middle_3', 'L_Middle_4'), id=36, color=[255, 255, 255]), + 37: dict(link=('L_Wrist', 'L_Ring_1'), id=37, color=[255, 255, 255]), + 38: dict(link=('L_Ring_1', 'L_Ring_2'), id=38, color=[255, 255, 255]), + 39: dict(link=('L_Ring_2', 'L_Ring_3'), id=39, color=[255, 255, 255]), + 40: dict(link=('L_Ring_3', 'L_Ring_4'), id=40, color=[255, 255, 255]), + 41: dict(link=('L_Wrist', 'L_Pinky_1'), id=41, color=[255, 255, 255]), + 42: + dict(link=('L_Pinky_1', 'L_Pinky_2'), 
id=42, color=[255, 255, 255]), + 43: + dict(link=('L_Pinky_2', 'L_Pinky_3'), id=43, color=[255, 255, 255]), + 44: + dict(link=('L_Pinky_3', 'L_Pinky_4'), id=44, color=[255, 255, 255]), + 45: dict(link=('R_Wrist', 'R_Thumb_1'), id=45, color=[255, 255, 255]), + 46: + dict(link=('R_Thumb_1', 'R_Thumb_2'), id=46, color=[255, 255, 255]), + 47: + dict(link=('R_Thumb_2', 'R_Thumb_3'), id=47, color=[255, 255, 255]), + 48: + dict(link=('R_Thumb_3', 'R_Thumb_4'), id=48, color=[255, 255, 255]), + 49: dict(link=('R_Wrist', 'R_Index_1'), id=49, color=[255, 255, 255]), + 50: + dict(link=('R_Index_1', 'R_Index_2'), id=50, color=[255, 255, 255]), + 51: + dict(link=('R_Index_2', 'R_Index_3'), id=51, color=[255, 255, 255]), + 52: + dict(link=('R_Index_3', 'R_Index_4'), id=52, color=[255, 255, 255]), + 53: dict(link=('R_Wrist', 'R_Middle_1'), id=53, color=[255, 255, 255]), + 54: + dict(link=('R_Middle_1', 'R_Middle_2'), id=54, color=[255, 255, 255]), + 55: + dict(link=('R_Middle_2', 'R_Middle_3'), id=55, color=[255, 255, 255]), + 56: + dict(link=('R_Middle_3', 'R_Middle_4'), id=56, color=[255, 255, 255]), + 57: dict(link=('R_Wrist', 'R_Pinky_1'), id=57, color=[255, 255, 255]), + 58: + dict(link=('R_Pinky_1', 'R_Pinky_2'), id=58, color=[255, 255, 255]), + 59: + dict(link=('R_Pinky_2', 'R_Pinky_3'), id=59, color=[255, 255, 255]), + 60: + dict(link=('R_Pinky_3', 'R_Pinky_4'), id=60, color=[255, 255, 255]), + }, + joint_weights=[1.] * 137, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/wflw.py b/mmpose/configs/_base_/datasets/wflw.py new file mode 100644 index 0000000000000000000000000000000000000000..80c29b696cf5031d8f21d7d8ed7e573043666f35 --- /dev/null +++ b/mmpose/configs/_base_/datasets/wflw.py @@ -0,0 +1,192 @@ +dataset_info = dict( + dataset_name='wflw', + paper_info=dict( + author='Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, ' + 'Quan and Cai, Yici and Zhou, Qiang', + title='Look at boundary: A boundary-aware face alignment algorithm', + container='Proceedings of the IEEE conference on computer ' + 'vision and pattern recognition', + year='2018', + homepage='https://wywu.github.io/projects/LAB/WFLW.html', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-32'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-31'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-30'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-29'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-28'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-27'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-26'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-25'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-24'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-23'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-22'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-21'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-20'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-19'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-18'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), + 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), + 18: + 
dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-14'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-13'), + 20: + dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-12'), + 21: + dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-11'), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-10'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-9'), + 24: + dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-8'), + 25: + dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-7'), + 26: + dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-6'), + 27: + dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap='kpt-5'), + 28: + dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='kpt-4'), + 29: + dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap='kpt-3'), + 30: + dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap='kpt-2'), + 31: + dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-1'), + 32: + dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-0'), + 33: + dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap='kpt-46'), + 34: + dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-45'), + 35: + dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-44'), + 36: + dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-43'), + 37: dict( + name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-42'), + 38: dict( + name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-50'), + 39: dict( + name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-49'), + 40: dict( + name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-48'), + 41: dict( + name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-47'), + 42: dict( + name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-37'), + 43: dict( + name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-36'), + 44: dict( + name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-35'), + 45: dict( + name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-34'), + 46: dict( + name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-33'), + 47: dict( + name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-41'), + 48: dict( + name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-40'), + 49: dict( + name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-39'), + 50: dict( + name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-38'), + 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict(name='kpt-52', id=52, color=[255, 0, 0], type='', swap=''), + 53: dict(name='kpt-53', id=53, color=[255, 0, 0], type='', swap=''), + 54: dict(name='kpt-54', id=54, color=[255, 0, 0], type='', swap=''), + 55: dict( + name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), + 56: dict( + name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), + 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), + 59: dict( + name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), + 60: dict( + name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-72'), + 61: dict( + name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-71'), + 62: dict( + name='kpt-62', id=62, color=[255, 0, 0], type='', swap='kpt-70'), + 63: dict( + name='kpt-63', id=63, 
color=[255, 0, 0], type='', swap='kpt-69'), + 64: dict( + name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-68'), + 65: dict( + name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-75'), + 66: dict( + name='kpt-66', id=66, color=[255, 0, 0], type='', swap='kpt-74'), + 67: dict( + name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-73'), + 68: dict( + name='kpt-68', id=68, color=[255, 0, 0], type='', swap='kpt-64'), + 69: dict( + name='kpt-69', id=69, color=[255, 0, 0], type='', swap='kpt-63'), + 70: dict( + name='kpt-70', id=70, color=[255, 0, 0], type='', swap='kpt-62'), + 71: dict( + name='kpt-71', id=71, color=[255, 0, 0], type='', swap='kpt-61'), + 72: dict( + name='kpt-72', id=72, color=[255, 0, 0], type='', swap='kpt-60'), + 73: dict( + name='kpt-73', id=73, color=[255, 0, 0], type='', swap='kpt-67'), + 74: dict( + name='kpt-74', id=74, color=[255, 0, 0], type='', swap='kpt-66'), + 75: dict( + name='kpt-75', id=75, color=[255, 0, 0], type='', swap='kpt-65'), + 76: dict( + name='kpt-76', id=76, color=[255, 0, 0], type='', swap='kpt-82'), + 77: dict( + name='kpt-77', id=77, color=[255, 0, 0], type='', swap='kpt-81'), + 78: dict( + name='kpt-78', id=78, color=[255, 0, 0], type='', swap='kpt-80'), + 79: dict(name='kpt-79', id=79, color=[255, 0, 0], type='', swap=''), + 80: dict( + name='kpt-80', id=80, color=[255, 0, 0], type='', swap='kpt-78'), + 81: dict( + name='kpt-81', id=81, color=[255, 0, 0], type='', swap='kpt-77'), + 82: dict( + name='kpt-82', id=82, color=[255, 0, 0], type='', swap='kpt-76'), + 83: dict( + name='kpt-83', id=83, color=[255, 0, 0], type='', swap='kpt-87'), + 84: dict( + name='kpt-84', id=84, color=[255, 0, 0], type='', swap='kpt-86'), + 85: dict(name='kpt-85', id=85, color=[255, 0, 0], type='', swap=''), + 86: dict( + name='kpt-86', id=86, color=[255, 0, 0], type='', swap='kpt-84'), + 87: dict( + name='kpt-87', id=87, color=[255, 0, 0], type='', swap='kpt-83'), + 88: dict( + name='kpt-88', id=88, color=[255, 0, 0], type='', swap='kpt-92'), + 89: dict( + name='kpt-89', id=89, color=[255, 0, 0], type='', swap='kpt-91'), + 90: dict(name='kpt-90', id=90, color=[255, 0, 0], type='', swap=''), + 91: dict( + name='kpt-91', id=91, color=[255, 0, 0], type='', swap='kpt-89'), + 92: dict( + name='kpt-92', id=92, color=[255, 0, 0], type='', swap='kpt-88'), + 93: dict( + name='kpt-93', id=93, color=[255, 0, 0], type='', swap='kpt-95'), + 94: dict(name='kpt-94', id=94, color=[255, 0, 0], type='', swap=''), + 95: dict( + name='kpt-95', id=95, color=[255, 0, 0], type='', swap='kpt-93'), + 96: dict( + name='kpt-96', id=96, color=[255, 0, 0], type='', swap='kpt-97'), + 97: dict( + name='kpt-97', id=97, color=[255, 0, 0], type='', swap='kpt-96') + }, + skeleton_info={}, + joint_weights=[1.] 
* 98, + sigmas=[]) diff --git a/mmpose/configs/_base_/datasets/zebra.py b/mmpose/configs/_base_/datasets/zebra.py new file mode 100644 index 0000000000000000000000000000000000000000..eac71f796a761bbf87b123f8b7b8b4585df0c525 --- /dev/null +++ b/mmpose/configs/_base_/datasets/zebra.py @@ -0,0 +1,64 @@ +dataset_info = dict( + dataset_name='zebra', + paper_info=dict( + author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' + 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' + 'Couzin, Iain D', + title='DeepPoseKit, a software toolkit for fast and robust ' + 'animal pose estimation using deep learning', + container='Elife', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='snout', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='head', id=1, color=[255, 255, 255], type='', swap=''), + 2: + dict(name='neck', id=2, color=[255, 255, 255], type='', swap=''), + 3: + dict( + name='forelegL1', + id=3, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 4: + dict( + name='forelegR1', + id=4, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 5: + dict( + name='hindlegL1', + id=5, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 6: + dict( + name='hindlegR1', + id=6, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 7: + dict(name='tailbase', id=7, color=[255, 255, 255], type='', swap=''), + 8: + dict(name='tailtip', id=8, color=[255, 255, 255], type='', swap='') + }, + skeleton_info={ + 0: dict(link=('head', 'snout'), id=0, color=[255, 255, 255]), + 1: dict(link=('neck', 'head'), id=1, color=[255, 255, 255]), + 2: dict(link=('forelegL1', 'neck'), id=2, color=[255, 255, 255]), + 3: dict(link=('forelegR1', 'neck'), id=3, color=[255, 255, 255]), + 4: dict(link=('hindlegL1', 'tailbase'), id=4, color=[255, 255, 255]), + 5: dict(link=('hindlegR1', 'tailbase'), id=5, color=[255, 255, 255]), + 6: dict(link=('tailbase', 'neck'), id=6, color=[255, 255, 255]), + 7: dict(link=('tailtip', 'tailbase'), id=7, color=[255, 255, 255]) + }, + joint_weights=[1.] 
* 9, + sigmas=[]) diff --git a/mmpose/configs/_base_/default_runtime.py b/mmpose/configs/_base_/default_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..d87e8f15efa8c5f2a2a9fa1e827382b504e44f35 --- /dev/null +++ b/mmpose/configs/_base_/default_runtime.py @@ -0,0 +1,54 @@ +default_scope = 'mmpose' + +# hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='PoseVisualizationHook', enable=False), + badcase=dict( + type='BadCaseAnalysisHook', + enable=False, + out_dir='badcase', + metric_type='loss', + badcase_thr=5)) + +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [ + dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend'), + # dict(type='WandbVisBackend'), +] +visualizer = dict( + type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# logger +log_processor = dict( + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict(by_epoch=True) +val_cfg = dict() +test_cfg = dict() diff --git a/mmpose/configs/_base_/merged_COCO_AIC_MPII.py b/mmpose/configs/_base_/merged_COCO_AIC_MPII.py new file mode 100644 index 0000000000000000000000000000000000000000..46952757da1270d3b0fc570be0a61e4c93114ef3 --- /dev/null +++ b/mmpose/configs/_base_/merged_COCO_AIC_MPII.py @@ -0,0 +1,238 @@ +dataset_info = dict( + dataset_name='merged_COCO_AIC_MPII', + paper_info=dict( + author='Miroslav Purkrabek', + title='Merged Pose Estimation Dataset', + container='', + year='2024', + homepage='', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + 
name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='thorax', + id=17, + color=[255, 128, 0], + type='upper', + swap=''), + 18: + dict( + name='neck', + id=18, + color=[255, 128, 0], + type='upper', + swap=''), + 19: + dict( + name='top_head', + id=19, + color=[255, 128, 0], + type='upper', + swap=''), + 20: + dict( + name='pelvis', + id=20, + color=[255, 128, 0], + type='lower', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + # 4: + # dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 4: + dict(link=('left_hip', 'pelvis'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + # 7: + # dict( + # link=('left_shoulder', 'right_shoulder'), + # id=7, + # color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'thorax'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('pelvis', 'right_hip'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_shoulder', 'thorax'), + id=20, + color=[51, 153, 255]), + 21: + dict( + link=('thorax', 'neck'), + id=21, + color=[51, 153, 255]), + 22: + dict( + link=('left_ear', 'top_head'), + id=22, + color=[51, 153, 255]), + 23: + dict( + link=('right_ear', 'top_head'), + id=23, + color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1., 1., 1., 1. 
+ ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, + 0.079, 0.079, # Thorax and neck has the same as shoulders + 0.035, # Top of head has the same as ears + 0.107, # Pelvis has the same as hips + ]) diff --git a/mmpose/configs/animal_2d_keypoint/README.md b/mmpose/configs/animal_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..efcc3841a51c20d776360c99eccfaeb94247ff0d --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/README.md @@ -0,0 +1,20 @@ +# 2D Animal Keypoint Detection + +2D animal keypoint detection (animal pose estimation) aims to detect the key-point of different species, including rats, +dogs, macaques, and cheetah. It provides detailed behavioral analysis for neuroscience, medical and ecology applications. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_animal_keypoint.md) to prepare data. + +## Demo + +Please follow [DEMO](/demo/docs/en/2d_animal_demo.md) to generate fancy demos. + +
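As a programmatic counterpart to the demo above, the mmpose 1.x Python API can run one of the top-down animal models directly. A rough sketch, with the image name as a placeholder and the config/checkpoint taken from the RTMPose AP-10K entry further below; the exact entry points should be double-checked against the installed mmpose version:

```python
from mmpose.apis import inference_topdown, init_model  # mmpose 1.x API (verify for your version)
from mmpose.structures import merge_data_samples

# Config from this patch; checkpoint filename from the AP-10K table below (assumed downloaded locally).
cfg = 'configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py'
ckpt = 'rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth'

model = init_model(cfg, ckpt, device='cpu')

# 'animal.jpg' is a placeholder image path.
results = inference_topdown(model, 'animal.jpg')
pose = merge_data_samples(results)
print(pose.pred_instances.keypoints.shape)  # (num_instances, num_keypoints, 2)
```

Without detector boxes, `inference_topdown` treats the whole image as a single instance; the demo scripts normally feed it bounding boxes from a detector instead.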
diff --git a/mmpose/configs/animal_2d_keypoint/rtmpose/README.md b/mmpose/configs/animal_2d_keypoint/rtmpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fbb103e36c5c9e66292904d15c8db467ce18f3b4 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/rtmpose/README.md @@ -0,0 +1,16 @@ +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. + +## Results and Models + +### AP-10K Dataset + +Results on AP-10K validation set + +| Model | Input Size | AP | Details and Download | +| :-------: | :--------: | :---: | :------------------------------------------: | +| RTMPose-m | 256x256 | 0.722 | [rtmpose_cp10k.md](./ap10k/rtmpose_ap10k.md) | diff --git a/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0e8c007b311f07d3a838d015d37b88fc11f760e2 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,245 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + 
checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/', +# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + 
drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md new file mode 100644 index 0000000000000000000000000000000000000000..4d035a372572aaac93ff980acbb367a1cc6a5efa --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md @@ -0,0 +1,25 @@ + + + + +
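The config above supervises the `SimCCLabel` codec, which represents each coordinate as a 1-D classification over `input_size * simcc_split_ratio` bins smoothed by a Gaussian. A simplified sketch of the x-axis encoding (the function name is illustrative and the real codec's normalization details are omitted):

```python
import numpy as np

def simcc_x_label(x, input_w=256, split_ratio=2.0, sigma=5.66):
    """Soft 1-D classification target for an x coordinate, SimCC-style."""
    num_bins = int(input_w * split_ratio)          # 512 bins along x for a 256-px input
    centers = np.arange(num_bins, dtype=np.float32)
    mu = x * split_ratio                           # coordinate projected into bin space
    label = np.exp(-((centers - mu) ** 2) / (2.0 * sigma ** 2))
    return label / (label.sum() + 1e-12)           # treat the target as a distribution

label = simcc_x_label(100.0)
print(label.shape, int(label.argmax()))            # (512,) 200
```

At test time the prediction is decoded by taking the argmax over the bins and dividing by `simcc_split_ratio`.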
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [rtmpose-m](/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.939 | 0.788 | 0.569 | 0.728 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.json) | diff --git a/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..0441d9e65faa6b0274f9152ff31ef1b66a112214 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py + In Collection: RTMPose + Alias: animal + Metadata: + Architecture: + - RTMPose + Training Data: AP-10K + Name: rtmpose-m_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.722 + AP@0.5: 0.939 + AP@0.75: 0.788 + AP (L): 0.728 + AP (M): 0.569 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90a440dc286ef02104ed7bbf59a606a030f7a68e --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,68 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the +likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
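Decoding the heatmaps described above amounts to locating each channel's peak and mapping it back to the input crop. A minimal NumPy sketch of that step, leaving out the sub-pixel refinement and the inverse affine transform back to the original image that the real decoders apply:

```python
import numpy as np

def decode_heatmaps(heatmaps, input_size=(256, 256)):
    """heatmaps: (K, H, W) -> keypoints (K, 2) in input-crop pixels plus per-keypoint scores."""
    num_kpts, hm_h, hm_w = heatmaps.shape
    flat = heatmaps.reshape(num_kpts, -1)
    ys, xs = np.unravel_index(flat.argmax(axis=1), (hm_h, hm_w))
    scores = flat.max(axis=1)
    # scale heatmap-grid coordinates up to the network input resolution
    kpts = np.stack([xs * input_size[0] / hm_w, ys * input_size[1] / hm_h], axis=1)
    return kpts.astype(np.float32), scores

kpts, scores = decode_heatmaps(np.random.rand(17, 64, 64))
print(kpts.shape, scores.shape)                    # (17, 2) (17,)
```

At test time the configs in this section additionally average in a horizontally flipped forward pass (`flip_test=True`, `flip_mode='heatmap'`, `shift_heatmap=True`) before decoding.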
+ +## Results and Models + +### Animal-Pose Dataset + +Results on AnimalPose validation set (1117 instances) + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :-------------------------------------------------------: | +| HRNet-w32 | 256x256 | 0.740 | 0.780 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | +| HRNet-w48 | 256x256 | 0.738 | 0.778 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | +| ResNet-152 | 256x256 | 0.704 | 0.748 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | +| ResNet-101 | 256x256 | 0.696 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | +| ResNet-50 | 256x256 | 0.691 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | + +### AP-10K Dataset + +Results on AP-10K validation set + +| Model | Input Size | AP | Details and Download | +| :--------: | :--------: | :---: | :--------------------------------------------------: | +| HRNet-w48 | 256x256 | 0.728 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | +| HRNet-w32 | 256x256 | 0.722 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | +| ResNet-101 | 256x256 | 0.681 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | +| ResNet-50 | 256x256 | 0.680 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | +| CSPNeXt-m | 256x256 | 0.703 | [cspnext_udp_ap10k.md](./ap10k/cspnext_udp_ap10k.md) | + +### Desert Locust Dataset + +Results on Desert Locust test set + +| Model | Input Size | AUC | EPE | Details and Download | +| :--------: | :--------: | :---: | :--: | :-------------------------------------------: | +| ResNet-152 | 160x160 | 0.925 | 1.49 | [resnet_locust.md](./locust/resnet_locust.md) | +| ResNet-101 | 160x160 | 0.907 | 2.03 | [resnet_locust.md](./locust/resnet_locust.md) | +| ResNet-50 | 160x160 | 0.900 | 2.27 | [resnet_locust.md](./locust/resnet_locust.md) | + +### Grévy’s Zebra Dataset + +Results on Grévy’s Zebra test set + +| Model | Input Size | AUC | EPE | Details and Download | +| :--------: | :--------: | :---: | :--: | :----------------------------------------: | +| ResNet-152 | 160x160 | 0.921 | 1.67 | [resnet_zebra.md](./zebra/resnet_zebra.md) | +| ResNet-101 | 160x160 | 0.915 | 1.83 | [resnet_zebra.md](./zebra/resnet_zebra.md) | +| ResNet-50 | 160x160 | 0.914 | 1.87 | [resnet_zebra.md](./zebra/resnet_zebra.md) | + +### Animal-Kingdom Dataset + +Results on AnimalKingdom test set + +| Model | Input Size | class | PCK(0.05) | Details and Download | +| :-------: | :--------: | :-----------: | :-------: | :---------------------------------------------------: | +| HRNet-w32 | 256x256 | P1 | 0.6323 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P2 | 0.3741 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_mammals | 0.571 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_amphibians | 0.5358 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_reptiles | 0.51 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_birds | 0.7671 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_fishes | 0.6406 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md new file mode 100644 index 
0000000000000000000000000000000000000000..f32fb49d90f213a8511b8b09e340a9773907b9b1 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md @@ -0,0 +1,47 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+AnimalKingdom (CVPR'2022) + +```bibtex +@InProceedings{ + Ng_2022_CVPR, + author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, + title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {19023-19034} + } +``` + +
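The table below reports PCK(0.05): the fraction of annotated keypoints whose prediction lands within 5% of the normalization size of the ground truth. A minimal sketch under a bbox-size normalization (the exact normalizer used by `PCKAccuracy` should be checked against the implementation):

```python
import numpy as np

def pck(pred, gt, visible, bbox_wh, thr=0.05):
    """Fraction of visible keypoints within thr * max(bbox side) of the ground truth."""
    norm = max(bbox_wh)                                    # one scalar normalizer per instance
    dist = np.linalg.norm(np.asarray(pred) - np.asarray(gt), axis=-1) / norm
    mask = np.asarray(visible) > 0
    return float((dist[mask] <= thr).mean()) if mask.any() else 0.0

gt = np.array([[50.0, 40.0], [90.0, 120.0], [60.0, 200.0]])
pred = gt + np.array([[3.0, 2.0], [15.0, 0.0], [-1.0, 4.0]])
print(pck(pred, gt, visible=[1, 1, 1], bbox_wh=(128, 256)))  # 2 of 3 keypoints fall inside the radius
```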
+ +Results on AnimalKingdom validation set + +| Arch | Input Size | PCK(0.05) | Official Repo | Paper | ckpt | log | +| ------------------------------------------------------ | ---------- | --------- | ------------- | ------ | ------------------------------------------------------ | ------------------------------------------------------ | +| [P1_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py) | 256x256 | 0.6323 | 0.6342 | 0.6606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.json) | +| [P2_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py) | 256x256 | 0.3741 | 0.3726 | 0.393 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.json) | +| [P3_mammals_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py) | 256x256 | 0.571 | 0.5719 | 0.6159 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.json) | +| [P3_amphibians_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py) | 256x256 | 0.5358 | 0.5432 | 0.5674 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.json) | +| [P3_reptiles_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py) | 256x256 | 0.51 | 0.5 | 0.5606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.json) | +| [P3_birds_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py) | 256x256 | 0.7671 | 0.7636 | 0.7735 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.json) | +| 
[P3_fishes_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py) | 256x256 | 0.6406 | 0.636 | 0.6825 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml new file mode 100644 index 0000000000000000000000000000000000000000..12f208a10b7a784e1a9faf444845c128bdfb4e88 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml @@ -0,0 +1,86 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: AnimalKingdom_P1 + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.6323 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P2 + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.3741 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_amphibian + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.5358 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_bird + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.7671 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_fish + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.6406 + Task: Animal 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_mammal + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.571 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_reptile + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.51 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7eb0136e9f8476c6863b52e9c2a366b7245fc3 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + 
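# HRNet keeps a 1/4-resolution branch (64x64 features for the 256x256 crop), which already matches heatmap_size, so no deconv upsampling layers are added here. +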
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P1/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P1/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..f42057f8aa91de0ae2a234c7625dce725adf204b --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + 
stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P2/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P2/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..5a83e7a97b9478031f7ca4dcc4dccba0350d432d --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + 
backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_amphibian/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_amphibian/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ca3c91af610fe995aa24106e0bc6f72b012f9228 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = 
dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_bird/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_bird/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..3923f30d104b22c21a4f1b1252a09e3fcbfb99fd --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer 
+optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_fish/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_fish/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py 
b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..d061c4b6fbc2e01a0b30241cca7fd5212fe29eca --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_mammal/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/ak_P3_mammal/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..b06a49936bad84e9e01cd5510e779e1909d56520 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_reptile/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_reptile/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md new file mode 100644 index 0000000000000000000000000000000000000000..58b971313fbf4a446a2c9720ac0a687fcc956513 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md @@ -0,0 +1,40 @@ + + +
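Every config above encodes training targets with the `MSRAHeatmap` codec: each keypoint becomes a 2-D Gaussian with `sigma=2` on the 64x64 grid, i.e. at 1/4 of the 256x256 input. A minimal sketch of that target generation, ignoring boundary clipping and the unbiased/UDP variants:

```python
import numpy as np

def gaussian_heatmap(kpt_xy, heatmap_size=(64, 64), sigma=2.0):
    """Render one keypoint (already in heatmap coordinates) as a Gaussian peak."""
    w, h = heatmap_size
    xs = np.arange(w)[None, :]
    ys = np.arange(h)[:, None]
    x0, y0 = kpt_xy
    return np.exp(-((xs - x0) ** 2 + (ys - y0) ** 2) / (2.0 * sigma ** 2))

hm = gaussian_heatmap((20.0, 33.0))
print(hm.shape, np.unravel_index(hm.argmax(), hm.shape))  # (64, 64) (33, 20)
```

Input-crop coordinates land on this grid after dividing by 4, since the configs pair a 256x256 input with a 64x64 heatmap.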
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
+ +Results on AnimalPose validation set (1117 instances) + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.740 | 0.959 | 0.833 | 0.780 | 0.965 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256_20210426.log.json) | +| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.738 | 0.958 | 0.831 | 0.778 | 0.962 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256_20210426.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..caba133370aafff30be90aa9171f8df66fefe7f4 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml @@ -0,0 +1,34 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: Animal-Pose + Name: td-hm_hrnet-w32_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.740 + AP@0.5: 0.959 + AP@0.75: 0.833 + AR: 0.780 + AR@0.5: 0.965 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_hrnet-w48_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.738 + AP@0.5: 0.958 + AP@0.75: 0.831 + AR: 0.778 + AR@0.5: 0.962 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md new file mode 100644 index 0000000000000000000000000000000000000000..20ddf54031e18f8bb9150fccfccff1f6cd5949bf --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md @@ -0,0 +1,41 @@ + + +
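The AP and AR values in these model cards come from COCO-style evaluation, where per-keypoint `sigmas` (such as those listed in the dataset configs near the top of this patch) enter through the object keypoint similarity. A minimal NumPy sketch of OKS under the standard definition, with toy values for illustration:

```python
import numpy as np

def object_keypoint_similarity(pred, gt, visible, sigmas, area):
    """COCO-style OKS: exp(-d_i^2 / (2 * area * k_i^2)) averaged over labeled keypoints, k_i = 2 * sigma_i."""
    d2 = np.sum((np.asarray(pred) - np.asarray(gt)) ** 2, axis=-1)
    k2 = (2.0 * np.asarray(sigmas)) ** 2
    e = d2 / (2.0 * (area + np.spacing(1)) * k2)
    mask = np.asarray(visible) > 0
    return float(np.exp(-e)[mask].mean()) if mask.any() else 0.0

# toy example with shoulder-like and hip-like sigmas
sigmas = [0.079, 0.107]
gt = np.array([[100.0, 100.0], [120.0, 160.0]])
pred = gt + np.array([[2.0, -1.0], [5.0, 3.0]])
print(object_keypoint_similarity(pred, gt, visible=[1, 1], sigmas=sigmas, area=120 * 180))
```

AP@0.5 and AP@0.75 in the tables are precision at OKS thresholds of 0.5 and 0.75, while the plain AP averages over thresholds from 0.5 to 0.95.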
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
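The configs linked below expect the dataset under `data/animalpose/`. If the data lives elsewhere, one option (a sketch, assuming `mmengine` is available; the local path and output filename are placeholders) is to load a config, retarget its paths, and dump a local copy:

```python
from mmengine.config import Config

cfg = Config.fromfile('configs/animal_2d_keypoint/topdown_heatmap/animalpose/'
                      'td-hm_res50_8xb64-210e_animalpose-256x256.py')

# Point every dataloader and evaluator at a local copy of Animal-Pose
# ('/path/to/AnimalPose/' is a placeholder).
local_root = '/path/to/AnimalPose/'
for loader in (cfg.train_dataloader, cfg.val_dataloader, cfg.test_dataloader):
    loader.dataset.data_root = local_root
cfg.val_evaluator.ann_file = local_root + 'annotations/animalpose_val.json'
cfg.test_evaluator.ann_file = cfg.val_evaluator.ann_file

cfg.dump('td-hm_res50_animalpose_local.py')  # the dumped file can be passed to tools/train.py
```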
+ +Results on AnimalPose validation set (1117 instances) + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.691 | 0.947 | 0.770 | 0.736 | 0.955 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256_20210426.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.696 | 0.948 | 0.774 | 0.736 | 0.951 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256_20210426.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py) | 256x256 | 0.704 | 0.938 | 0.786 | 0.748 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256_20210426.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..345c13c138aafccb5ce1be0ea4136634327248c8 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml @@ -0,0 +1,51 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: Animal-Pose + Name: td-hm_res50_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.691 + AP@0.5: 0.947 + AP@0.75: 0.770 + AR: 0.736 + AR@0.5: 0.955 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_res101_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.696 + AP@0.5: 0.948 + AP@0.75: 0.774 + AR: 0.736 + AR@0.5: 0.951 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_res152_8xb32-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.704 + AP@0.5: 0.938 + AP@0.75: 0.786 + AR: 0.748 + AR@0.5: 0.946 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth diff --git 
a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-base_8xb64-210e_animalpose-256x192.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-base_8xb64-210e_animalpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b73fa4083330eb2c775c8c3b31241bd635bd40b7 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-base_8xb64-210e_animalpose-256x192.py @@ -0,0 +1,151 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='models/pretrained/mae_pretrain_vit_small_20230913.pth'), + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'v1/pretrained_models/mae_pretrain_vit_base_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=20, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = "/datagrid/personal/purkrmir/data/AnimalPose/" + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, +
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='animalpose_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='animalpose_val.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-small_8xb64-210e_animalpose-256x192.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-small_8xb64-210e_animalpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f626920050a9f9c8587c788b5831a8ec96083e15 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_ViTPose-small_8xb64-210e_animalpose-256x192.py @@ -0,0 +1,162 @@ +TRAIN_ROOT = "/datagrid/personal/purkrmir/data/AnimalPose/" + +BATCH_SIZE = 64 + +load_from = 'models/pretrained/vitpose-s.pth' +# load_from = 'models/pretrained/vitpose-s+_compatible.pth' + +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=None + # init_cfg=dict( + # type='Pretrained', + # checkpoint='models/pretrained/vitpose-s+.pth'), + ), + head=dict( + type='HeatmapHead', + 
in_channels=384, + out_channels=20, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = TRAIN_ROOT + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..2680fe8956e7b1cbf186b1c536204917478d721f --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py @@ -0,0 +1,147 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + 
block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=20, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..3d4a76d8f506c60493ef7e476cb5ed3310044ba2 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py @@ -0,0 +1,147 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings 
+codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=20, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet_reproduce-w48_8xb64-210e_animalpose-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet_reproduce-w48_8xb64-210e_animalpose-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..c4818e7ec858dae0dfc2ac69f553bc9495ac9a0b --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet_reproduce-w48_8xb64-210e_animalpose-256x256.py @@ -0,0 +1,152 @@ +TRAIN_ROOT = "/datagrid/personal/purkrmir/data/AnimalPose/" + +BATCH_SIZE = 64 + +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer 
+optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=20, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = TRAIN_ROOT + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='animalpose_train_coco.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='animalpose_val_coco.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'animalpose_val_coco.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py 
b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffaabb06f160fb66260507db057686f4621b6b2 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py new file mode 100644 index 
0000000000000000000000000000000000000000..8ed92929c9d42fa0caad87a5f6292f75745bd0bf --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..c053c8881461de72345478da49293a6ca96c1ed4 --- /dev/null +++ 
b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..844d17df4ef919ac0c2a9a14bfc966da14752286 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,220 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 
+stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + 
+# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md new file mode 100644 index 0000000000000000000000000000000000000000..fb10359685ecf9b546093b402c292c4c8a8ba0a9 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md @@ -0,0 +1,58 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
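For quick end-to-end use of the model below, mmpose's high-level `MMPoseInferencer` can wrap the config and weights. This is only a sketch, assuming an mmpose 1.x installation; the image path is a placeholder, and for top-down models a suitable detector may additionally need to be supplied via the inferencer's `det_model` / `det_weights` arguments.

```python
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(
    pose2d='configs/animal_2d_keypoint/topdown_heatmap/ap10k/'
           'cspnext-m_udp_8xb64-210e_ap10k-256x256.py',
    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
                   'cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth')

# Calling the inferencer returns a generator; each item covers one input.
result = next(inferencer('animal.jpg'))  # 'animal.jpg' is a placeholder image
predictions = result['predictions']      # per-image keypoint and score predictions
```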
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_cspnext_m](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.703 | 0.944 | 0.776 | 0.513 | 0.710 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..8fedc88374a9c027ed3f3268a42b5eed24a980f0 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py + In Collection: UDP + Metadata: + Architecture: &id001 + - UDP + - HRNet + Training Data: AP-10K + Name: cspnext-m_udp_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.703 + AP@0.5: 0.944 + AP@0.75: 0.776 + AP (L): 0.71 + AP (M): 0.513 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md new file mode 100644 index 0000000000000000000000000000000000000000..fbdd2cbf9f54807a8fcc00adc31f5839fcf94ea1 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md @@ -0,0 +1,41 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
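The entries below are top-down heatmap models: the network predicts one 64x64 heatmap per keypoint, and the peak of each map is mapped back to input-image coordinates. The snippet is a simplified illustration of that decoding step, not mmpose's actual implementation (the linked configs use the `MSRAHeatmap` codec with sub-pixel refinement and optional flip-test averaging); the random tensor is a stand-in for real network output.

```python
import torch

def decode_heatmaps(heatmaps: torch.Tensor, input_size=(256, 256)) -> torch.Tensor:
    """heatmaps: (K, H, W) tensor -> (K, 2) keypoint coordinates in input-image pixels."""
    K, H, W = heatmaps.shape
    flat_idx = heatmaps.flatten(1).argmax(dim=1)                 # per-keypoint peak index
    ys = torch.div(flat_idx, W, rounding_mode='floor')           # peak row on the heatmap grid
    xs = flat_idx % W                                            # peak column on the heatmap grid
    scale_x, scale_y = input_size[0] / W, input_size[1] / H      # heatmap grid -> input pixels
    return torch.stack([xs * scale_x, ys * scale_y], dim=1).float()

coords = decode_heatmaps(torch.rand(17, 64, 64))                 # 17 AP-10K keypoints
print(coords.shape)  # torch.Size([17, 2])
```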
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.935 | 0.789 | 0.557 | 0.729 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.log.json) | +| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.728 | 0.936 | 0.802 | 0.577 | 0.735 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..38aade8349ed34a214139574c0e83ae67b37e630 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml @@ -0,0 +1,34 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: AP-10K + Name: td-hm_hrnet-w32_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.722 + AP@0.5: 0.935 + AP@0.75: 0.789 + AP (L): 0.729 + AP (M): 0.557 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AP-10K + Name: td-hm_hrnet-w48_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.728 + AP@0.5: 0.936 + AP@0.75: 0.802 + AP (L): 0.735 + AP (M): 0.577 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md new file mode 100644 index 0000000000000000000000000000000000000000..11ad6ed033516732bb921f0b32ed1d7336e6517b --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md @@ -0,0 +1,41 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
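The AP/AR columns below are COCO-style, OKS-based keypoint metrics, computed in mmpose by the `CocoMetric` evaluator declared in the configs. For orientation, the equivalent pycocotools loop is sketched here; the results file is a placeholder, and AP-10K evaluation uses dataset-specific keypoint sigmas rather than the COCO-person defaults.

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('data/ap10k/annotations/ap10k-val-split1.json')  # ground-truth annotations
coco_dt = coco_gt.loadRes('keypoint_results.json')              # placeholder: predictions in COCO result format

coco_eval = COCOeval(coco_gt, coco_dt, iouType='keypoints')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints AP, AP@0.5, AP@0.75, AP (M), AP (L), AR, ...
```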
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.680 | 0.926 | 0.738 | 0.552 | 0.687 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.681 | 0.921 | 0.751 | 0.545 | 0.690 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..84cc4156b9447dacdf0554851602cc2c907814c9 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml @@ -0,0 +1,35 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: AP-10K + Name: td-hm_res50_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.680 + AP@0.5: 0.926 + AP@0.75: 0.738 + AP (L): 0.687 + AP (M): 0.552 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: AP-10K + Name: td-hm_res101_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.681 + AP@0.5: 0.921 + AP@0.75: 0.751 + AP (L): 0.690 + AP (M): 0.545 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..c61e6384aeea7efcca3ac2f2268fef01663e3234 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,164 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py new file mode 
100644 index 0000000000000000000000000000000000000000..146114a887663a230f7a504e83f13da6fa4a2571 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,164 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + 
batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..be49577511584f892cc4c82797207e8ee1d6a8b4 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,135 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + 
type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..2172cbe938506ae2faa08ed731710e51203d579f --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py @@ -0,0 +1,135 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + 
pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md new file mode 100644 index 0000000000000000000000000000000000000000..bb7c8374926f95a1e726973f2bddc8af04a702ed --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md @@ -0,0 +1,43 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +Results on Desert Locust test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.900 | 2.27 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160_20210407.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.907 | 2.03 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160_20210407.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py) | 160x160 | 1.000 | 0.925 | 1.49 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160_20210407.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml new file mode 100644 index 0000000000000000000000000000000000000000..c7d174fafc5136953beebf9b0dbc8dda5a800199 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml @@ -0,0 +1,45 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: Desert Locust + Name: td-hm_res50_8xb64-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.9 + EPE: 2.27 + PCK@0.2: 1 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Desert Locust + Name: td-hm_res101_8xb64-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.907 + EPE: 2.03 + PCK@0.2: 1 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Desert Locust + Name: td-hm_res152_8xb32-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.925 + EPE: 1.49 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e6c2e39bb28913b7ba180d0ab74c71a24c6cb6 --- 
/dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0a58bc88efab80a383df61137dbb45253da636 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py @@ -0,0 +1,124 @@ +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py new file mode 100644 index 0000000000000000000000000000000000000000..adbb89ee5b23f8697059f6778f1bfe13bd21432a --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = 
dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md new file mode 100644 index 0000000000000000000000000000000000000000..0c12aed0f3407c5303f66326708ebc2d082c5a1f --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md @@ -0,0 +1,43 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Grévy’s Zebra (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +Results on Grévy’s Zebra test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.914 | 1.87 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160_20210407.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.915 | 1.83 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160_20210407.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.921 | 1.67 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160_20210407.log.json) | diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ecedc9700739b50697314df1c4f2f23416f4cfd --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml @@ -0,0 +1,45 @@ +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res50_8xb64-210e_zebra-160x160 + Results: + - Dataset: "Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.914 + EPE: 1.87 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res101_8xb64-210e_zebra-160x160 + Results: + - Dataset: "Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.915 + EPE: 1.83 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res152_8xb32-210e_zebra-160x160 + Results: + - Dataset: "Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.921 + EPE: 1.67 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py new file mode 100644 index 
0000000000000000000000000000000000000000..68c56d80fb91b068d684ec29b5c77da3e920a71f --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py new file mode 100644 index 0000000000000000000000000000000000000000..abb14eefb84dd91912f84cf407faeabc83ec5c25 --- /dev/null +++ 
b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d2777751d7837e7c892868f3027b145610de24 --- /dev/null +++ b/mmpose/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py @@ -0,0 +1,124 @@ +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/README.md b/mmpose/configs/body_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d005d3fed76ccdb4260fef2f1a0f2c3466136d67 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/README.md @@ -0,0 +1,21 @@ +# Human Body 2D Pose Estimation + +Multi-person human pose estimation is defined as the task of detecting the poses (or keypoints) of all people from an input image. + +Existing approaches can be categorized into top-down and bottom-up approaches. + +Top-down methods (e.g. 
DeepPose) divide the task into two stages: human detection and pose estimation. They perform human detection first, followed by single-person pose estimation within each detected bounding box (a minimal sketch of this two-stage loop follows this file's diff). + +Bottom-up approaches (e.g. Associative Embedding) first detect all the keypoints and then group/associate them into person instances. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_body_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_human_pose_demo.md#2d-human-pose-demo) to run demos. +
+
+
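The two-stage top-down pipeline described in the README above is essentially a loop: a person detector proposes boxes, and a single-person pose estimator (such as the top-down configs added elsewhere in this PR) is run inside each box. The sketch below illustrates only that control flow; `detect_people` and `estimate_single_pose` are hypothetical stand-ins supplied by the caller, not MMPose API functions.

```python
# Minimal sketch of the two-stage top-down pipeline described above.
# `detect_people` and `estimate_single_pose` are hypothetical callables
# standing in for a person detector and a single-person pose estimator;
# they are NOT part of the MMPose API.
from typing import Callable, List, Tuple

import numpy as np

BBox = Tuple[float, float, float, float]  # (x1, y1, x2, y2)


def topdown_inference(
    image: np.ndarray,
    detect_people: Callable[[np.ndarray], List[BBox]],
    estimate_single_pose: Callable[[np.ndarray, BBox], np.ndarray],
) -> List[np.ndarray]:
    """Run human detection first, then per-box single-person pose estimation."""
    poses = []
    for bbox in detect_people(image):                   # stage 1: human detection
        keypoints = estimate_single_pose(image, bbox)   # stage 2: pose inside the box
        poses.append(keypoints)                         # e.g. a (K, 3) array of x, y, score
    return poses
```

Any detector/estimator pair with these signatures could be plugged in; the configs in this directory only define the second stage.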
diff --git a/mmpose/configs/body_2d_keypoint/associative_embedding/README.md b/mmpose/configs/body_2d_keypoint/associative_embedding/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7f5fa8ea1734e3ef121567aaaa1032fc71b862b2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/associative_embedding/README.md @@ -0,0 +1,9 @@ +# Associative embedding: End-to-end learning for joint detection and grouping (AE) + +Associative Embedding is one of the most popular 2D bottom-up pose estimation approaches: it first detects all the keypoints and then groups/associates them into person instances. + +To group all the predicted keypoints into individuals, a tag is also predicted for each detected keypoint. Tags of the same person are similar, while tags of different people are different. The keypoints can therefore be grouped according to their tags (see the toy sketch below). +
+ +
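To make the tag-based grouping in the AE README above concrete, here is a toy sketch that greedily assigns each detected keypoint to the person whose running mean tag is closest, starting a new person when no existing tag is close enough. It is a simplification, not the decoder implemented in MMPose: the scalar tags, the `tag_thr` threshold, and the data layout are assumptions, and the real decoder additionally respects keypoint order, scores, and location refinement.

```python
# Toy sketch of associative-embedding grouping: keypoints whose tags are
# close are assigned to the same person. Simplified; the actual AE decoder
# in MMPose also uses keypoint order, scores, and refinement steps.
import numpy as np


def group_by_tags(keypoints: np.ndarray, tags: np.ndarray, tag_thr: float = 1.0):
    """Greedily group detected keypoints into people by tag similarity.

    Args:
        keypoints: (N, 3) array of (x, y, score) for all detected keypoints.
        tags: (N,) array with one scalar tag per detected keypoint.
        tag_thr: maximum tag distance to join an existing person (assumed value).

    Returns:
        List of index lists into ``keypoints``, one list per person.
    """
    people = []      # keypoint indices per person
    mean_tags = []   # running mean tag per person
    for i in np.argsort(-keypoints[:, 2]):  # process high-score keypoints first
        if mean_tags:
            dists = np.abs(np.asarray(mean_tags) - tags[i])
            j = int(np.argmin(dists))
            if dists[j] < tag_thr:          # close enough: join this person
                people[j].append(int(i))
                mean_tags[j] = float(np.mean(tags[people[j]]))
                continue
        people.append([int(i)])             # otherwise start a new person
        mean_tags.append(float(tags[i]))
    return people
```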
diff --git a/mmpose/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..a4804cbe37ec3932d7b1a7d83b89bf286f1c5761 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py @@ -0,0 +1,166 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1.5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=192) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', interval=50)) + +# codec settings +codec = dict( + type='AssociativeEmbedding', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=2, + decode_topk=30, + decode_center_shift=0.5, + decode_keypoint_order=[ + 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 + ], + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='AssociativeEmbeddingHead', + in_channels=32, + num_keypoints=17, + tag_dim=1, + tag_per_keypoint=True, + deconv_out_channels=None, + keypoint_loss=dict(type='KeypointMSELoss', use_target_weight=True), + tag_loss=dict(type='AssociativeEmbeddingLoss', loss_weight=0.001), + # The heatmap will be resized to the input size before decoding + # if ``restore_heatmap_size==True`` + decoder=dict(codec, heatmap_size=codec['input_size'])), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=False, + restore_heatmap_size=True, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=64, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='bbox', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.md b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..caae01d60d592188694ed9da65c8712f4b9a9811 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.md @@ -0,0 +1,57 @@ + + +
+Associative Embedding (NIPS'2017) + +```bibtex +@inproceedings{newell2017associative, + title={Associative embedding: End-to-end learning for joint detection and grouping}, + author={Newell, Alejandro and Huang, Zhiao and Deng, Jia}, + booktitle={Advances in neural information processing systems}, + pages={2277--2287}, + year={2017} +} +``` + +
+ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [HRNet-w32](/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py) | 512x512 | 0.656 | 0.864 | 0.719 | 0.711 | 0.893 | [ckpt](https://download.openmmlab.com/mmpose/bottom_up/hrnet_w32_coco_512x512-bcb8c247_20200816.pth) | [log](https://download.openmmlab.com/mmpose/bottom_up/hrnet_w32_coco_512x512_20200816.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.yml b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..5fcd749f0f78e72ac1cf7c87bd85411d18048723 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/associative_embedding/coco/hrnet_coco.yml @@ -0,0 +1,25 @@ +Collections: +- Name: AE + Paper: + Title: "Associative embedding: End-to-end learning for joint detection and grouping" + URL: https://arxiv.org/abs/1611.05424 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/associative_embedding.md +Models: +- Config: configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py + In Collection: AE + Metadata: + Architecture: + - AE + - HRNet + Training Data: COCO + Name: ae_hrnet-w32_8xb24-300e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.656 + AP@0.5: 0.864 + AP@0.75: 0.719 + AR: 0.711 + AR@0.5: 0.893 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/bottom_up/hrnet_w32_coco_512x512-bcb8c247_20200816.pth diff --git a/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py b/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..955293dcb1314f1d57cdb9efc4f62669cf41fabc --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py @@ -0,0 +1,164 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=160) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + 
stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='CIDHead', + in_channels=480, + num_keypoints=17, + gfd_channels=32, + coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), + contrastive_loss=dict( + type='InfoNCELoss', temperature=0.05, loss_weight=1.0), + decoder=codec, + ), + train_cfg=dict(max_train_instances=200), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=False, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=64, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=20, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_thr=0.8, + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py b/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..a114088ae217d8c8a2e0d16bab4459e163c6a129 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py @@ -0,0 +1,164 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=160) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='CIDHead', + in_channels=720, + num_keypoints=17, + gfd_channels=48, + coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), + contrastive_loss=dict( + type='InfoNCELoss', temperature=0.05, loss_weight=1.0), + decoder=codec, + ), + train_cfg=dict(max_train_instances=200), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=False, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=64, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=20, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_thr=0.8, + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.md 
b/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..f82cb04db0150ec1b63868ea875c5654fcb800d3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.md @@ -0,0 +1,42 @@ + + +
+CID (CVPR'2022) + +```bibtex +@InProceedings{Wang_2022_CVPR, + author = {Wang, Dongkai and Zhang, Shiliang}, + title = {Contextual Instance Decoupling for Robust Multi-Person Pose Estimation}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {11060-11068} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py) | 512x512 | 0.704 | 0.894 | 0.775 | 0.753 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_20230207.json) | +| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py) | 512x512 | 0.715 | 0.900 | 0.782 | 0.765 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_20230207.json) | diff --git a/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml b/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..b230d20e247c24d7e3c998714eabbe6b132007dc --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml @@ -0,0 +1,41 @@ +Collections: +- Name: CID + Paper: + Title: Contextual Instance Decoupling for Robust Multi-Person Pose Estimation + URL: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_CVPR_2022_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/cid.md +Models: +- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py + In Collection: CID + Metadata: + Architecture: &id001 + - CID + - HRNet + Training Data: COCO + Name: cid_hrnet-w32_8xb20-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.704 + AP@0.5: 0.894 + AP@0.75: 0.775 + AR: 0.753 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth +- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py + In Collection: CID + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cid_hrnet-w48_8xb20-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.715 + AP@0.5: 0.9 + AP@0.75: 0.782 + AR: 0.765 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth diff --git a/mmpose/configs/body_2d_keypoint/dekr/README.md b/mmpose/configs/body_2d_keypoint/dekr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..04726421c0d67793dc4d2fc55fcf2cf491d3813e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/README.md @@ -0,0 +1,22 @@ +# Bottom-up Human Pose Estimation via Disentangled Keypoint Regression (DEKR) + + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
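The description below summarizes the idea: DEKR localizes instance centers and regresses, at each center, one 2-D offset per keypoint. A schematic decode of such center-plus-offset outputs is sketched here in plain PyTorch; it illustrates the idea only and is not the mmpose `DEKRHead` or `SPR` codec.

```python
# Schematic center-plus-offset decoding (illustration only). Assumes the head
# outputs a center heatmap of shape (H, W) and per-pixel keypoint offsets of
# shape (K, 2, H, W), with offsets stored as (dx, dy).
import torch


def decode_center_offset(center_heatmap, offsets, max_instances=30, score_thr=0.1):
    H, W = center_heatmap.shape

    # Keep the highest-scoring candidate centers above the threshold.
    scores, flat_idx = center_heatmap.flatten().topk(max_instances)
    keep = scores > score_thr
    scores, flat_idx = scores[keep], flat_idx[keep]
    ys, xs = flat_idx // W, flat_idx % W

    poses = []
    for score, y, x in zip(scores, ys, xs):
        center = torch.stack((x, y)).float()   # (2,) in heatmap pixels
        kpt_offsets = offsets[:, :, y, x]      # (K, 2), one offset per joint
        poses.append((float(score), center + kpt_offsets))
    return poses
```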
+ +DEKR is a popular 2D bottom-up pose estimation approach that simultaneously detects all the instances and regresses the offsets from the instance centers to joints. + +In order to predict the offsets more accurately, the offsets of different joints are regressed using separated branches with deformable convolutional layers. Thus convolution kernels with different shapes are adopted to extract features for the corresponding joint. diff --git a/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py b/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..743de8882cb7293b632fd3f6dedc37b15e9a0a55 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py @@ -0,0 +1,189 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + num_keypoints=17, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec, + # This rescore net is adapted from the official repo. 
+ # If you are not using the original COCO dataset for training, + # please make sure to remove the `rescore_cfg` item + rescore_cfg=dict( + in_channels=74, + norm_indexes=(5, 6), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py b/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..57f656fb4d4c5f17f50e651ff5b160017d902971 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py @@ -0,0 +1,190 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(640, 640), + heatmap_size=(160, 160), + sigma=(4, 2), + 
minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=720, + num_keypoints=17, + num_heatmap_filters=48, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec, + # This rescore net is adapted from the official repo. + # If you are not using the original COCO dataset for training, + # please make sure to remove the `rescore_cfg` item + rescore_cfg=dict( + in_channels=74, + norm_indexes=(5, 6), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + 
pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md b/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..648b9bc735eea503707402c9f90f837288872f50 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md @@ -0,0 +1,58 @@ + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
+ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [HRNet-w32](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py) | 512x512 | 0.686 | 0.868 | 0.750 | 0.735 | 0.898 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_20221228.json) | +| [HRNet-w48](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py) | 640x640 | 0.714 | 0.883 | 0.777 | 0.762 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_20230124.json) | diff --git a/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml b/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..0246b0723b93e8c04f61324de7add7f1e569ebce --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml @@ -0,0 +1,41 @@ +Collections: +- Name: DEKR + Paper: + Title: Bottom-up human pose estimation via disentangled keypoint regression + URL: https://arxiv.org/abs/2104.02300 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dekr.md +Models: +- Config: configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py + In Collection: DEKR + Metadata: + Architecture: &id001 + - DEKR + - HRNet + Training Data: COCO + Name: dekr_hrnet-w32_8xb10-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.686 + AP@0.5: 0.868 + AP@0.75: 0.750 + AR: 0.735 + AR@0.5: 0.898 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth +- Config: configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py + In Collection: DEKR + Metadata: + Architecture: *id001 + Training Data: COCO + Name: dekr_hrnet-w48_8xb10-140e_coco-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.714 + AP@0.5: 0.883 + AP@0.75: 0.777 + AR: 0.762 + AR@0.5: 0.915 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth diff --git a/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..c990eecdd09eb74bb53ca29bd69ebc88670c9b2b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py @@ -0,0 +1,190 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=20) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + 
dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + num_keypoints=14, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.004, + ), + decoder=codec, + # This rescore net is adapted from the official repo. 
+ # If you are not using the original CrowdPose dataset for training, + # please make sure to remove the `rescore_cfg` item + rescore_cfg=dict( + in_channels=59, + norm_indexes=(0, 1), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'bottomup' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + nms_mode='none', + score_mode='keypoint', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..7d88ee5d20a15686dbeb61dd477509e2d07f243b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py @@ -0,0 +1,191 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=20) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=40) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(640, 640), + 
heatmap_size=(160, 160), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=720, + num_keypoints=14, + num_heatmap_filters=48, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.004, + ), + decoder=codec, + # This rescore net is adapted from the official repo. + # If you are not using the original CrowdPose dataset for training, + # please make sure to remove the `rescore_cfg` item + rescore_cfg=dict( + in_channels=59, + norm_indexes=(0, 1), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'bottomup' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=5, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='images/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + nms_mode='none', + score_mode='keypoint', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..0bbedbe696bb00e79eb3c73956d83a3747d763d8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md @@ -0,0 +1,56 @@ + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
+ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
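The CrowdPose configs earlier in this patch evaluate with `CocoMetric` in `keypoints_crowd` mode, which drops the area term from OKS and reports the easy/medium/hard AP breakdown. As a minimal sketch, the same evaluator can be built outside a config file by mirroring the keys passed to `val_evaluator` above; the annotation path is the one the configs assume.

```python
# Sketch only: the evaluator options the configs above pass as a dict,
# instantiated directly (mmpose 1.x; the annotation path is an assumption).
from mmpose.evaluation import CocoMetric

crowdpose_metric = CocoMetric(
    ann_file='data/crowdpose/annotations/mmpose_crowdpose_test.json',
    nms_mode='none',
    score_mode='keypoint',
    use_area=False,               # CrowdPose OKS ignores annotated areas
    iou_type='keypoints_crowd',   # adds the easy/medium/hard breakdown
    prefix='crowdpose')
```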
+ +Results on CrowdPose test without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [HRNet-w32](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py) | 512x512 | 0.663 | 0.857 | 0.714 | 0.740 | 0.671 | 0.576 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_20221228.json) | +| [HRNet-w48](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py) | 640x640 | 0.679 | 0.869 | 0.731 | 0.753 | 0.688 | 0.593 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_20230128.json) | diff --git a/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..5bbb7f4b256a227493d988f4efa468d4a1f8e699 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml @@ -0,0 +1,37 @@ +Models: +- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py + In Collection: DEKR + Metadata: + Architecture: &id001 + - DEKR + - HRNet + Training Data: CrowdPose + Name: dekr_hrnet-w32_8xb10-300e_crowdpose-512x512 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.663 + AP@0.5: 0.857 + AP@0.75: 0.714 + AP (E): 0.74 + AP (M): 0.671 + AP (L): 0.576 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth +- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py + In Collection: DEKR + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: dekr_hrnet-w48_8xb5-300e_crowdpose-640x640 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.679 + AP@0.5: 0.869 + AP@0.75: 0.731 + AP (E): 0.753 + AP (M): 0.688 + AP (L): 0.593 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth diff --git a/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.md b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..4016bc87e0c38bdb3ac2b36bef1fa08e78ab5aea --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.md @@ -0,0 +1,62 @@ + + +
+ED-Pose (ICLR'2023)
+
+```bibtex
+@inproceedings{yang2023explicit,
+  title={Explicit Box Detection Unifies End-to-End Multi-Person Pose Estimation},
+  author={Jie Yang and Ailing Zeng and Shilong Liu and Feng Li and Ruimao Zhang and Lei Zhang},
+  booktitle={International Conference on Learning Representations},
+  year={2023},
+  url={https://openreview.net/forum?id=s4WVupnJjmX}
+}
+```
+

+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
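The ED-Pose config added by this patch is written in the pure Python config style referenced in the note under the results below. A minimal loading sketch, assuming `mmengine>=0.8.2` and a checkout where the config path resolves, is:

```python
# Sketch: pure Python style configs load through the regular mmengine entry
# point once mmengine>=0.8.2 is installed (path is the config in this patch).
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py')
print(cfg.train_dataloader['batch_size'])  # 1 in this config
```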
+ +Results on COCO val2017. + +| Arch | BackBone | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :-------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :--------------------------------------------: | :-------------------------------------------: | +| [edpose_res50_coco](/configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py) | ResNet-50 | 0.716 | 0.897 | 0.783 | 0.793 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/edpose/coco/edpose_res50_coco_3rdparty.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/edpose/coco/edpose_res50_coco_3rdparty.json) | + +The checkpoint is converted from the official repo. The training of EDPose is not supported yet. It will be supported in the future updates. + +The above config follows [Pure Python style](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta). Please install `mmengine>=0.8.2` to use this config. diff --git a/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.yml b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d00ee4114fc0820051c20b9f94571f0b1313efa --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_coco.yml @@ -0,0 +1,26 @@ +Collections: +- Name: ED-Pose + Paper: + Title: Explicit Box Detection Unifies End-to-End Multi-Person Pose Estimation + URL: https://arxiv.org/pdf/2302.01593.pdf + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/edpose.md +Models: +- Config: configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py + In Collection: ED-Pose + Alias: edpose + Metadata: + Architecture: &id001 + - ED-Pose + - ResNet + Training Data: COCO + Name: edpose_res50_8xb2-50e_coco-800x1333 + Results: + - Dataset: COCO + Metrics: + AP: 0.716 + AP@0.5: 0.897 + AP@0.75: 0.783 + AR: 0.793 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/edpose/coco/edpose_res50_coco_3rdparty.pth diff --git a/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py new file mode 100644 index 0000000000000000000000000000000000000000..a1592538db4d876c2842fbdb359719a37f9edfe6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py @@ -0,0 +1,236 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from mmpose.configs._base_.default_runtime import * # noqa + +from mmcv.transforms import RandomChoice, RandomChoiceResize +from mmengine.dataset import DefaultSampler +from mmengine.model import PretrainedInit +from mmengine.optim import LinearLR, MultiStepLR +from torch.nn import GroupNorm +from torch.optim import Adam + +from mmpose.codecs import EDPoseLabel +from mmpose.datasets import (BottomupRandomChoiceResize, BottomupRandomCrop, + CocoDataset, LoadImage, PackPoseInputs, + RandomFlip) +from mmpose.evaluation import CocoMetric +from mmpose.models import (BottomupPoseEstimator, ChannelMapper, EDPoseHead, + PoseDataPreprocessor, ResNet) +from mmpose.models.utils import FrozenBatchNorm2d + +# runtime +train_cfg.update(max_epochs=50, val_interval=10) # noqa + +# optimizer +optim_wrapper = dict(optimizer=dict( + type=Adam, + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict(type=LinearLR, begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type=MultiStepLR, + begin=0, + end=140, + milestones=[33, 45], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks.update( # noqa + checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict(type=EDPoseLabel, num_select=50, num_keypoints=17) + +# model settings +model = dict( + type=BottomupPoseEstimator, + data_preprocessor=dict( + type=PoseDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type=ResNet, + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type=FrozenBatchNorm2d, requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type=PretrainedInit, checkpoint='torchvision://resnet50')), + neck=dict( + type=ChannelMapper, + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type=GroupNorm, num_groups=32), + num_outs=4), + head=dict( + type=EDPoseHead, + num_queries=900, + num_feature_levels=4, + num_keypoints=17, + as_two_stage=True, + encoder=dict( + num_layers=6, + layer_cfg=dict( # DeformableDetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.0))), + decoder=dict( + num_layers=6, + embed_dims=256, + layer_cfg=dict( # DeformableDetrTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + batch_first=True), + cross_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.1)), + query_dim=4, + num_feature_levels=4, + num_group=100, + num_dn=100, + num_box_decoder_layers=2, + return_intermediate=True), + out_head=dict(num_classes=2), + positional_encoding=dict( + num_pos_feats=128, + temperatureH=20, + temperatureW=20, + normalize=True), + denosing_cfg=dict( + dn_box_noise_scale=0.4, + dn_label_noise_ratio=0.5, + dn_labelbook_size=100, + dn_attn_mask_type_list=['match2dn', 'dn2dn', 'group2group']), + data_decoder=codec), + test_cfg=dict(Pmultiscale_test=False, flip_test=False, num_select=50), + train_cfg=dict()) + +# enable DDP training when rescore net is used 
+find_unused_parameters = True + +# base dataset settings +dataset_type = CocoDataset +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type=LoadImage), + dict(type=RandomFlip, direction='horizontal'), + dict( + type=RandomChoice, + transforms=[ + [ + dict( + type=RandomChoiceResize, + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type=BottomupRandomChoiceResize, + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type=BottomupRandomCrop, + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type=BottomupRandomChoiceResize, + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type=PackPoseInputs), +] + +val_pipeline = [ + dict(type=LoadImage), + dict( + type=BottomupRandomChoiceResize, + scales=[(800, 1333)], + keep_ratio=True, + backend='pillow'), + dict( + type=PackPoseInputs, + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + drop_last=False, + sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type=CocoMetric, + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/README.md b/mmpose/configs/body_2d_keypoint/integral_regression/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d60eaa1a575686006139a8584851e994b7b29e60 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/README.md @@ -0,0 +1,15 @@ +# Top-down integral-regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, integral regression based methods use a simple integral operation relates and unifies the heatmap and joint regression differentiably, thus obtain the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Integral Human Pose Regression](https://arxiv.org/abs/1711.08229). 
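The integral (soft-argmax) operation described above has a compact generic form: normalize each heatmap into a probability map, then take the expected coordinate. The sketch below illustrates that operation only; it is not the mmpose `DSNTHead` implementation.

```python
# Generic soft-argmax over keypoint heatmaps (illustration of the integral
# operation; differentiable, so it can be trained end to end).
import torch


def soft_argmax_2d(heatmaps: torch.Tensor) -> torch.Tensor:
    """heatmaps: (N, K, H, W) -> expected (x, y) per keypoint, shape (N, K, 2)."""
    n, k, h, w = heatmaps.shape
    probs = heatmaps.flatten(2).softmax(dim=-1).reshape(n, k, h, w)

    xs = torch.arange(w, dtype=probs.dtype, device=probs.device)
    ys = torch.arange(h, dtype=probs.dtype, device=probs.device)

    expected_x = (probs.sum(dim=2) * xs).sum(dim=-1)  # sum out rows -> p(x), then E[x]
    expected_y = (probs.sum(dim=3) * ys).sum(dim=-1)  # sum out cols -> p(y), then E[y]
    return torch.stack((expected_x, expected_y), dim=-1)
```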
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :------------------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ResNet-50+Debias-IPR | 256x256 | 0.675 | 0.765 | [resnet_debias_coco.md](./coco/resnet_debias_coco.md) | +| ResNet-50+DSNT | 256x256 | 0.674 | 0.764 | [resnet_dsnt_coco.md](./coco/resnet_dsnt_coco.md) | +| ResNet-50+IPR | 256x256 | 0.633 | 0.730 | [resnet_ipr_coco.md](./coco/resnet_ipr_coco.md) | diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..3dfaeeda8b850fa361eebbf5342ec64842d858e8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py @@ -0,0 +1,134 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='KeypointMSELoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, 
+ )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..9618c810ea20b0d147f71930034b616f6bed3a97 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py @@ -0,0 +1,136 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + debias=True, + beta=10., + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..8c3897fce1acd0deabaedacea3b38b08b9138330 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py @@ -0,0 +1,134 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..40e3660e4f24f0d67d680d0f0ddc6c9f6c6c1014 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md @@ -0,0 +1,57 @@ + + +
+Debias IPR (ICCV'2021)
+
+```bibtex
+@inproceedings{gu2021removing,
+  title={Removing the Bias of Integral Pose Regression},
+  author={Gu, Kerui and Yang, Linlin and Yao, Angela},
+  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
+  pages={11067--11076},
+  year={2021}
+}
+```
+
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [debias-ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py) | 256x256 | 0.675 | 0.872 | 0.740 | 0.765 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..b965238a5d482ec7212f8f95d0e4abadfe6773ce --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml @@ -0,0 +1,25 @@ +Collections: +- Name: DebiasIPR + Paper: + Title: Removing the Bias of Integral Pose Regression + URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/debias_ipr.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py + In Collection: DebiasIPR + Metadata: + Architecture: &id001 + - Debias + - ResNet + Training Data: COCO + Name: ipr_res50_debias-8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.675 + AP@0.5: 0.872 + AP@0.75: 0.74 + AR: 0.765 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..608974ae82e6f05913c98c35fb8fff3e5220b83a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md @@ -0,0 +1,56 @@ + + +
+DSNT (2018) + +```bibtex +@article{nibali2018numerical, + title={Numerical Coordinate Regression with Convolutional Neural Networks}, + author={Nibali, Aiden and He, Zhen and Morgan, Stuart and Prendergast, Luke}, + journal={arXiv preprint arXiv:1801.07372}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ipr_resnet_50_dsnt](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py) | 256x256 | 0.674 | 0.870 | 0.744 | 0.764 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..f34e839c105c514b63f1b434207d56cfa1d57726 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml @@ -0,0 +1,25 @@ +Collections: +- Name: DSNT + Paper: + Title: Numerical Coordinate Regression with Convolutional Neural Networks + URL: https://arxiv.org/abs/1801.07372v2 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dsnt.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py + In Collection: DSNT + Metadata: + Architecture: &id001 + - DSNT + - ResNet + Training Data: COCO + Name: ipr_res50_dsnt-8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.674 + AP@0.5: 0.87 + AP@0.75: 0.744 + AR: 0.764 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..ce4fbae5011b451b4fd639ac896de1b150b2592a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md @@ -0,0 +1,57 @@ + + +
+IPR (ECCV'2018) + +```bibtex +@inproceedings{sun2018integral, + title={Integral human pose regression}, + author={Sun, Xiao and Xiao, Bin and Wei, Fangyin and Liang, Shuang and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={529--545}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py) | 256x256 | 0.633 | 0.860 | 0.703 | 0.730 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa22133f3eb3fd4020f4c17ead813fc1908b23cb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml @@ -0,0 +1,25 @@ +Collections: +- Name: IPR + Paper: + Title: Integral human pose regression + URL: https://arxiv.org/abs/1711.08229 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/ipr.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py + In Collection: IPR + Metadata: + Architecture: &id001 + - IPR + - ResNet + Training Data: COCO + Name: ipr_res50_8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.633 + AP@0.5: 0.86 + AP@0.75: 0.703 + AR: 0.73 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmo/README.md b/mmpose/configs/body_2d_keypoint/rtmo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7480e92ee77d0b129466bf07734f9895fbe17d38 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/README.md @@ -0,0 +1,27 @@ +# RTMO: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation + + + +
+RTMO + +```bibtex +@misc{lu2023rtmo, + title={{RTMO}: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation}, + author={Peng Lu and Tao Jiang and Yining Li and Xiangtai Li and Kai Chen and Wenming Yang}, + year={2023}, + eprint={2312.07526}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
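The description below explains how RTMO's Dynamic Coordinate Classifier (DCC) localizes each keypoint with a pair of 1D heatmaps whose bins are allocated dynamically over the predicted bounding box. As a rough, illustrative companion to that description only, here is a minimal NumPy sketch of such a bin-based decoding step; the function name, tensor shapes, and the soft-argmax readout are assumptions for illustration and do not reproduce the actual `RTMOHead`/DCC implementation (which uses learnable bin representations and bin-keypoint similarity).

```python
# Minimal sketch (not the mmpose implementation): decode keypoints from
# per-keypoint 1D heatmaps whose bins cover a predicted bounding box.
import numpy as np


def decode_keypoints_from_1d_heatmaps(hm_x, hm_y, bbox):
    """Return (K, 2) keypoint coordinates in image space.

    hm_x: (K, Bx) logits over horizontal bins for K keypoints.
    hm_y: (K, By) logits over vertical bins.
    bbox: (x1, y1, x2, y2) predicted person box; bins are allocated over it,
          so the localization resolution adapts to the instance size.
    """
    x1, y1, x2, y2 = bbox
    bins_x = np.linspace(x1, x2, hm_x.shape[1])  # dynamic bin centers (x)
    bins_y = np.linspace(y1, y2, hm_y.shape[1])  # dynamic bin centers (y)

    # Softmax over bins, then take the expectation (soft-argmax) per axis.
    px = np.exp(hm_x - hm_x.max(axis=1, keepdims=True))
    px /= px.sum(axis=1, keepdims=True)
    py = np.exp(hm_y - hm_y.max(axis=1, keepdims=True))
    py /= py.sum(axis=1, keepdims=True)

    xs = (px * bins_x).sum(axis=1)
    ys = (py * bins_y).sum(axis=1)
    return np.stack([xs, ys], axis=1)


# Toy usage: 17 COCO keypoints with 192 x-bins and 256 y-bins, mirroring the
# `num_bins=(192, 256)` setting used in the RTMO configs in this directory.
coords = decode_keypoints_from_1d_heatmaps(
    np.random.randn(17, 192), np.random.randn(17, 256),
    bbox=(100., 50., 300., 450.))
print(coords.shape)  # (17, 2)
```

Splitting 2D localization into two 1D classifications keeps decoding cheap, while conditioning the bins on the predicted box preserves precision for small instances.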
+ +RTMO is a one-stage pose estimation model that seamlessly integrates coordinate classification into the YOLO architecture. It introduces a Dynamic Coordinate Classifier (DCC) module that handles keypoint localization through dual 1D heatmaps. The DCC employs dynamic bin allocation, localizing the coordinate bins to each predicted bounding box to improve efficiency. It also uses learnable bin representations based on positional encodings, enabling computation of bin-keypoint similarity for precise localization. + +RTMO is trained end-to-end using a multi-task loss, with losses for bounding box regression, keypoint heatmap classification via a novel MLE loss, keypoint coordinate proxy regression, and keypoint visibility classification. The MLE loss models annotation uncertainty and balances optimization between easy and hard samples. + +During inference, RTMO employs grid-based dense predictions to simultaneously output human detection boxes and poses in a single pass. It selectively decodes heatmaps only for high-scoring grids after NMS, minimizing computational cost. + +Compared to prior one-stage methods that regress keypoint coordinates directly, RTMO achieves higher accuracy through coordinate classification while retaining real-time speeds. It also outperforms lightweight top-down approaches for images with many people, as the latter have inference times that scale linearly with the number of human instances. diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-l_16xb16-600e_body7-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-l_16xb16-600e_body7-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..45e4295c6ceedf42e72def61bed556f34eae34b2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-l_16xb16-600e_body7-640x640.py @@ -0,0 +1,533 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + 
shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +# data settings +data_mode = 'bottomup' +data_root = 'data/' + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[(i, i) for i in range(17)]) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + 
dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +train_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file=metafile), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + sample_ratio_factor=[1, 0.3, 0.5, 0.3, 0.3, 0.4, 0.3], + test_mode=False, + pipeline=train_pipeline_stage1) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset) + +# val datasets +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_dataset=dataset_coco, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 1.0 +deepen_factor = 1.0 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + 
interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco' + '_20211126_140236-d3bd2b23.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[256, 512, 1024], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=512, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=512, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=512, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-2, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-m_16xb16-600e_body7-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-m_16xb16-600e_body7-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..6c1a0053668a31289b3a8a7cf73a546dbe1910d7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-m_16xb16-600e_body7-640x640.py @@ -0,0 +1,532 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + 
+param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +# data settings +data_mode = 'bottomup' +data_root = 'data/' + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + 
data_prefix=dict(img='coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[(i, i) for i in range(17)]) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +train_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file=metafile), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + sample_ratio_factor=[1, 0.3, 0.5, 0.3, 0.3, 0.4, 0.3], + test_mode=False, + pipeline=train_pipeline_stage1) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset) + +# val datasets +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', 
+ num_last_epochs=20, + new_train_dataset=dataset_coco, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.75 +deepen_factor = 0.67 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'pretrained_models/yolox_m_8x8_300e_coco_20230829.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[192, 384, 768], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=384, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=384, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=384, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-2, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py 
b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..83d7c21d8abf72a37a8457c44202307874d434c6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py @@ -0,0 +1,535 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +# data settings +data_mode = 'bottomup' +data_root = 'data/' + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + 
(3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[(i, i) for i in range(17)]) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +train_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file=metafile), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + sample_ratio_factor=[1, 0.3, 0.5, 0.3, 0.3, 0.4, 0.3], + test_mode=False, + pipeline=train_pipeline_stage1) + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset) + +# 
val datasets +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_dataset=dataset_coco, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.5 +deepen_factor = 0.33 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_' + '20211121_095711-4592a793.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[128, 256, 512], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=256, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile), + use_keypoints_for_center=True), + prior_generator=dict( + type='MlvlPointGenerator', 
+ centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=256, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1.0, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-t_8xb32-600e_body7-416x416.py b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-t_8xb32-600e_body7-416x416.py new file mode 100644 index 0000000000000000000000000000000000000000..566fe34455dacd437d736d6d94f7fa2d627d8a34 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo-t_8xb32-600e_body7-416x416.py @@ -0,0 +1,529 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (416, 416) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(416, 416), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(416, 416), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + 
input_size=(416, 416), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +# data settings +data_mode = 'bottomup' +data_root = 'data/' + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[(i, i) for i in range(17)]) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', 
num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +train_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file=metafile), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + sample_ratio_factor=[1, 0.3, 0.5, 0.3, 0.3, 0.4, 0.3], + test_mode=False, + pipeline=train_pipeline_stage1) + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset) + +# val datasets +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_dataset=dataset_coco, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.375 +deepen_factor = 0.33 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_' + '20211124_171234-b4047906.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[96, 192, 384], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + 
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=192, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=192, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile), + use_keypoints_for_center=True), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=192, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1.0, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.md b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.md new file mode 100644 index 0000000000000000000000000000000000000000..e6174b2942c9d2f1833757193dae35e5c8b46dbf --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.md @@ -0,0 +1,132 @@ + + +
+RTMO + +```bibtex +@misc{lu2023rtmo, + title={{RTMO}: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation}, + author={Peng Lu and Tao Jiang and Yining Li and Xiangtai Li and Kai Chen and Wenming Yang}, + year={2023}, + eprint={2312.07526}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+ +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +
+JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
+ +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +
+Halpe (CVPR'2020) + +```bibtex +@inproceedings{li2020pastanet, + title={PaStaNet: Toward Human Activity Knowledge Engine}, + author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, + booktitle={CVPR}, + year={2020} +} +``` + +
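+The checkpoints listed in the table below can be tried out directly through MMPose's high-level inferencer. The snippet is a minimal sketch, not part of the original release: it assumes an MMPose 1.x installation with the `MMPoseInferencer` API, uses the `rtmo` alias registered in the accompanying metafile, and takes a placeholder image path.
+
+```python
+# Minimal inference sketch (assumes MMPose 1.x; the 'rtmo' alias resolves to the
+# RTMO-l body7 checkpoint through the model index).
+from mmpose.apis import MMPoseInferencer
+
+inferencer = MMPoseInferencer(pose2d='rtmo')    # one-stage, bottom-up pose model
+result = next(inferencer('path/to/image.jpg'))  # the inferencer yields one dict per image
+instances = result['predictions'][0]            # all detected people in the image
+print(instances[0]['keypoints'])                # 17 (x, y) keypoints in COCO order
+```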
+ +Results on COCO val2017 + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | onnx | +| :--------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :--------------------------------: | :-------------------------------: | :--------------------------------: | +| [RTMO-t](/configs/body_2d_keypoint/rtmo/body7/rtmo-t_8xb32-600e_body7-416x416.py) | 416x416 | 0.574 | 0.803 | 0.613 | 0.611 | 0.836 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-t_8xb32-600e_body7-416x416-f48f75cb_20231219.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-t_8xb32-600e_body7-416x416_20231219.json) | [onnx](https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-t_8xb32-600e_body7-416x416-f48f75cb_20231219.zip) | +| [RTMO-s](/configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py) | 640x640 | 0.686 | 0.879 | 0.744 | 0.723 | 0.908 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_body7-640x640-dac2bf74_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_body7-640x640_20231211.json) | [onnx](https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-s_8xb32-600e_body7-640x640-dac2bf74_20231211.zip) | +| [RTMO-m](/configs/body_2d_keypoint/rtmo/body7/rtmo-m_16xb16-600e_body7-640x640.py) | 640x640 | 0.726 | 0.899 | 0.790 | 0.763 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_body7-640x640_20231211.json) | [onnx](https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip) | +| [RTMO-l](/configs/body_2d_keypoint/rtmo/body7/rtmo-l_16xb16-600e_body7-640x640.py) | 640x640 | 0.748 | 0.911 | 0.813 | 0.786 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_body7-640x640-b37118ce_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_body7-640x640_20231211.json) | [onnx](https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-l_16xb16-600e_body7-640x640-b37118ce_20231211.zip) | diff --git a/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.yml b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.yml new file mode 100644 index 0000000000000000000000000000000000000000..6f802531bba7d0561bebce100723e1779331d13c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/body7/rtmo_body7.yml @@ -0,0 +1,74 @@ +Models: +- Config: configs/body_2d_keypoint/rtmo/body7/rtmo-t_8xb32-600e_body7-416x416.py + In Collection: RTMO + Metadata: + Architecture: &id001 + - RTMO + Training Data: &id002 + - AI Challenger + - COCO + - CrowdPose + - MPII + - sub-JHMDB + - Halpe + - PoseTrack18 + Name: rtmo-t_8xb32-600e_body7-416x416 + Results: + - Dataset: COCO + Metrics: + AP: 0.574 + AP@0.5: 0.803 + AP@0.75: 0.613 + AR: 0.611 + AR@0.5: 0.836 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-t_8xb32-600e_body7-416x416-f48f75cb_20231219.pth +- Config: configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmo-s_8xb32-600e_body7-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.686 + AP@0.5: 0.879 + AP@0.75: 0.744 + AR: 0.723 + AR@0.5: 0.908 + Task: Body
2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_body7-640x640-dac2bf74_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/body7/rtmo-m_16xb16-600e_body7-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmo-m_16xb16-600e_body7-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.726 + AP@0.5: 0.899 + AP@0.75: 0.790 + AR: 0.763 + AR@0.5: 0.926 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/body7/rtmo-l_16xb16-600e_body7-640x640.py + In Collection: RTMO + Alias: rtmo + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmo-l_16xb16-600e_body7-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.911 + AP@0.75: 0.813 + AR: 0.786 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_body7-640x640-b37118ce_20231211.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-l_16xb16-600e_coco-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-l_16xb16-600e_coco-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..97bbd109ca3a9937cc57ad3afefe5ea9134ec265 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-l_16xb16-600e_coco-640x640.py @@ -0,0 +1,321 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + 
dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_coco) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 1.0 +deepen_factor = 1.0 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 
'yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco' + '_20211126_140236-d3bd2b23.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[256, 512, 1024], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=512, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=512, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=512, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-2, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-m_16xb16-600e_coco-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-m_16xb16-600e_coco-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..de669ba604469cf08d2c8d81457c896d4f321cc4 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-m_16xb16-600e_coco-640x640.py @@ -0,0 +1,320 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, 
factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_coco) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), 
+ dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.75 +deepen_factor = 0.67 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'pretrained_models/yolox_m_8x8_300e_coco_20230829.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[192, 384, 768], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=384, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=384, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=384, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-2, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..755c47bf82a021f2b75f303da9ea579ca28fd4b8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py @@ -0,0 +1,323 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime 
+train_cfg = dict(max_epochs=600, val_interval=20, dynamic_intervals=[(580, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=40, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=280, end=281), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=281, + T_max=300, + end=580, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=580, end=600), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/coco.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_coco) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + 
batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 280: { + 'proxy_target_cc': True, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.5 +deepen_factor = 0.33 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_' + '20211121_095711-4592a793.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[128, 256, 512], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=17, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=256, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile), + use_keypoints_for_center=True), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=256, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + 
use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1.0, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.md b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..23aac68f0dfda5393ab7680fea4636dcb48e1426 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.md @@ -0,0 +1,43 @@ + + +
+RTMO + +```bibtex +@misc{lu2023rtmo, + title={{RTMO}: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation}, + author={Peng Lu and Tao Jiang and Yining Li and Xiangtai Li and Kai Chen and Wenming Yang}, + year={2023}, + eprint={2312.07526}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
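+The table below links the config and checkpoint for each COCO-only model. For programmatic use, a minimal sketch (an illustrative example, not an official snippet): it assumes MMPose 1.x with the `init_model` and `inference_bottomup` helpers, reuses the RTMO-s config path and checkpoint URL from the table, and takes a placeholder image path.
+
+```python
+# Sketch: build RTMO-s (COCO) from its config and checkpoint, then run
+# bottom-up inference; all people in the image are estimated in one forward pass.
+from mmpose.apis import inference_bottomup, init_model
+
+cfg = 'configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py'
+ckpt = ('https://download.openmmlab.com/mmpose/v1/projects/rtmo/'
+        'rtmo-s_8xb32-600e_coco-640x640-8db55a59_20231211.pth')
+
+model = init_model(cfg, ckpt, device='cpu')         # or 'cuda:0' if available
+data_sample = inference_bottomup(model, 'path/to/image.jpg')[0]
+print(data_sample.pred_instances.keypoints.shape)   # (num_people, 17, 2)
+```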
+ +Results on COCO val2017 + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [RTMO-s](/configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py) | 640x640 | 0.677 | 0.878 | 0.737 | 0.715 | 0.908 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_coco-640x640-8db55a59_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_coco-640x640_20231211.json) | +| [RTMO-m](/configs/body_2d_keypoint/rtmo/coco/rtmo-m_16xb16-600e_coco-640x640.py) | 640x640 | 0.709 | 0.890 | 0.778 | 0.747 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_coco-640x640-6f4e0306_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_coco-640x640_20231211.json) | +| [RTMO-l](/configs/body_2d_keypoint/rtmo/coco/rtmo-l_16xb16-600e_coco-640x640.py) | 640x640 | 0.724 | 0.899 | 0.788 | 0.762 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_coco-640x640-516a421f_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_coco-640x640_20231211.json) | diff --git a/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.yml b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..c3fc84429f21658eb035048e023bf0b5e62ebfc0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/coco/rtmo_coco.yml @@ -0,0 +1,56 @@ +Collections: +- Name: RTMO + Paper: + Title: 'RTMO: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation' + URL: https://arxiv.org/abs/2312.07526 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/rtmo.md +Models: +- Config: configs/body_2d_keypoint/rtmo/coco/rtmo-s_8xb32-600e_coco-640x640.py + In Collection: RTMO + Metadata: + Architecture: &id001 + - RTMO + Training Data: COCO + Name: rtmo-s_8xb32-600e_coco-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.677 + AP@0.5: 0.878 + AP@0.75: 0.737 + AR: 0.715 + AR@0.5: 0.908 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-600e_coco-640x640-8db55a59_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/coco/rtmo-m_16xb16-600e_coco-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmo-m_16xb16-600e_coco-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.709 + AP@0.5: 0.890 + AP@0.75: 0.778 + AR: 0.747 + AR@0.5: 0.920 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-600e_coco-640x640-6f4e0306_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/coco/rtmo-l_16xb16-600e_coco-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmo-l_16xb16-600e_coco-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.899 + AP@0.75: 0.788 + AR: 0.762 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-600e_coco-640x640-516a421f_20231211.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_body7-crowdpose-640x640.py
b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_body7-crowdpose-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..6ba9fbe04ce8dab52f50f690a7fdef1caa24e09d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_body7-crowdpose-640x640.py @@ -0,0 +1,502 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=700, val_interval=50, dynamic_intervals=[(670, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=50, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=350, + end=349, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=349, end=350), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=350, + T_max=320, + end=670, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=670, end=700), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/crowdpose.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +# data settings +data_mode = 'bottomup' +data_root = 'data/' + +# mapping +aic_crowdpose = [(3, 0), (0, 1), (4, 2), (1, 3), (5, 4), (2, 5), + (9, 6), (6, 7), (10, 8), (7, 9), (11, 10), (8, 11), (12, 12), + (13, 13)] + +coco_crowdpose = [ + (5, 0), + (6, 1), + (7, 2), + (8, 3), + (9, 4), + (10, 5), + (11, 6), + (12, 7), + (13, 8), + (14, 9), + (15, 10), + (16, 11), +] + +mpii_crowdpose = [ 
+ (13, 0), + (12, 1), + (14, 2), + (11, 3), + (15, 4), + (10, 5), + (3, 6), + (2, 7), + (4, 8), + (1, 9), + (5, 10), + (0, 11), + (9, 12), + (7, 13), +] + +jhmdb_crowdpose = [(4, 0), (3, 1), (8, 2), (7, 3), (12, 4), (11, 5), (6, 6), + (5, 7), (10, 8), (9, 9), (14, 10), (13, 11), (2, 12), + (0, 13)] + +halpe_crowdpose = [ + (5, 0), + (6, 1), + (7, 2), + (8, 3), + (9, 4), + (10, 5), + (11, 6), + (12, 7), + (13, 8), + (14, 9), + (15, 10), + (16, 11), +] + +posetrack_crowdpose = [ + (5, 0), + (6, 1), + (7, 2), + (8, 3), + (9, 4), + (10, 5), + (11, 6), + (12, 7), + (13, 8), + (14, 9), + (15, 10), + (16, 11), + (2, 12), + (1, 13), +] + +# train datasets +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=14, mapping=coco_crowdpose) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=14, mapping=aic_crowdpose) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=14, + mapping=[(i, i) for i in range(14)]) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=14, mapping=mpii_crowdpose) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=14, + mapping=jhmdb_crowdpose) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=14, + mapping=halpe_crowdpose) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=14, + mapping=posetrack_crowdpose) + ], +) + +train_dataset_stage1 = dict( + type='CombinedDataset', + metainfo=dict(from_file=metafile), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + sample_ratio_factor=[1, 0.3, 1, 0.3, 0.3, 0.4, 0.3], + test_mode=False, + pipeline=train_pipeline_stage1) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset_stage1) + +# val datasets +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + 
type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + score_mode='bbox', + nms_mode='none', + iou_type='keypoints_crowd', + prefix='crowdpose', + use_area=False, +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=30, + new_train_dataset=dataset_crowdpose, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 350: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 1.0 +deepen_factor = 1.0 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco' + '_20211126_140236-d3bd2b23.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[256, 512, 1024], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=512, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=14, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=512, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=512, + feat_channels=128, + 
num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-3, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_crowdpose-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_crowdpose-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..6b2c78b5a3032b2df8a5ca200788939517926dcb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_crowdpose-640x640.py @@ -0,0 +1,326 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=700, val_interval=50, dynamic_intervals=[(670, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=50, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=350, + end=349, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=349, end=350), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=350, + T_max=320, + end=670, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=670, end=700), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/crowdpose.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.2, + rotate_factor=30, + scale_factor=(0.5, 1.5), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.6, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] 
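+# Note: this stage-2 pipeline is only enabled for the last 30 epochs by the
+# YOLOXPoseModeSwitchHook in custom_hooks below; it drops Mosaic/MixUp and sets
+# the shift/rotate/scale probabilities to zero, so the final epochs train on
+# lightly augmented, simply resized images.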
+train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_crowdpose) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + score_mode='bbox', + nms_mode='none', + iou_type='keypoints_crowd', + prefix='crowdpose', + use_area=False, +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=30, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 350: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 1.0 +deepen_factor = 1.0 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco' + '_20211126_140236-d3bd2b23.pth', + prefix='backbone.', + )), + neck=dict( + 
type='HybridEncoder', + in_channels=[256, 512, 1024], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=512, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=14, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=512, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=512, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-3, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-m_16xb16-700e_crowdpose-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-m_16xb16-700e_crowdpose-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..af8da87942c89a40117309db4fc4067235693eb9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-m_16xb16-700e_crowdpose-640x640.py @@ -0,0 +1,325 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=700, val_interval=50, dynamic_intervals=[(670, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=50, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=350, + end=349, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=349, end=350), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=350, + 
T_max=320, + end=670, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=670, end=700), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/crowdpose.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.2, + rotate_factor=30, + scale_factor=(0.5, 1.5), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.6, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_crowdpose) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + score_mode='bbox', + nms_mode='none', + iou_type='keypoints_crowd', + prefix='crowdpose', + use_area=False, +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=30, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 350: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + 
dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.75 +deepen_factor = 0.67 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'pretrained_models/yolox_m_8x8_300e_coco_20230829.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[192, 384, 768], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=384, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=14, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=384, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=384, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-3, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-s_8xb32-700e_crowdpose-640x640.py b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-s_8xb32-700e_crowdpose-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..288da890e88e77d34e923ed420bf5bb40ffdb3d5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-s_8xb32-700e_crowdpose-640x640.py @@ -0,0 +1,326 @@ +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=700, val_interval=50, dynamic_intervals=[(670, 1)]) + +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=50, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + constructor='ForceDefaultOptimWrapperConstructor', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + force_default_settings=True, + custom_keys=dict({'neck.encoder': dict(lr_mult=0.05)})), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=350, + end=349, + by_epoch=True, + convert_to_iter_based=True), + # this scheduler is used to increase the lr from 2e-4 to 5e-4 + dict(type='ConstantLR', by_epoch=True, factor=2.5, begin=349, end=350), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=350, + T_max=320, + end=670, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=670, end=700), +] + +# data +input_size = (640, 640) +metafile = 'configs/_base_/datasets/crowdpose.py' +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.2, + rotate_factor=30, + scale_factor=(0.5, 1.5), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.6, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='BottomupGetHeatmapMask', get_invalid=True), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +# train datasets +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_crowdpose) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 
'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + score_mode='bbox', + nms_mode='none', + iou_type='keypoints_crowd', + prefix='crowdpose', + use_area=False, +) +test_evaluator = val_evaluator + +# hooks +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=30, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict( + type='RTMOModeSwitchHook', + epoch_attributes={ + 350: { + 'proxy_target_cc': True, + 'overlaps_power': 1.0, + 'loss_cls.loss_weight': 2.0, + 'loss_mle.loss_weight': 5.0, + 'loss_oks.loss_weight': 10.0 + }, + }, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] + +# model +widen_factor = 0.5 +deepen_factor = 0.33 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_' + '20211121_095711-4592a793.pth', + prefix='backbone.', + )), + neck=dict( + type='HybridEncoder', + in_channels=[128, 256, 512], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_dim=256, + output_indices=[1, 2], + encoder_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + ffn_drop=0.0, + act_cfg=dict(type='GELU'))), + projector=dict( + type='ChannelMapper', + in_channels=[256, 256], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='BN'), + num_outs=2)), + head=dict( + type='RTMOHead', + num_keypoints=14, + featmap_strides=(16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + cls_feat_channels=256, + channels_per_group=36, + pose_vec_channels=256, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + assigner=dict( + type='SimOTAAssigner', + dynamic_k_indicator='oks', + oks_calculator=dict(type='PoseOKS', metainfo=metafile)), + prior_generator=dict( + type='MlvlPointGenerator', + centralize_points=True, + strides=[16, 32]), + dcc_cfg=dict( + in_channels=256, + feat_channels=128, + num_bins=(192, 256), + spe_channels=128, + gau_cfg=dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + 
act_fn='SiLU', + pos_enc='add')), + overlaps_power=0.5, + loss_cls=dict( + type='VariFocalLoss', + reduction='sum', + use_target_weight=True, + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo=metafile, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_mle=dict( + type='MLECCLoss', + use_target_weight=True, + loss_weight=1e-3, + ), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + input_size=input_size, + score_thr=0.1, + nms_thr=0.65, + )) diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.md b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..314afb40f8f2177548472443008e1608487646fc --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.md @@ -0,0 +1,44 @@ + + +
+## RTMO (arXiv'2023)
+
+```bibtex
+@misc{lu2023rtmo,
+  title={{RTMO}: Towards High-Performance One-Stage Real-Time Multi-Person Pose Estimation},
+  author={Peng Lu and Tao Jiang and Yining Li and Xiangtai Li and Kai Chen and Wenming Yang},
+  year={2023},
+  eprint={2312.07526},
+  archivePrefix={arXiv},
+  primaryClass={cs.CV}
+}
+```
+
+
+ + + +
+## CrowdPose (CVPR'2019)
+
+```bibtex
+@article{li2018crowdpose,
+  title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark},
+  author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu},
+  journal={arXiv preprint arXiv:1812.00324},
+  year={2018}
+}
+```
+
+
+
+Results on CrowdPose test
+
+| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log |
+| :--------------------------------------------- | :--------: | :---: | :---: | :---: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: |
+| [RTMO-s](/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-s_8xb32-700e_crowdpose-640x640.py) | 640x640 | 0.673 | 0.882 | 0.729 | 0.737 | 0.682 | 0.591 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-700e_crowdpose-640x640-79f81c0d_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-700e_crowdpose-640x640_20231211.json) |
+| [RTMO-m](/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-m_16xb16-700e_crowdpose-640x640.py) | 640x640 | 0.711 | 0.897 | 0.771 | 0.774 | 0.719 | 0.634 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rrtmo-m_16xb16-700e_crowdpose-640x640-0eaf670d_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-m_16xb16-700e_crowdpose-640x640_20231211.json) |
+| [RTMO-l](/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_crowdpose-640x640.py) | 640x640 | 0.732 | 0.907 | 0.793 | 0.792 | 0.741 | 0.653 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_crowdpose-640x640-1008211f_20231211.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_crowdpose-640x640_20231211.json) |
+| [RTMO-l](/configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_body7-crowdpose-640x640.py)\* | 640x640 | 0.838 | 0.947 | 0.893 | 0.888 | 0.847 | 0.772 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_body7-crowdpose-640x640-5bafdc11_20231219.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_body7-crowdpose-640x640_20231219.json) |
+
+\* indicates the model is trained using a combined dataset composed of AI Challenger, COCO, CrowdPose, Halpe, MPII, PoseTrack18 and sub-JHMDB.
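+
+The snippet below is only a minimal sketch (not part of the released configs) of how one of the checkpoints above could be loaded for inference with MMPose's high-level `MMPoseInferencer`. The config/checkpoint pair is copied from the RTMO-s row of the table; the image path is a placeholder.
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+# Build an inferencer from the RTMO-s CrowdPose config and its released weights.
+inferencer = MMPoseInferencer(
+    pose2d='configs/body_2d_keypoint/rtmo/crowdpose/'
+    'rtmo-s_8xb32-700e_crowdpose-640x640.py',
+    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/rtmo/'
+    'rtmo-s_8xb32-700e_crowdpose-640x640-79f81c0d_20231211.pth')
+
+# The call returns a generator that yields one result dict per input image.
+# 'crowd.jpg' is a placeholder path; predictions hold the 14 CrowdPose keypoints
+# and scores for every detected person.
+result = next(inferencer('crowd.jpg', show=False))
+predictions = result['predictions']
+```
+
+The config files above can also be passed to MMPose's standard `tools/train.py` / `tools/test.py` entry points for training and evaluation.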
diff --git a/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.yml b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..d808f15e1250c8c2927f62d504a85f18e91c4c19 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmo/crowdpose/rtmo_crowdpose.yml @@ -0,0 +1,70 @@ +Models: +- Config: configs/body_2d_keypoint/rtmo/crowdpose/rtmo-s_8xb32-700e_crowdpose-640x640.py + In Collection: RTMO + Metadata: + Architecture: &id001 + - RTMO + Training Data: CrowdPose + Name: rtmo-s_8xb32-700e_crowdpose-640x640 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.673 + AP@0.5: 0.882 + AP@0.75: 0.729 + AP (E): 0.737 + AP (M): 0.682 + AP (L): 0.591 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-s_8xb32-700e_crowdpose-640x640-79f81c0d_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/crowdpose/rtmo-m_16xb16-700e_crowdpose-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: rtmo-m_16xb16-700e_crowdpose-640x640 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.711 + AP@0.5: 0.897 + AP@0.75: 0.771 + AP (E): 0.774 + AP (M): 0.719 + AP (L): 0.634 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rrtmo-m_16xb16-700e_crowdpose-640x640-0eaf670d_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_crowdpose-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: rtmo-l_16xb16-700e_crowdpose-640x640 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.732 + AP@0.5: 0.907 + AP@0.75: 0.793 + AP (E): 0.792 + AP (M): 0.741 + AP (L): 0.653 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_crowdpose-640x640-1008211f_20231211.pth +- Config: configs/body_2d_keypoint/rtmo/crowdpose/rtmo-l_16xb16-700e_body7-crowdpose-640x640.py + In Collection: RTMO + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: rtmo-l_16xb16-700e_body7-crowdpose-640x640 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.838 + AP@0.5: 0.947 + AP@0.75: 0.893 + AP (E): 0.888 + AP (M): 0.847 + AP (L): 0.772 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmo/rtmo-l_16xb16-700e_body7-crowdpose-640x640-5bafdc11_20231219.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/README.md b/mmpose/configs/body_2d_keypoint/rtmpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..38fd938376e533a5dcf1d02dce7491f97d308d53 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/README.md @@ -0,0 +1,57 @@ +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. 
+To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. + +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :----------------: | :--------: | :---: | :---: | :---------------------------------------: | +| RTMPose-t | 256x192 | 0.682 | 0.736 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-s | 256x192 | 0.716 | 0.768 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m | 256x192 | 0.746 | 0.795 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-t-aic-coco | 256x192 | 0.685 | 0.738 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-s-aic-coco | 256x192 | 0.722 | 0.772 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m-aic-coco | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l-aic-coco | 256x192 | 0.765 | 0.813 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m-aic-coco | 384x288 | 0.770 | 0.816 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l-aic-coco | 384x288 | 0.773 | 0.819 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :-------: | :--------: | :------: | :------: | :---------------------------------------: | +| RTMPose-m | 256x256 | 0.907 | 0.348 | [rtmpose_mpii.md](./mpii/rtmpose_mpii.md) | + +### CrowdPose Dataset + +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :------------------------------------------------------: | +| RTMPose-m | 256x192 | 0.706 | 0.788 | [rtmpose_crowdpose.md](./crowdpose/rtmpose_crowdpose.md) | + +### Human-Art Dataset + +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| RTMPose-s | 256x192 | 0.311 | 0.381 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-m | 256x192 | 0.355 | 0.417 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-l | 256x192 | 0.378 | 0.442 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| RTMPose-s | 256x192 | 0.698 | 0.732 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-m | 256x192 | 0.728 | 0.759 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-l | 256x192 | 0.753 | 0.783 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf3380435bd1caee7e082954c268021073d7cd8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py @@ -0,0 +1,553 @@ +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( 
+ type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + 
data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE'), +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..19b3c8afb6f0d3b5d60bfe9f330c8ac8db15dc30 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py @@ -0,0 +1,553 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + 
simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + 
data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + 
dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE'), +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..293a5f07ea470e6bab484a7d0ce5693bd84db888 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py @@ -0,0 +1,535 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 
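+# NOTE: for the final `stage2_num_epochs` epochs the training pipeline is switched to
+# the lighter `train_pipeline_stage2` (no PhotometricDistortion, weaker CoarseDropout)
+# by the `mmdet.PipelineSwitchHook` registered in `custom_hooks` below.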
+base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + 
type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + 
mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=5, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=5, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..0aa16f3db405fce481a3788029429dec50dfa732 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py @@ -0,0 +1,535 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + 
loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', 
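+    # image prefix below is split across two adjacent string literals (implicit concatenation)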
+ data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + 
type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..be462bfddf334e36d9361b8242c074f299d4f4b9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py @@ -0,0 +1,553 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# 
automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 
8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val 
datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + 
type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..64cfc8a604b37b8ed6de85c96e99d7295399b452 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py @@ -0,0 +1,553 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 
1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, 
mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + 
data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e694dd27d9e29f8615f62fc483dd976cf2644aaa --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py @@ -0,0 +1,529 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = 
dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), 
(9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..5ee967a3097eb51e877a2e1c4a6e3a1330bdc20e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py @@ -0,0 +1,542 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +# backend_args = dict(backend='local') +backend_args = dict( + backend='petrel', + path_mapping=dict({ + f'{data_root}': 's3://openmmlab/datasets/', + f'{data_root}': 's3://openmmlab/datasets/' + })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ 
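+            # light blur augmentations plus a single CoarseDropout occlusion patch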
+ dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + 
data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + 
type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +# default_hooks = dict( +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..05e6ec09808ca47f337222dfac326a0ff45a8d50 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py @@ -0,0 +1,535 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + 
expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 
16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0a69775106fb57df5d089dce7b5252c0e0f904 --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py @@ -0,0 +1,553 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + 
pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + 
bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8d70bd27aeaf17ae36fb0c9daf24db91cc17ff5c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py @@ -0,0 +1,536 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 
'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), 
+ (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + 
data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc7f80a2bbbf71a958689c4fc45df3d15c22a4e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py @@ -0,0 +1,554 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + 
dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, 
mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + 
type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') + +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..e50aa42f0e5faafb2324e2d4f7d704f11a3c1cda --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py @@ -0,0 +1,535 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 20 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + 
dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + 
type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md new file mode 100644 index 0000000000000000000000000000000000000000..5355a7f35be16d5e79a1336c234de54a036a5746 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md @@ -0,0 +1,76 @@ + + +
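The Halpe-26 configs above unify eight keypoint datasets by giving each sub-dataset a `KeypointConverter` whose `mapping` is a list of `(source_index, target_index)` pairs into the shared 26-keypoint Halpe layout, and then concatenating everything with `CombinedDataset` under the `halpe26.py` metainfo. As an illustration only (a rough sketch of what such a mapping achieves, not the actual `KeypointConverter` implementation), the re-indexing boils down to:

```python
# Rough conceptual sketch of a (source_index, target_index) keypoint mapping,
# e.g. `coco_halpe26` from the config above. Not the mmpose implementation.
import numpy as np

coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24),
                                              (20, 21), (21, 23), (22, 25)]

def remap_keypoints(kpts, mapping, num_target_kpts=26):
    """Re-index (N, K_src, 2) keypoints into the unified target layout.

    Target joints that no source joint maps to stay at zero, i.e. they are
    treated as unlabeled for that sub-dataset.
    """
    out = np.zeros((kpts.shape[0], num_target_kpts, 2), dtype=kpts.dtype)
    for src, dst in mapping:
        out[:, dst] = kpts[:, src]
    return out

# One person with 23 source joints (COCO body + feet) -> 26 Halpe joints.
dummy = np.random.rand(1, 23, 2).astype(np.float32)
print(remap_keypoints(dummy, coco_halpe26).shape)  # (1, 26, 2)
```

Sub-datasets whose mapping covers only part of the 26 joints (e.g. `aic_halpe26` or `mpii_halpe26`) therefore supervise only the mapped joints; the remaining joints are effectively ignored by the loss for those samples, since the head loss is configured with `use_target_weight=True`.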
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Config | Input Size | AP
(COCO) | PCK@0.1
(Body8) | AUC
(Body8) | EPE
(Body8) | Params(M) | FLOPS(G) | Download | +| :--------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------: | +| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 19.45 | 3.34 | 0.36 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | +| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 17.85 | 5.47 | 0.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 15.12 | 13.59 | 1.93 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 13.79 | 27.66 | 4.16 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.98 | 13.72 | 4.33 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 13.08 | 27.79 | 9.35 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..bd8231ccbe1546bc5e69bbb43aa257ad55d834da --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml @@ -0,0 +1,98 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - AI Challenger + - COCO + - CrowdPose + - MPII + - sub-JHMDB + - Halpe + - PoseTrack18 + Name: rtmpose-t_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.659 + Mean@0.1: 0.914 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.697 + Mean@0.1: 0.925 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Alias: + - human + - body + - body17 + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.749 + Mean@0.1: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.767 + Mean@0.1: 0.951 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_body8-384x288 + Results: + - Dataset: Body8 + Metrics: + AP: 0.766 + Mean@0.1: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py + In Collection: RTMPose + Alias: rtmpose-l + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_body8-384x288 + Results: + - Dataset: Body8 + Metrics: + AP: 0.783 + Mean@0.1: 0.964 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md new file mode 100644 index 0000000000000000000000000000000000000000..153b71c663c1a94336781ff89ba73499ffdc6e76 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md @@ -0,0 +1,74 @@ + + +
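The table and metafile above list the released Body8 checkpoints together with their registered aliases (e.g. `human`, `body17`, `rtmpose-l`). As a usage note, and only as a hedged sketch (assuming an MMPose 1.x installation; `demo.jpg` is a placeholder path and argument names may differ slightly between versions), these weights can be loaded through the high-level inferencer:

```python
# Minimal sketch: running one of the released RTMPose Body8 models via the
# MMPose 1.x inferencer API. 'demo.jpg' is a placeholder image path.
from mmpose.apis import MMPoseInferencer

# An alias registered in the metafile above ('human', 'body17', ...) can be
# used here, or a config path from this folder together with a checkpoint URL.
inferencer = MMPoseInferencer(pose2d='human')

# Calling the inferencer returns a generator yielding one result per image.
result = next(inferencer('demo.jpg', show=False))
predictions = result['predictions']  # per-instance keypoints and scores
```

Alternatively, a config from this directory can be paired with the matching `Weights` URL from the metafile via the `pose2d` and `pose2d_weights` arguments.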
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+AlphaPose (TPAMI'2022) + +```bibtex +@article{alphapose, + author = {Fang, Hao-Shu and Li, Jiefeng and Tang, Hongyang and Xu, Chao and Zhu, Haoyi and Xiu, Yuliang and Li, Yong-Lu and Lu, Cewu}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + title = {AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time}, + year = {2022} +} +``` + +
+ +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Config | Input Size | PCK@0.1
(Body8) | AUC
(Body8) | Params(M) | FLOPS(G) | Download | +| :--------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------------------------: | +| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | +| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | +| [RTMPose-x\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml new file mode 100644 index 0000000000000000000000000000000000000000..142918a5940dd57ca8c7aa16246aded54dadaf96 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml @@ -0,0 +1,107 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - AI Challenger + - COCO + - CrowdPose + - MPII + - sub-JHMDB + - Halpe + - PoseTrack18 + Name: rtmpose-t_8xb1024-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.919 + AUC: 0.664 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth +- Config: 
configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb1024-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.930 + AUC: 0.682 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Alias: body26 + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb512-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.947 + AUC: 0.719 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb512-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.954 + AUC: 0.732 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb512-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.952 + AUC: 0.736 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb512-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.956 + AUC: 0.744 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-x_8xb256-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.957 + AUC: 0.748 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..662bd72924b4e77c3f559c62475b0363d1472d0e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py @@ -0,0 +1,272 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5895962bc23a3b3c2d6bfcd30466cbcb5a8f92 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py @@ -0,0 +1,272 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + 
type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + 
type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..85c7695c59e299ab7e68c7991847460014cd4d7a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py @@ -0,0 +1,234 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + 
T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = "/datagrid/personal/purkrmir/data/COCO/original/" + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/densepose.json', + # ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/densepose.json') + # ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c7840f6c46c636bd340488c5f4059b804d02783e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py @@ -0,0 +1,272 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( 
+ type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + 
batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..1293a1ae1c40d94b65df4c27097e4b5d506a2b1b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py @@ -0,0 +1,272 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + 
type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + 
metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f21d0e18c624d6e13f16fc394abd8a7ccd23e167 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + 
simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + 
type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6c9e9fdc55453eb93ab45ea6f8adfc18b333afc6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py @@ -0,0 +1,272 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], 
rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c0abcbb1dd880a6a74afc5cc2d47fed0bc3b72da --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', 
p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..215a297944dce6d4d651aa3ac9d43b2878dd40b1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py @@ -0,0 +1,273 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + 
type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), 
+ dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe0978b2b66127c7ec31886b21117fa4de89048 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py @@ -0,0 +1,233 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + 
eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + 
min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..d3cc9298df5e723f77cf26a4184c2efc7ca4469b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md @@ -0,0 +1,71 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
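+The checkpoints in the table below follow the standard MMPose 1.x workflow, so they can be re-evaluated on COCO val2017 directly from Python. The snippet is a minimal sketch, assuming MMPose 1.x (with MMDetection, which provides the CSPNeXt backbone) is installed, COCO is prepared under `data/coco/` as the configs expect, and the script is run from the directory containing `configs/`; the `work_dirs/...` path is an arbitrary output location.
+
+```python
+from mmengine.config import Config
+from mmengine.runner import Runner
+
+# Load the rtmpose-m COCO config shipped in this folder.
+cfg = Config.fromfile(
+    'configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py')
+
+# Arbitrary output directory for logs and evaluation results.
+cfg.work_dir = 'work_dirs/rtmpose-m_8xb256-420e_coco-256x192'
+
+# Released checkpoint for this config (see the table below).
+cfg.load_from = (
+    'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
+    'rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth')
+
+# Build the runner from the config and run the test loop;
+# CocoMetric then reports coco/AP and the other COCO keypoint metrics.
+runner = Runner.from_cfg(cfg)
+runner.test()
+```
+
+Note that `bbox_file` is commented out in these configs, so the sketch above evaluates with ground-truth person boxes; the numbers in the table were obtained with the detection results mentioned below (human AP 56.4), so scores may differ slightly.
+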
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-s](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-l](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-t-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.685 | 0.880 | 0.761 | 0.738 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.json) | +| [rtmpose-s-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.722 | 0.892 | 0.794 | 0.772 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.json) | +| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.758 | 0.903 | 0.826 | 0.806 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.json) | +| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 
0.813 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.json) | +| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.770 | 0.908 | 0.833 | 0.816 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.json) | +| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.773 | 0.907 | 0.835 | 0.819 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.json) | diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..adb734073a6cdc5d47e54c8391631ad35d944ecd --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml @@ -0,0 +1,170 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: COCO + Name: rtmpose-t_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.682 + AP@0.5: 0.883 + AP@0.75: 0.759 + AR: 0.736 + AR@0.5: 0.92 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-s_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.716 + AP@0.5: 0.892 + AP@0.75: 0.789 + AR: 0.768 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-m_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.899 + AP@0.75: 0.817 + AR: 0.795 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-l_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.906 + AP@0.75: 
0.826 + AR: 0.806 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: &id002 + - COCO + - AI Challenger + Name: rtmpose-t_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.685 + AP@0.5: 0.88 + AP@0.75: 0.761 + AR: 0.738 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.722 + AP@0.5: 0.892 + AP@0.75: 0.794 + AR: 0.772 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.903 + AP@0.75: 0.826 + AR: 0.806 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.765 + AP@0.5: 0.906 + AP@0.75: 0.835 + AR: 0.813 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_aic-coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.770 + AP@0.5: 0.908 + AP@0.75: 0.833 + AR: 0.816 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_aic-coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.773 + AP@0.5: 0.907 + AP@0.75: 0.835 + AR: 0.819 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e93a2f1099cd4d298a1a745a03eb7ddffd3b8998 
--- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py @@ -0,0 +1,234 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 5e-4 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=14, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', 
input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..42bcf0f65f3b76b453d82e8e24bc040cd14bcb0b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md @@ -0,0 +1,60 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
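+The CrowdPose model below can be used through the regular MMPose 1.x top-down inference API. The snippet is a minimal sketch, assuming MMPose 1.x (plus MMDetection, which provides the CSPNeXt backbone) is installed and it is run from the directory containing `configs/`; the image name `demo.jpg` and the box coordinates are placeholders, and in practice the person boxes would come from a detector such as the YOLOv3 model used for the table below.
+
+```python
+import numpy as np
+
+from mmpose.apis import inference_topdown, init_model
+
+config = ('configs/body_2d_keypoint/rtmpose/crowdpose/'
+          'rtmpose-m_8xb64-210e_crowdpose-256x192.py')
+checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
+              'rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth')
+
+# Build the top-down pose estimator and load the released weights.
+model = init_model(config, checkpoint, device='cpu')
+
+# Person boxes in xyxy format (placeholder values, normally detector output).
+bboxes = np.array([[12, 30, 180, 400], [200, 25, 380, 410]], dtype=np.float32)
+
+# One PoseDataSample per box, each carrying the 14 CrowdPose keypoints.
+results = inference_topdown(model, 'demo.jpg', bboxes=bboxes, bbox_format='xyxy')
+print(results[0].pred_instances.keypoints.shape)  # expected: (1, 14, 2)
+```
+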
+
+Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector
+
+| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log |
+| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: |
+| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.706 | 0.841 | 0.765 | 0.799 | 0.719 | 0.582 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.json) |
diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5fb842f56355267e7b22509c76717c97223b2721
--- /dev/null
+++ b/mmpose/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml
@@ -0,0 +1,19 @@
+Models:
+- Config: configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py
+  In Collection: RTMPose
+  Metadata:
+    Architecture:
+    - RTMPose
+    Training Data: CrowdPose
+  Name: rtmpose-m_8xb64-210e_crowdpose-256x192
+  Results:
+  - Dataset: CrowdPose
+    Metrics:
+      AP: 0.706
+      AP@0.5: 0.841
+      AP@0.75: 0.765
+      AP (E): 0.799
+      AP (M): 0.719
+      AP (H): 0.582
+    Task: Body 2D Keypoint
+    Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth
diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py
new file mode 100644
index 0000000000000000000000000000000000000000..384a712d95ffd5e5e6286cadfe53d6abd0f425fd
--- /dev/null
+++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py
@@ -0,0 +1,232 @@
+_base_ = ['../../../_base_/default_runtime.py']
+
+# runtime
+max_epochs = 420
+stage2_num_epochs = 30
+base_lr = 4e-3
+
+train_cfg = dict(max_epochs=max_epochs, val_interval=10)
+randomness = dict(seed=21)
+
+# optimizer
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
+    paramwise_cfg=dict(
+        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
+
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0e-5,
+        by_epoch=False,
+        begin=0,
+        end=1000),
+    dict(
+        # use cosine lr from 210 to 420 epoch
+        type='CosineAnnealingLR',
+        eta_min=base_lr * 0.05,
+        begin=max_epochs // 2,
+        end=max_epochs,
+        T_max=max_epochs // 2,
+        by_epoch=True,
+        convert_to_iter_based=True),
+]
+
+# automatically scaling LR based on the actual training batch size
+auto_scale_lr = dict(base_batch_size=1024)
+
+# codec settings
+codec = dict(
+    type='SimCCLabel',
+    input_size=(192, 256),
+    sigma=(4.9, 5.66),
+    simcc_split_ratio=2.0,
+    normalize=False,
+    use_dark=False)
+
+# model settings
+model = dict(
+    type='TopdownPoseEstimator',
+    data_preprocessor=dict(
+        type='PoseDataPreprocessor',
+        mean=[123.675, 116.28, 103.53],
+        std=[58.395, 57.12, 57.375],
+        bgr_to_rgb=True),
+    backbone=dict(
+        _scope_='mmdet',
+        type='CSPNeXt',
+
arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..30178cbb6dd68d56dd95e934c54ebf96b04482d8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' 
+data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py 
b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b4263f25e741e25a0ec5b85900ff1b2587d2805d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..869f04217d6caecfd422d387730cbfd28cc208c1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py @@ -0,0 +1,233 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr 
* 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', 
encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md new file mode 100644 index 0000000000000000000000000000000000000000..385ce0612a77e697aead79d037d064617f1a911f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md @@ -0,0 +1,117 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+    title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+    author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+    year={2023}
+}
+```
+
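The tables below pair each config with a released checkpoint and training log. As a minimal sketch of how one such pair could be tried out (assuming an MMPose 1.x environment with MMDetection installed, since the backbone is registered under the `mmdet` scope; `demo.jpg` and the device string are placeholders, not part of this diff):

```python
# Minimal sketch (assumptions noted above): run one config/checkpoint pair
# from the tables below on a single image.
from mmpose.apis import inference_topdown, init_model

config = ('configs/body_2d_keypoint/rtmpose/humanart/'
          'rtmpose-m_8xb256-420e_humanart-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
              'rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth')

model = init_model(config, checkpoint, device='cpu')

# With no bounding boxes given, the whole image is treated as one instance;
# supplying detector boxes or ground-truth boxes corresponds to the two
# evaluation settings reported in the tables below.
results = inference_topdown(model, 'demo.jpg')
print(results[0].pred_instances.keypoints.shape)  # expected: (1, 17, 2)
```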
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.161 | 0.283 | 0.154 | 0.221 | 0.373 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.249 | 0.395 | 0.256 | 0.323 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.199 | 0.328 | 0.198 | 0.261 | 0.418 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.311 | 0.462 | 0.323 | 0.381 | 0.540 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.239 | 0.372 | 0.243 | 0.302 | 0.455 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.355 | 0.503 | 0.377 | 0.417 | 0.568 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.260 | 0.393 | 0.267 | 0.323 | 0.472 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.378 | 0.521 | 
0.399 | 0.442 | 0.584 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.444 | 0.725 | 0.453 | 0.488 | 0.750 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.655 | 0.872 | 0.720 | 0.693 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.480 | 0.739 | 0.498 | 0.521 | 0.763 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.698 | 0.893 | 0.768 | 0.732 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.532 | 0.765 | 0.563 | 0.571 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.728 | 0.895 | 0.791 | 0.759 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.564 | 0.789 | 0.602 | 0.599 | 0.808 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.753 | 0.905 | 0.812 | 0.783 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.665 | 0.875 | 0.739 | 0.721 | 0.916 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.706 | 0.888 | 0.780 | 0.759 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.892 | 0.795 | 0.775 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| 
[rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.748 | 0.901 | 0.816 | 0.796 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on COCO val2017 with ground-truth bounding box + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.679 | 0.895 | 0.755 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.916 | 0.798 | 0.753 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.744 | 0.916 | 0.818 | 0.770 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.770 | 0.927 | 0.840 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml new file mode 100644 index 0000000000000000000000000000000000000000..2d6cf6ff26c41d4585cb498af0a1bc092f11c116 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml @@ -0,0 +1,138 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- 
Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO + - Human-Art + Name: rtmpose-l_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.901 + AP@0.75: 0.816 + AR: 0.796 + AR@0.5: 0.938 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.378 + AP@0.5: 0.521 + AP@0.75: 0.399 + AR: 0.442 + AR@0.5: 0.584 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.753 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.783 + AR@0.5: 0.915 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.725 + AP@0.5: 0.892 + AP@0.75: 0.795 + AR: 0.775 + AR@0.5: 0.929 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.355 + AP@0.5: 0.503 + AP@0.75: 0.377 + AR: 0.417 + AR@0.5: 0.568 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.728 + AP@0.5: 0.895 + AP@0.75: 0.791 + AR: 0.759 + AR@0.5: 0.906 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.706 + AP@0.5: 0.888 + AP@0.75: 0.780 + AR: 0.759 + AR@0.5: 0.928 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.311 + AP@0.5: 0.462 + AP@0.75: 0.323 + AR: 0.381 + AR@0.5: 0.540 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.698 + AP@0.5: 0.893 + AP@0.75: 0.768 + AR: 0.732 + AR@0.5: 0.903 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-t_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.665 + AP@0.5: 0.875 + AP@0.75: 0.739 + AR: 0.721 + AR@0.5: 0.916 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.249 + AP@0.5: 0.395 + AP@0.75: 0.256 + AR: 0.323 + AR@0.5: 0.485 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.655 + AP@0.5: 0.872 + AP@0.75: 0.720 + AR: 0.693 + AR@0.5: 0.890 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ca67020f510e739041a342e9aa15f68098dec189 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py @@ -0,0 +1,228 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# 
runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=16, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..990edb45eb2b882e6ddfe14253562dce5a5adba9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md @@ -0,0 +1,43 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
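Before the numbers below, a short sketch of how the MPII config added earlier in this diff can be inspected (assuming a local MMPose checkout so the `_base_` file resolves; `mmengine` is the config loader MMPose builds on):

```python
# Sketch: load the MPII RTMPose config and print a few derived values.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py')

print(cfg.codec['input_size'])                  # (256, 256)
print(cfg.model['head']['in_featuremap_size'])  # (8, 8), i.e. input_size // 32
print(cfg.val_evaluator['type'])                # 'MpiiPCKAccuracy'
```

The `Mean` and `Mean@0.1` columns below are produced by that `MpiiPCKAccuracy` evaluator.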
+
+Results on MPII val set
+
+| Arch | Input Size | Mean / w. flip | Mean@0.1 | ckpt | log |
+| :------------------------------------------------------- | :--------: | :------------: | :------: | :------------------------------------------------------: | :------------------------------------------------------: |
+| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py) | 256x256 | 0.907 | 0.348 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.json) |
diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e1eb28659f49681496cacdbd5bb4f2062e5358b
--- /dev/null
+++ b/mmpose/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml
@@ -0,0 +1,15 @@
+Models:
+- Config: configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py
+  In Collection: RTMPose
+  Metadata:
+    Architecture:
+    - RTMPose
+    Training Data: MPII
+  Name: rtmpose-m_8xb64-210e_mpii-256x256
+  Results:
+  - Dataset: MPII
+    Metrics:
+      Mean: 0.907
+      Mean@0.1: 0.348
+    Task: Body 2D Keypoint
+  Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth
diff --git a/mmpose/configs/body_2d_keypoint/simcc/README.md b/mmpose/configs/body_2d_keypoint/simcc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6148c18bf5061743de2eacf531554f567fa50516
--- /dev/null
+++ b/mmpose/configs/body_2d_keypoint/simcc/README.md
@@ -0,0 +1,20 @@
+# Top-down SimCC-based pose estimation
+
+Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. In the second stage, SimCC-based methods reformulate human pose estimation as two classification tasks, one for the horizontal and one for the vertical coordinate. Each pixel is uniformly divided into several bins, so keypoint coordinates can be predicted at sub-pixel resolution from the features extracted within the bounding box area, following the paradigm introduced in [SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation](https://arxiv.org/abs/2107.03332).
+
+
+ +
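As a rough numerical illustration of this coordinate-classification scheme (a toy sketch only; the real `SimCCLabel` codec used by the configs in this diff additionally applies Gaussian label smoothing via `sigma`, plus optional normalization and DARK decoding):

```python
import numpy as np

# Toy SimCC-style encode/decode for one keypoint in a 192x256 (w x h) input,
# with simcc_split_ratio=2.0 as in the configs below.
input_w, input_h = 192, 256
split_ratio = 2.0
n_bins_x = int(input_w * split_ratio)  # 384 horizontal bins
n_bins_y = int(input_h * split_ratio)  # 512 vertical bins

keypoint_xy = np.array([100.3, 57.8])  # ground-truth location in pixels

# Encode: one classification target per axis (one-hot here, Gaussian in SimCC).
target_x = np.zeros(n_bins_x)
target_y = np.zeros(n_bins_y)
target_x[int(round(keypoint_xy[0] * split_ratio))] = 1.0
target_y[int(round(keypoint_xy[1] * split_ratio))] = 1.0

# Decode: argmax over each axis, mapped back to pixel coordinates.
decoded = np.array([target_x.argmax(), target_y.argmax()]) / split_ratio
print(decoded)  # ~[100.5, 58.0]; quantization error is 1 / (2 * split_ratio)
```

With `simcc_split_ratio=2.0` each axis has twice as many bins as pixels, so keypoints are resolved at the 0.5-pixel level rather than per heatmap cell.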
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :---------------------------: | :--------: | :---: | :---: | :-----------------------------------------------: | +| ResNet-50+SimCC | 384x288 | 0.735 | 0.790 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-50+SimCC | 256x192 | 0.721 | 0.781 | [resnet_coco.md](./coco/resnet_coco.md) | +| S-ViPNAS-MobileNet-V3+SimCC | 256x192 | 0.695 | 0.755 | [vipnas_coco.md](./coco/vipnas_coco.md) | +| MobileNet-V2+SimCC(wo/deconv) | 256x192 | 0.620 | 0.678 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) | diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md b/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..42438774bade657bd5c927d08d99353acbcf7f82 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md @@ -0,0 +1,55 @@ + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
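The `wo-deconv` tag in the config name below means the SimCC head is attached directly to the final MobileNetV2 feature map, without deconvolution upsampling layers. In the config added later in this diff, this corresponds to roughly the following head definition (paraphrased here with the codec values inlined; it is an illustrative excerpt, not the literal file):

```python
# Paraphrased excerpt of simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py
head = dict(
    type='SimCCHead',
    in_channels=1280,                  # MobileNetV2 output channels
    out_channels=17,                   # COCO keypoints
    input_size=(192, 256),
    in_featuremap_size=(6, 8),         # input_size // 32
    simcc_split_ratio=2.0,
    deconv_out_channels=None,          # "wo/deconv": no deconv layers
    loss=dict(type='KLDiscretLoss', use_target_weight=True),
    decoder=dict(
        type='SimCCLabel',
        input_size=(192, 256),
        sigma=6.0,
        simcc_split_ratio=2.0))
```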
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_mobilenetv2_wo_deconv](/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py) | 256x192 | 0.620 | 0.855 | 0.697 | 0.678 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml b/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..00ef5aaecd3bfde35036e309d7a438fd2d9ea219 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - MobilenetV2 + Training Data: COCO + Name: simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.62 + AP@0.5: 0.855 + AP@0.75: 0.697 + AR: 0.678 + AR@0.5: 0.902 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.md b/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..80592b4db38a306c297b952264dc0e0b51d64fde --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.md @@ -0,0 +1,56 @@ + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.721 | 0.897 | 0.798 | 0.781 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.log.json) | +| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py) | 384x288 | 0.735 | 0.899 | 0.800 | 0.790 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml b/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..1e56c9e477f36576909633a4107f24db5872bbfe --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml @@ -0,0 +1,41 @@ +Collections: +- Name: SimCC + Paper: + Title: A Simple Coordinate Classification Perspective for Human Pose Estimation + URL: https://arxiv.org/abs/2107.03332 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simcc.md +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - ResNet + Training Data: COCO + Name: simcc_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.721 + AP@0.5: 0.900 + AP@0.75: 0.798 + AR: 0.781 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth +- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py + In Collection: SimCC + Metadata: + Architecture: *id001 + Training Data: COCO + Name: simcc_res50_8xb32-140e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.735 + AP@0.5: 0.899 + AP@0.75: 0.800 + AR: 0.790 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..800803d190265cbe8183143e7c7ed7b9ebabb21d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + 
type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='SimCCHead', + in_channels=1280, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_out_channels=None, + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..c04358299fe4189daf7ad19bbf76d18e8dd9305c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, 
+)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(288, 384), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..33232a4463ef44872e97b4fba455e0f3e990f109 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py @@ -0,0 +1,114 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict(type='MultiStepLR', milestones=[170, 200], gamma=0.1, by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ba8ba040cb639b02f701cf36bb8ad03eeb5ffdec --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py @@ -0,0 +1,119 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + 
dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='SimCCHead', + in_channels=160, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_type='vipnas', + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=data_root + 'person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md b/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..a9d8b98fc3a9dbae4128ac9ce3c09a24de0474eb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md @@ -0,0 +1,54 @@ + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: A Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
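+SimCC replaces 2-D heatmaps with two 1-D classification problems: each keypoint's x and y coordinates are discretized into sub-pixel bins (the `simcc_split_ratio` used in the configs below) and the head is trained with a KL-divergence loss on Gaussian-smoothed labels (`sigma`). The snippet is a minimal NumPy sketch of the decoding step only, not MMPose's `SimCCLabel` codec implementation.
+
+```python
+import numpy as np
+
+def simcc_decode(simcc_x, simcc_y, split_ratio=2.0):
+    """Decode SimCC 1-D classification outputs into keypoint coordinates.
+
+    simcc_x: (K, W * split_ratio) per-keypoint scores over horizontal bins
+    simcc_y: (K, H * split_ratio) per-keypoint scores over vertical bins
+    """
+    x_bins = simcc_x.argmax(axis=1)
+    y_bins = simcc_y.argmax(axis=1)
+    # Bin index -> input-image coordinate; split_ratio > 1 gives sub-pixel bins.
+    keypoints = np.stack([x_bins, y_bins], axis=1) / split_ratio
+    # Simple confidence proxy: the smaller of the two per-axis maxima.
+    scores = np.minimum(simcc_x.max(axis=1), simcc_y.max(axis=1))
+    return keypoints, scores
+
+# For the 256x192 configs in this folder (input_size=(192, 256),
+# simcc_split_ratio=2.0), each keypoint is classified over 384 horizontal
+# and 512 vertical bins.
+```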
+ + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.695 | 0.883 | 0.772 | 0.755 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml b/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..95077c05c658a42e767833d2102b2e0603288f72 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - ViPNAS + Training Data: COCO + Name: simcc_vipnas-mbv3_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.695 + AP@0.5: 0.883 + AP@0.75: 0.772 + AR: 0.755 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth diff --git a/mmpose/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ef8b47959ea1b70003fe9906889ccec3ee452a51 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(256, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=16, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_out_channels=None, + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + 
shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..47aae219e463a9f53bf3ec51c83a4a2d5c63daf2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,133 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
+ +
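+To make the heatmap formulation concrete, the sketch below shows the basic decoding step shared by the models listed on this page: take each keypoint's heatmap argmax and rescale it to the input resolution. It is a minimal NumPy sketch; MMPose's actual decoders add refinements such as flip-test averaging, UDP, or DARK post-processing.
+
+```python
+import numpy as np
+
+def decode_heatmaps(heatmaps, input_size):
+    """Naive heatmap decoding: per-keypoint argmax scaled to the input size.
+
+    heatmaps: (K, H, W) array, e.g. K=17, H=64, W=48 for a 256x192 input.
+    input_size: (width, height) of the model input, e.g. (192, 256).
+    """
+    num_kpts, heat_h, heat_w = heatmaps.shape
+    flat = heatmaps.reshape(num_kpts, -1)
+    ys, xs = np.unravel_index(flat.argmax(axis=1), (heat_h, heat_w))
+    scores = flat.max(axis=1)
+    # Rescale heatmap coordinates (e.g. 48x64) back to the input crop (e.g. 192x256).
+    keypoints = np.stack([xs * input_size[0] / heat_w,
+                          ys * input_size[1] / heat_h], axis=1)
+    return keypoints, scores
+```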
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------------: | :--------: | :---: | :---: | :-------------------------------------------------: | +| ViTPose-h | 256x192 | 0.790 | 0.840 | [vitpose_coco.md](./coco/vitpose_coco.md) | +| HRNet-w48+UDP | 256x192 | 0.768 | 0.817 | [hrnet_udp_coco.md](./coco/hrnet_udp_coco.md) | +| MSPN 4-stg | 256x192 | 0.765 | 0.826 | [mspn_coco.md](./coco/mspn_coco.md) | +| HRNet-w48+Dark | 256x192 | 0.764 | 0.814 | [hrnet_dark_coco.md](./coco/hrnet_dark_coco.md) | +| HRNet-w48 | 256x192 | 0.756 | 0.809 | [hrnet_coco.md](./coco/hrnet_coco.md) | +| HRFormer-B | 256x192 | 0.754 | 0.807 | [hrformer_coco.md](./coco/hrformer_coco.md) | +| RSN-50-3x | 256x192 | 0.750 | 0.814 | [rsn_coco.md](./coco/rsn_coco.md) | +| CSPNeXt-l | 256x192 | 0.750 | 0.800 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| HRNet-w32 | 256x192 | 0.749 | 0.804 | [hrnet_coco.md](./coco/hrnet_coco.md) | +| Swin-L | 256x192 | 0.743 | 0.798 | [swin_coco.md](./coco/swin_coco.md) | +| ViTPose-s | 256x192 | 0.739 | 0.792 | [vitpose_coco.md](./coco/vitpose_coco.md) | +| HRFormer-S | 256x192 | 0.738 | 0.793 | [hrformer_coco.md](./coco/hrformer_coco.md) | +| Swin-B | 256x192 | 0.737 | 0.794 | [swin_coco.md](./coco/swin_coco.md) | +| SEResNet-101 | 256x192 | 0.734 | 0.790 | [seresnet_coco.md](./coco/seresnet_coco.md) | +| SCNet-101 | 256x192 | 0.733 | 0.789 | [scnet_coco.md](./coco/scnet_coco.md) | +| ResNet-101+Dark | 256x192 | 0.733 | 0.786 | [resnet_dark_coco.md](./coco/resnet_dark_coco.md) | +| CSPNeXt-m | 256x192 | 0.732 | 0.785 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| ResNetV1d-101 | 256x192 | 0.732 | 0.785 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | +| SEResNet-50 | 256x192 | 0.729 | 0.784 | [seresnet_coco.md](./coco/seresnet_coco.md) | +| SCNet-50 | 256x192 | 0.728 | 0.784 | [scnet_coco.md](./coco/scnet_coco.md) | +| ResNet-101 | 256x192 | 0.726 | 0.783 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNeXt-101 | 256x192 | 0.726 | 0.781 | [resnext_coco.md](./coco/resnext_coco.md) | +| HourglassNet | 256x256 | 0.726 | 0.780 | [hourglass_coco.md](./coco/hourglass_coco.md) | +| ResNeSt-101 | 256x192 | 0.725 | 0.781 | [resnest_coco.md](./coco/resnest_coco.md) | +| RSN-50 | 256x192 | 0.724 | 0.790 | [rsn_coco.md](./coco/rsn_coco.md) | +| Swin-T | 256x192 | 0.724 | 0.782 | [swin_coco.md](./coco/swin_coco.md) | +| MSPN 1-stg | 256x192 | 0.723 | 0.788 | [mspn_coco.md](./coco/mspn_coco.md) | +| ResNetV1d-50 | 256x192 | 0.722 | 0.777 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | +| ResNeSt-50 | 256x192 | 0.720 | 0.775 | [resnest_coco.md](./coco/resnest_coco.md) | +| ResNet-50 | 256x192 | 0.718 | 0.774 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNeXt-50 | 256x192 | 0.715 | 0.771 | [resnext_coco.md](./coco/resnext_coco.md) | +| PVT-S | 256x192 | 0.714 | 0.773 | [pvt_coco.md](./coco/pvt_coco.md) | +| CSPNeXt-s | 256x192 | 0.697 | 0.753 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| LiteHRNet-30 | 256x192 | 0.676 | 0.736 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | +| CSPNeXt-tiny | 256x192 | 0.665 | 0.723 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| MobileNet-v2 | 256x192 | 0.648 | 0.709 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) | +| LiteHRNet-18 | 256x192 | 0.642 | 0.705 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | +| CPM | 256x192 | 0.627 | 0.689 | 
[cpm_coco.md](./coco/cpm_coco.md) | +| ShuffleNet-v2 | 256x192 | 0.602 | 0.668 | [shufflenetv2_coco.md](./coco/shufflenetv2_coco.md) | +| ShuffleNet-v1 | 256x192 | 0.587 | 0.654 | [shufflenetv1_coco.md](./coco/shufflenetv1_coco.md) | +| AlexNet | 256x192 | 0.448 | 0.521 | [alexnet_coco.md](./coco/alexnet_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :------------: | :--------: | :------: | :------: | :-------------------------------------------------: | +| HRNet-w48+Dark | 256x256 | 0.905 | 0.360 | [hrnet_dark_mpii.md](./mpii/hrnet_dark_mpii.md) | +| HRNet-w48 | 256x256 | 0.902 | 0.303 | [hrnet_mpii.md](./mpii/cspnext_udp_mpii.md) | +| HRNet-w48 | 256x256 | 0.901 | 0.337 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | +| HRNet-w32 | 256x256 | 0.900 | 0.334 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | +| HourglassNet | 256x256 | 0.889 | 0.317 | [hourglass_mpii.md](./mpii/hourglass_mpii.md) | +| ResNet-152 | 256x256 | 0.889 | 0.303 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNetV1d-152 | 256x256 | 0.888 | 0.300 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | +| SCNet-50 | 256x256 | 0.888 | 0.290 | [scnet_mpii.md](./mpii/scnet_mpii.md) | +| ResNeXt-152 | 256x256 | 0.887 | 0.294 | [resnext_mpii.md](./mpii/resnext_mpii.md) | +| SEResNet-50 | 256x256 | 0.884 | 0.292 | [seresnet_mpii.md](./mpii/seresnet_mpii.md) | +| ResNet-50 | 256x256 | 0.882 | 0.286 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNetV1d-50 | 256x256 | 0.881 | 0.290 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | +| CPM | 368x368\* | 0.876 | 0.285 | [cpm_mpii.md](./mpii/cpm_mpii.md) | +| LiteHRNet-30 | 256x256 | 0.869 | 0.271 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | +| LiteHRNet-18 | 256x256 | 0.859 | 0.260 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | +| MobileNet-v2 | 256x256 | 0.854 | 0.234 | [mobilenetv2_mpii.md](./mpii/mobilenetv2_mpii.md) | +| ShuffleNet-v2 | 256x256 | 0.828 | 0.205 | [shufflenetv2_mpii.md](./mpii/shufflenetv2_mpii.md) | +| ShuffleNet-v1 | 256x256 | 0.824 | 0.195 | [shufflenetv1_mpii.md](./mpii/shufflenetv1_mpii.md) | + +### CrowdPose Dataset + +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :--------------------------------------------------------: | +| HRNet-w32 | 256x192 | 0.675 | 0.816 | [hrnet_crowdpose.md](./crowdpose/hrnet_crowdpose.md) | +| CSPNeXt-m | 256x192 | 0.662 | 0.755 | [hrnet_crowdpose.md](./crowdpose/cspnext_udp_crowdpose.md) | +| ResNet-101 | 256x192 | 0.647 | 0.800 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | +| HRNet-w32 | 256x192 | 0.637 | 0.785 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | + +### AIC Dataset + +Results on AIC val set with ground-truth bounding boxes. + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :----------------------------------: | +| HRNet-w32 | 256x192 | 0.323 | 0.366 | [hrnet_aic.md](./aic/hrnet_aic.md) | +| ResNet-101 | 256x192 | 0.294 | 0.337 | [resnet_aic.md](./aic/resnet_aic.md) | + +### JHMDB Dataset + +| Model | Input Size | PCK(norm. by person size) | PCK (norm. 
by torso size) | Details and Download | +| :-------: | :--------: | :-----------------------: | :-----------------------: | :----------------------------------------: | +| ResNet-50 | 256x256 | 96.0 | 80.1 | [resnet_jhmdb.md](./jhmdb/resnet_jhmdb.md) | +| CPM | 368x368 | 89.8 | 65.7 | [cpm_jhmdb.md](./jhmdb/cpm_jhmdb.md) | + +### PoseTrack2018 Dataset + +Results on PoseTrack2018 val with ground-truth bounding boxes. + +| Model | Input Size | AP | Details and Download | +| :-------: | :--------: | :--: | :----------------------------------------------------------: | +| HRNet-w48 | 256x192 | 84.6 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | +| HRNet-w32 | 256x192 | 83.4 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | +| ResNet-50 | 256x192 | 81.2 | [resnet_posetrack18.md](./posetrack18/resnet_posetrack18.md) | + +### Human-Art Dataset + +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ViTPose-s | 256x192 | 0.381 | 0.448 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | +| ViTPose-b | 256x192 | 0.410 | 0.475 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ViTPose-s | 256x192 | 0.738 | 0.768 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | +| ViTPose-b | 256x192 | 0.759 | 0.790 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md new file mode 100644 index 0000000000000000000000000000000000000000..4b2cefcdcbe8909a7134a556f7da48881c6333c3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md @@ -0,0 +1,38 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+ +Results on AIC val set with ground-truth bounding boxes + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py) | 256x192 | 0.323 | 0.761 | 0.218 | 0.366 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192_20200826.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml new file mode 100644 index 0000000000000000000000000000000000000000..0bbc52ccb8d3fc439887937333898f84ca40b167 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml @@ -0,0 +1,18 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py + In Collection: HRNet + Metadata: + Architecture: + - HRNet + Training Data: AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_aic-256x192 + Results: + - Dataset: AI Challenger + Metrics: + AP: 0.323 + AP@0.5: 0.761 + AP@0.75: 0.218 + AR: 0.366 + AR@0.5: 0.789 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md new file mode 100644 index 0000000000000000000000000000000000000000..1cb0f57eb38ac56e2aaaeef20ad7bda5cb240e96 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md @@ -0,0 +1,55 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+ +Results on AIC val set with ground-truth bounding boxes + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py) | 256x192 | 0.294 | 0.736 | 0.172 | 0.337 | 0.762 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192_20200826.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml new file mode 100644 index 0000000000000000000000000000000000000000..e320056858565d88ac3b1a4e3e4960019be02ffb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: AI Challenger + Name: td-hm_res101_8xb64-210e_aic-256x192 + Results: + - Dataset: AI Challenger + Metrics: + AP: 0.294 + AP@0.5: 0.736 + AP@0.75: 0.172 + AR: 0.337 + AR@0.5: 0.762 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4c504d388fbe627ef7f62393e5135604403110 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py @@ -0,0 +1,151 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 
128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=14, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AicDataset' +data_mode = 'topdown' +data_root = 'data/aic/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_val.json', + data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' + 'keypoint_validation_images_20170911/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/aic_val.json', + use_area=False) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e61da3a5c4b6cbb89e78576c34ca8040f0fcca05 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + 
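+    # 101-layer ResNet backbone, initialised from ImageNet-pretrained
+    # torchvision weights (see init_cfg below).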
depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AicDataset' +data_mode = 'topdown' +data_root = 'data/aic/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_val.json', + data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' + 'keypoint_validation_images_20170911/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/aic_val.json', + use_area=False) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..6f82685ba81d28da32bfd9e578df397c9690ef5f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md @@ -0,0 +1,40 @@ + + +
+AlexNet (NeurIPS'2012) + +```bibtex +@inproceedings{krizhevsky2012imagenet, + title={Imagenet classification with deep convolutional neural networks}, + author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E}, + booktitle={Advances in neural information processing systems}, + pages={1097--1105}, + year={2012} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_alexnet](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py) | 256x192 | 0.448 | 0.767 | 0.461 | 0.521 | 0.829 | [ckpt](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192_20200727.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..0c851c3c793f7360617313a2b4f09e49ebc87484 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - AlexNet + Training Data: COCO + Name: td-hm_alexnet_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.448 + AP@0.5: 0.767 + AP@0.75: 0.461 + AR: 0.521 + AR@0.5: 0.829 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..3d4453a36986ccca8511e3f589dfac39cc3185ec --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md @@ -0,0 +1,41 @@ + + +
+CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py) | 256x192 | 0.627 | 0.862 | 0.709 | 0.689 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192_20220920.log) | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py) | 384x288 | 0.652 | 0.865 | 0.730 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288_20221011.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..2c1cad9713c6a8be51e59ec67047267c8a425e1f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml @@ -0,0 +1,40 @@ +Collections: +- Name: CPM + Paper: + Title: Convolutional pose machines + URL: http://openaccess.thecvf.com/content_cvpr_2016/html/Wei_Convolutional_Pose_Machines_CVPR_2016_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/cpm.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py + In Collection: CPM + Metadata: + Architecture: &id001 + - CPM + Training Data: COCO + Name: td-hm_cpm_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.627 + AP@0.5: 0.862 + AP@0.75: 0.709 + AR: 0.689 + AR@0.5: 0.906 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_cpm_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.652 + AP@0.5: 0.865 + AP@0.75: 0.730 + AR: 0.710 + AR@0.5: 0.907 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1eb0d36c8b185369c8a722522f527fa37e0f8c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py @@ -0,0 +1,284 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = 
dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', 
direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6cce193544c775b5f4c749e3ca9c81ff547a507e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py @@ -0,0 +1,214 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + 
min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..096bf307859ee2946e8d42c66dc10ed23dbfe545 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py @@ -0,0 +1,284 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + 
num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f86e9a8d609c2f200c888ad183f3cd890f35c388 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py @@ -0,0 +1,214 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 
'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + 
type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..94cc7d02d2789fd5a82ff9d352063f5afe99aaf0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py @@ -0,0 +1,284 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-s_imagenet_600e-ea671761.pth')), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', 
direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - 
stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6f50542e5bceb86c652bf4d8ab893386197217ef --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py @@ -0,0 +1,214 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-s_imagenet_600e-ea671761.pth')), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + 
]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cef1b204501573d4e0d3228c36595eb784fdc83b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py @@ -0,0 +1,284 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 
210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + 
type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7ec0bb2be7dbd59be7401cca1d4995d7741ee2b6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py @@ -0,0 +1,214 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + 
begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..7aad2bf6b31428bb8ff52149d0e8cb9f85820709 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md @@ -0,0 +1,69 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
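The CSPNeXt-UDP checkpoints documented further down in this file can be loaded through MMPose's high-level inference helpers. The snippet below is a minimal sketch, assuming MMPose >= 1.0 and MMDetection are installed, that the config path matches this repository layout, and that the checkpoint from the table below has been downloaded locally; the image path and bounding box are placeholders.

```python
# Minimal top-down inference sketch (paths, image and bbox are illustrative).
import numpy as np
from mmpose.apis import init_model, inference_topdown

cfg = ('mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/'
       'cspnext-m_udp_8xb256-210e_coco-256x192.py')
ckpt = 'cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth'

model = init_model(cfg, ckpt, device='cpu')

# One person box in xyxy format, e.g. produced by an RTMDet person detector.
bboxes = np.array([[50.0, 60.0, 220.0, 420.0]])
results = inference_topdown(model, 'person.jpg', bboxes)

keypoints = results[0].pred_instances.keypoints        # (1, 17, 2)
scores = results[0].pred_instances.keypoint_scores     # (1, 17)
print(keypoints.shape, scores.shape)
```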
+ + + +
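All CSPNeXt-UDP configs added in this diff share one optimisation recipe: AdamW with a 4e-3 base learning rate, a 1000-iteration linear warmup, and cosine annealing from epoch 105 to 210 down to 5% of the base rate. The sketch below reproduces the epoch-level behaviour of that schedule in plain Python (the per-iteration warmup is only noted in a comment); it is an illustration, not MMEngine's scheduler implementation.

```python
import math

base_lr = 4e-3
max_epochs = 210
eta_min = base_lr * 0.05  # final LR of the cosine phase

def lr_at_epoch(epoch: int) -> float:
    """Epoch-level view of the LinearLR + CosineAnnealingLR schedule."""
    if epoch < max_epochs // 2:
        # After the short iteration-level warmup, the LR stays at base_lr.
        return base_lr
    # Cosine annealing over the second half of training (epochs 105..210).
    t = (epoch - max_epochs // 2) / (max_epochs // 2)
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t))

for epoch in (0, 105, 150, 180, 209):
    print(epoch, round(lr_at_epoch(epoch), 6))
```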
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
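The `UDPHeatmap` codec and the `use_udp=True` affine used throughout these configs apply the paper's unbiased data processing: coordinates are mapped between the 48x64 heatmap and the 192x256 input using a scale of `(input_size - 1) / (heatmap_size - 1)` instead of the plain size ratio. A small NumPy sketch of the difference:

```python
import numpy as np

input_size = np.array([192, 256], dtype=float)    # (w, h) of the model input
heatmap_size = np.array([48, 64], dtype=float)    # (w, h) of the heatmap

def heatmap_to_input(coords, unbiased=True):
    """Map (x, y) heatmap coordinates to input-image coordinates."""
    if unbiased:  # UDP: unit length defined by (size - 1)
        scale = (input_size - 1) / (heatmap_size - 1)
    else:         # biased variant: plain size ratio
        scale = input_size / heatmap_size
    return np.asarray(coords, dtype=float) * scale

corner = [47, 63]  # bottom-right heatmap cell
print(heatmap_to_input(corner, unbiased=True))   # [191. 255.] -> exact image corner
print(heatmap_to_input(corner, unbiased=False))  # [188. 252.] -> systematic offset
```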
+ + + +
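These configs (except the tiny variants, where the hook is commented out) also keep an exponential moving average of the weights via `EMAHook` with `momentum=0.0002` and `update_buffers=True`. The core update is a plain EMA; the sketch below shows only that step and ignores the iteration-dependent momentum warmup that `ExpMomentumEMA` adds on top, so treat it as an illustration of the convention rather than the hook itself.

```python
import copy
import torch

def ema_update(ema_model: torch.nn.Module, model: torch.nn.Module,
               momentum: float = 0.0002) -> None:
    """In-place EMA step: ema = (1 - momentum) * ema + momentum * current."""
    with torch.no_grad():
        for ema_p, p in zip(ema_model.parameters(), model.parameters()):
            ema_p.mul_(1.0 - momentum).add_(p, alpha=momentum)

model = torch.nn.Linear(4, 2)
ema_model = copy.deepcopy(model)

# ... one optimisation step on `model`, then:
ema_update(ema_model, model, momentum=0.0002)
```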
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
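The `aic-coco` configs above train on a 19-keypoint superset: COCO's 17 keypoints keep indices 0-16, AIC's 14 keypoints are scattered into indices 5-18 (sharing the body joints with COCO and adding head-top and neck at 17-18), and `output_keypoint_indices` slices the COCO subset back out at test time. The NumPy sketch below shows what the `KeypointConverter` mapping amounts to; the mapping is copied from the configs, while the `convert` helper and the random pose are illustrative only.

```python
import numpy as np

NUM_UNIFIED = 19  # COCO 17 keypoints + AIC head-top (17) and neck (18)

# (source_index, target_index) pairs, as in keypoint_mapping_aic above.
keypoint_mapping_aic = [
    (0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12),
    (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), (12, 17), (13, 18),
]

def convert(src_kpts: np.ndarray, mapping) -> np.ndarray:
    """Scatter (N, K_src, 2) keypoints into the unified (N, 19, 2) layout."""
    n = src_kpts.shape[0]
    out = np.zeros((n, NUM_UNIFIED, src_kpts.shape[-1]), dtype=src_kpts.dtype)
    src_idx = [s for s, _ in mapping]
    tgt_idx = [t for _, t in mapping]
    out[:, tgt_idx] = src_kpts[:, src_idx]
    return out

aic_pose = np.random.rand(1, 14, 2)        # one AIC annotation (x, y)
unified = convert(aic_pose, keypoint_mapping_aic)
coco_view = unified[:, :17]                # what output_keypoint_indices keeps
print(unified.shape, coco_view.shape)      # (1, 19, 2) (1, 17, 2)
```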
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_cspnext_t_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.665 | 0.874 | 0.723 | 0.723 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.json) | +| [pose_cspnext_s_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.697 | 0.886 | 0.776 | 0.753 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.json) | +| [pose_cspnext_m_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.732 | 0.896 | 0.806 | 0.785 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.json) | +| [pose_cspnext_l_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.750 | 0.904 | 0.822 | 0.800 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.json) | +| [pose_cspnext_t_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.655 | 0.884 | 0.731 | 0.689 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.json) | +| [pose_cspnext_s_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.700 | 0.905 | 0.783 | 0.733 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.json) | +| [pose_cspnext_m_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.748 | 0.925 | 0.818 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.json) | +| 
[pose_cspnext_l_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.772 | 0.936 | 0.839 | 0.799 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.json) | + +Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). + +Flip test and detector is not used in the result of aic-coco training. diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..aab5c44e1b651eb86335f0afee53872e5a5c5c34 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml @@ -0,0 +1,139 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - CSPNeXt + - UDP + Training Data: COCO + Name: cspnext-tiny_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.665 + AP@0.5: 0.874 + AP@0.75: 0.723 + AR: 0.723 + AR@0.5: 0.917 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-s_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.697 + AP@0.5: 0.886 + AP@0.75: 0.776 + AR: 0.753 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-m_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.732 + AP@0.5: 0.896 + AP@0.75: 0.806 + AR: 0.785 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-l_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.750 + AP@0.5: 0.904 + AP@0.75: 0.822 + AR: 0.8 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-tiny_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.655 + AP@0.5: 0.884 + AP@0.75: 0.731 + AR: 0.689 + AR@0.5: 0.89 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-s_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.905 + AP@0.75: 0.783 + AR: 0.733 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-m_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.925 + AP@0.75: 0.818 + AR: 0.777 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-l_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.772 + AP@0.5: 0.936 + AP@0.75: 0.839 + AR: 0.799 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..dc7dee47c3ec6917f3fffb034e36df3a5a226504 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md @@ -0,0 +1,42 @@ + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
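Hourglass, like the other top-down models in this diff, is supervised with per-keypoint Gaussian heatmaps under an MSE loss. The snippet below is a self-contained sketch of how a single target heatmap is rendered; the heatmap size and sigma follow the 48x64 / sigma=2 convention used elsewhere in these configs, and the actual encoding in MMPose is handled by the configured codec, not this function.

```python
import numpy as np

def gaussian_heatmap(center_xy, heatmap_size=(48, 64), sigma=2.0):
    """Render one keypoint as a 2D Gaussian on an (H, W) heatmap."""
    w, h = heatmap_size
    xs = np.arange(w, dtype=float)[None, :]   # (1, W)
    ys = np.arange(h, dtype=float)[:, None]   # (H, 1)
    cx, cy = center_xy
    d2 = (xs - cx) ** 2 + (ys - cy) ** 2
    # The continuous Gaussian peaks at 1.0 exactly at the keypoint location.
    return np.exp(-d2 / (2.0 * sigma ** 2))

hm = gaussian_heatmap((20, 33))
print(hm.shape, hm.max(), np.unravel_index(hm.argmax(), hm.shape))
# (64, 48) 1.0 (33, 20)
```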
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
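The AP/AR columns in the table below are COCO keypoint metrics, i.e. average precision and recall computed over OKS (object keypoint similarity) thresholds. The following is a minimal sketch of the OKS computation for one ground-truth/prediction pair; the per-keypoint constants are the commonly used COCO sigmas, and the random poses are placeholders.

```python
import numpy as np

# Standard COCO per-keypoint sigmas (nose, eyes, ears, shoulders, ..., ankles).
COCO_SIGMAS = np.array([
    .026, .025, .025, .035, .035, .079, .079, .072, .072,
    .062, .062, .107, .107, .087, .087, .089, .089])

def oks(gt_kpts, dt_kpts, visibility, area):
    """Object keypoint similarity between one GT pose and one prediction.

    gt_kpts, dt_kpts: (17, 2) arrays of (x, y); visibility: (17,) with v > 0
    for labelled keypoints; area: ground-truth object area in pixels^2.
    """
    k = 2.0 * COCO_SIGMAS                      # k_i constants
    d2 = np.sum((gt_kpts - dt_kpts) ** 2, axis=-1)
    e = d2 / (2.0 * area * k ** 2 + np.spacing(1))
    labelled = visibility > 0
    return float(np.mean(np.exp(-e[labelled]))) if labelled.any() else 0.0

gt = np.random.rand(17, 2) * 200
dt = gt + np.random.randn(17, 2) * 3           # small localisation error
print(oks(gt, dt, visibility=np.ones(17), area=180 * 90))
```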
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py) | 256x256 | 0.726 | 0.896 | 0.799 | 0.780 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256_20200709.log.json) | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py) | 384x384 | 0.746 | 0.900 | 0.812 | 0.797 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..6d9cfd91e99a8d53a437f4d71e318fb1226b18e6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml @@ -0,0 +1,40 @@ +Collections: +- Name: Hourglass + Paper: + Title: Stacked hourglass networks for human pose estimation + URL: https://link.springer.com/chapter/10.1007/978-3-319-46484-8_29 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hourglass.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py + In Collection: Hourglass + Metadata: + Architecture: &id001 + - Hourglass + Training Data: COCO + Name: td-hm_hourglass52_8xb32-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.726 + AP@0.5: 0.896 + AP@0.75: 0.799 + AR: 0.780 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py + In Collection: Hourglass + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hourglass52_8xb32-210e_coco-384x384 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.900 + AP@0.75: 0.812 + AR: 0.797 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..ef793f06fc392006ea57e9ece676b610e675bbf1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md @@ -0,0 +1,43 @@ + + +
+HRFormer (NIPS'2021) + +```bibtex +@article{yuan2021hrformer, + title={HRFormer: High-Resolution Vision Transformer for Dense Predict}, + author={Yuan, Yuhui and Fu, Rao and Huang, Lang and Lin, Weihong and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={Advances in Neural Information Processing Systems}, + volume={34}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py) | 256x192 | 0.738 | 0.904 | 0.812 | 0.793 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192_20220316.log.json) | +| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py) | 384x288 | 0.757 | 0.905 | 0.824 | 0.807 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288_20220316.log.json) | +| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.906 | 0.827 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192_20220316.log.json) | +| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py) | 384x288 | 0.774 | 0.909 | 0.842 | 0.823 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288_20220316.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..5ac7dc3636a7a020a396db422d8de1521a172cd1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml @@ -0,0 +1,72 @@ +Collections: +- Name: HRFormer + Paper: + Title: 'HRFormer: High-Resolution Vision Transformer for Dense Predict' + URL: https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrformer.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py + In Collection: HRFormer + Metadata: + Architecture: &id001 + - HRFormer + Training Data: COCO + Name: td-hm_hrformer-small_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.738 + AP@0.5: 0.904 + AP@0.75: 0.812 + AR: 0.793 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-small_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.905 + AP@0.75: 0.824 + AR: 0.807 + AR@0.5: 0.941 + Task: Body 2D 
Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-base_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.906 + AP@0.75: 0.827 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-base_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.774 + AP@0.5: 0.909 + AP@0.75: 0.842 + AR: 0.823 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..efe9cd27b91e2189760dec73682b997e2be58b95 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md @@ -0,0 +1,62 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+Albumentations (Information'2020) + +```bibtex +@article{buslaev2020albumentations, + title={Albumentations: fast and flexible image augmentations}, + author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A}, + journal={Information}, + volume={11}, + number={2}, + pages={125}, + year={2020}, + publisher={Multidisciplinary Digital Publishing Institute} +} +``` + +
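The augmentation configs in this group wrap Albumentations operations into the training pipeline. A minimal sketch of such a pipeline entry follows, assuming the `Albumentation` wrapper transform of MMPose 1.x; the transform choice and all numeric values are illustrative, not the exact settings of the configs listed here.

```python
# Hypothetical training-pipeline fragment: Albumentations ops wrapped by the
# MMPose `Albumentation` transform (values are illustrative only).
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomBBoxTransform'),
    dict(type='TopdownAffine', input_size=(192, 256)),
    dict(
        type='Albumentation',
        transforms=[
            # CoarseDropout is an Albumentations op; these numbers mirror the
            # spirit of the "coarsedropout" config, not its exact values.
            dict(type='CoarseDropout', max_holes=8, max_height=40, max_width=40,
                 min_holes=1, min_height=10, min_width=10, p=0.5),
        ]),
    dict(type='GenerateTarget',
         encoder=dict(type='MSRAHeatmap', input_size=(192, 256),
                      heatmap_size=(48, 64), sigma=2)),
    dict(type='PackPoseInputs'),
]
```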
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [coarsedropout](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py) | 256x192 | 0.753 | 0.908 | 0.822 | 0.805 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout_20210320.log.json) | +| [gridmask](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py) | 256x192 | 0.752 | 0.906 | 0.825 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask_20210320.log.json) | +| [photometric](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py) | 256x192 | 0.754 | 0.908 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric_20210320.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..7a29de4f64a702fddb2f42d9527c1762879f89b1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml @@ -0,0 +1,56 @@ +Collections: +- Name: Albumentations + Paper: + Title: 'Albumentations: fast and flexible image augmentations' + URL: https://www.mdpi.com/649002 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/albumentations.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO + Name: td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.908 + AP@0.75: 0.822 + AR: 0.805 + AR@0.5: 0.944 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.752 + AP@0.5: 0.906 + AP@0.75: 0.825 + AR: 0.804 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: *id001 + 
Training Data: COCO + Name: td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.908 + AP@0.75: 0.825 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..51fbf1322e474f8dd5cb8ca46de7c14a9bb26540 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md @@ -0,0 +1,43 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
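One way to sanity-check the val2017 numbers reported in the table below is to run a config's test loop against its released checkpoint. A rough sketch using the MMEngine runner, assuming an MMPose 1.x / MMEngine setup with COCO val2017 (including the person-detection result file) prepared under `data/coco`; the work dir is a placeholder.

```python
from mmengine.config import Config
from mmengine.runner import Runner

# HRNet-W32 256x192 entry from the table below (config path as referenced
# there; in this repository the configs live under mmpose/configs/).
cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_heatmap/coco/'
    'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')

# Point the runner at the released weights and a scratch work dir.
cfg.load_from = (
    'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/'
    'coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth')
cfg.work_dir = 'work_dirs/hrnet_w32_coco_eval'  # placeholder

runner = Runner.from_cfg(cfg)
metrics = runner.test()  # runs the config's evaluator (CocoMetric) on val2017
print(metrics)
```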
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py) | 384x288 | 0.761 | 0.908 | 0.826 | 0.811 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288_20220909.log) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py) | 384x288 | 0.767 | 0.911 | 0.832 | 0.817 | 0.947 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288_20220915.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..a0e5debe859ccc059e892b28d85aadb11f1a1857 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml @@ -0,0 +1,124 @@ +Collections: +- Name: HRNet + Paper: + Title: Deep high-resolution representation learning for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO + Name: td-hm_hrnet-w32_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.904 + AP@0.75: 0.819 + AR: 0.799 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth +- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.76 + AP@0.5: 0.906 + AP@0.75: 0.83 + AR: 0.81 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.806 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.767 + AP@0.5: 0.91 + AP@0.75: 0.831 + AR: 0.816 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.809 + AR@0.5: 0.944 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-b05435b9_20221025.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.906 + AP@0.75: 0.826 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-4ce66880_20221026.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.907 + AP@0.75: 0.822 + AR: 0.802 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md new file mode 100644 index 0000000000000000000000000000000000000000..1e066d563ca460bccb8294e9a1b9ffa3b7a33d51 --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md @@ -0,0 +1,61 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+ +MMPose supports training models with combined datasets. [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) and [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) are two examples; a minimal config sketch is given after the table below. + +- [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) leverages AIC data with partial keypoints as auxiliary data to train a COCO model +- [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) constructs a combined dataset whose keypoints are the union of COCO and AIC keypoints to train a model that predicts keypoints of both datasets. + +Evaluation results on COCO val2017 of models trained on the COCO dataset alone and on the combined datasets are shown below. These models are evaluated with a detector having human AP of 56.4 on the COCO val2017 dataset. + +| Train Set | Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :------------------------------------------- | :------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------: | :------------------------------------: | +| [coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | pose_hrnet_w32 | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) | pose_hrnet_w32 | 256x192 | 0.756 | 0.907 | 0.828 | 0.809 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-a9ea6d77_20230818.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge_20230818.json) | +| [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) | pose_hrnet_w32 | 256x192 | 0.755 | 0.904 | 0.825 | 0.807 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-458125cc_20230818.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine_20230818.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..c18382ec68035c0f7d560d782bbb44b5af2d5024 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md @@ -0,0 +1,60 @@ + +
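Here is the minimal sketch of the "merge"-style combination referenced above, assuming the `CombinedDataset` wrapper and `KeypointConverter` transform available in MMPose 1.x; the AIC annotation paths are placeholders and the keypoint mapping is truncated for illustration, the real values live in the coco-aic configs referenced above.

```python
# Hypothetical dataset block for "merge"-style COCO + AIC training
# (assumes the MMPose 1.x CombinedDataset / KeypointConverter API; the AIC
# paths are placeholders and the mapping is truncated to two pairs).
dataset_coco = dict(
    type='CocoDataset',
    data_root='data/coco/',
    ann_file='annotations/person_keypoints_train2017.json',
    data_prefix=dict(img='train2017/'),
    pipeline=[],  # the shared pipeline is applied by the wrapper below
)

dataset_aic = dict(
    type='AicDataset',
    data_root='data/aic/',
    ann_file='annotations/aic_train.json',  # placeholder path
    data_prefix=dict(img='train_images/'),  # placeholder path
    pipeline=[
        # Re-index AIC keypoints into the 17-keypoint COCO layout; joints
        # without a COCO counterpart simply stay unlabeled.
        dict(type='KeypointConverter', num_keypoints=17,
             mapping=[(0, 6), (1, 8)]),  # truncated, illustrative mapping
    ],
)

train_dataset = dict(
    type='CombinedDataset',
    metainfo=dict(from_file='configs/_base_/datasets/coco.py'),
    datasets=[dataset_coco, dataset_aic],
    pipeline=train_pipeline,  # the usual top-down training pipeline
)
```

The "combine" variant differs mainly in using a union keypoint definition as the shared metainfo instead of converting AIC annotations down to the COCO layout.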
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
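In MMPose 1.x, DARK is typically switched on through the heatmap codec rather than through a separate module. A minimal sketch follows, assuming the `MSRAHeatmap` codec and its `unbiased` flag; the sizes mirror a 256x192 setting and are illustrative.

```python
# Hypothetical codec block: DARK-style unbiased encoding/decoding on top of a
# standard MSRA Gaussian heatmap (assumes the MMPose 1.x codec API).
codec = dict(
    type='MSRAHeatmap',
    input_size=(192, 256),   # (w, h) of the model input
    heatmap_size=(48, 64),   # (w, h) of the predicted heatmap
    sigma=2,
    unbiased=True,           # enables the DARK coordinate representation
)

# The same codec dict is then referenced twice in a config, roughly as:
#   dict(type='GenerateTarget', encoder=codec)   # in the training pipeline
#   head=dict(..., decoder=codec)                # in the model head
```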
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.907 | 0.825 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192_20220914.log) | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.766 | 0.907 | 0.829 | 0.815 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288_20220917.log) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.764 | 0.907 | 0.831 | 0.814 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.772 | 0.911 | 0.833 | 0.821 | 0.948 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288_20220916.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..9f14e9ffad78d3d36f5f89f2166c6b3cda7ab2ff --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml @@ -0,0 +1,73 @@ +Collections: +- Name: DarkPose + Paper: + Title: Distribution-aware coordinate representation for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2020/html/Zhang_Distribution-Aware_Coordinate_Representation_for_Human_Pose_Estimation_CVPR_2020_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/dark.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: COCO + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.766 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.815 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.764 + AP@0.5: 0.907 + AP@0.75: 0.831 + AR: 0.814 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.772 + AP@0.5: 0.911 + AP@0.75: 0.833 + AR: 0.821 + AR@0.5: 0.948 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..3e52624dc760eaf8bc4d6c7f75a29c4e1747a6e0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md @@ -0,0 +1,56 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+FP16 (ArXiv'2017) + +```bibtex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.907 | 0.822 | 0.802 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192_20220914.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..2b85d85a25125cb8eef083a87e597591850a1402 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md @@ -0,0 +1,63 @@ + + +
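The fp16 row above differs from the plain HRNet baseline mainly in how the optimizer step is wrapped. Below is a minimal sketch of enabling mixed-precision training in an MMEngine-based config, assuming `AmpOptimWrapper`; the optimizer settings are illustrative rather than the exact values of the fp16 config.

```python
# Hypothetical optimizer block: mixed-precision training via MMEngine's
# AmpOptimWrapper (assumes MMPose 1.x on top of MMEngine; values are
# illustrative only).
optim_wrapper = dict(
    type='AmpOptimWrapper',   # autocast forward/backward + gradient scaling
    loss_scale='dynamic',     # dynamic loss scaling, typical for fp16 training
    optimizer=dict(type='Adam', lr=5e-4),
)
```

Recent MMPose versions also expose an `--amp` switch on the training entry point that applies an equivalent wrapper, if that option is available in the installed version.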
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py) | 256x192 | 0.762 | 0.907 | 0.829 | 0.810 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192_20220914.log) | +| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py) | 384x288 | 0.768 | 0.909 | 0.832 | 0.815 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288_20220914.log) | +| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py) | 256x192 | 0.768 | 0.908 | 0.833 | 0.817 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py) | 384x288 | 0.773 | 0.911 | 0.836 | 0.821 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288_20220913.log) | +| [pose_hrnet_w32_udp_regress](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py) | 256x192 | 0.759 | 0.907 | 0.827 | 0.813 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192_20220226.log) | + +Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). 
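As with DARK above, UDP enters the pipeline at the codec level in MMPose 1.x. A minimal sketch follows, assuming the `UDPHeatmap` codec; the sizes mirror a 256x192 setting and are illustrative.

```python
# Hypothetical codec block: UDP-style unbiased data processing
# (assumes the MMPose 1.x codec API; values are illustrative).
codec = dict(
    type='UDPHeatmap',
    input_size=(192, 256),   # (w, h) of the model input
    heatmap_size=(48, 64),   # (w, h) of the predicted heatmap
    sigma=2,
)

# The note above says these UDP models also adopt DARK's unbiased decoding;
# exactly how that is wired depends on the codec implementation, so check the
# configs referenced in the table rather than relying on this sketch.
```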
diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..01cba761ec4a97c8d84273de7f2cf720de62ed5b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml @@ -0,0 +1,90 @@ +Collections: +- Name: UDP + Paper: + Title: 'The Devil Is in the Details: Delving Into Unbiased Data Processing for + Human Pose Estimation' + URL: http://openaccess.thecvf.com/content_CVPR_2020/html/Huang_The_Devil_Is_in_the_Details_Delving_Into_Unbiased_Data_CVPR_2020_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/udp.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - HRNet + - UDP + Training Data: COCO + Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.762 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.810 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.768 + AP@0.5: 0.909 + AP@0.75: 0.832 + AR: 0.815 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.768 + AP@0.5: 0.908 + AP@0.75: 0.833 + AR: 0.817 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.773 + AP@0.5: 0.911 + AP@0.75: 0.836 + AR: 0.821 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.759 + AP@0.5: 0.907 + AP@0.75: 0.827 + AR: 0.813 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..28f608d54af3d7098886a077dbdfd5b7f4e50b4f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md @@ -0,0 +1,42 @@ + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py) | 256x192 | 0.642 | 0.867 | 0.719 | 0.705 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192_20211230.log.json) | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py) | 384x288 | 0.676 | 0.876 | 0.746 | 0.735 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288_20211230.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py) | 256x192 | 0.676 | 0.880 | 0.756 | 0.736 | 0.922 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192_20210626.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py) | 384x288 | 0.700 | 0.883 | 0.776 | 0.758 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288_20210626.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..f92360587237823bde9ce0f042c08c8f5915ca3f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml @@ -0,0 +1,72 @@ +Collections: +- Name: LiteHRNet + Paper: + Title: 'Lite-HRNet: A Lightweight High-Resolution Network' + URL: https://arxiv.org/abs/2104.06403 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/litehrnet.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py + In Collection: LiteHRNet + Metadata: + Architecture: &id001 + - LiteHRNet + Training Data: COCO + Name: td-hm_litehrnet-18_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.642 + AP@0.5: 0.867 + AP@0.75: 0.719 + AR: 0.705 + AR@0.5: 0.911 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-18_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.676 + AP@0.5: 0.876 + AP@0.75: 0.746 + AR: 0.735 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-30_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.676 + AP@0.5: 0.88 + AP@0.75: 0.756 + AR: 0.736 + AR@0.5: 0.922 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-30_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.883 + AP@0.75: 0.776 + AR: 0.758 + AR@0.5: 0.926 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..aed9fd0246bf4d6f0d3379d7317478ab9013eef2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md @@ -0,0 +1,41 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.648 | 0.874 | 0.725 | 0.709 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192_20221016.log) | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.677 | 0.882 | 0.746 | 0.734 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288_20221013.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..c7993fe516dd6a79895ed08f50c624d23c4ee0aa --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml @@ -0,0 +1,35 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO + Name: td-hm_mobilenetv2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.648 + AP@0.5: 0.874 + AP@0.75: 0.725 + AR: 0.709 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_mobilenetv2_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.677 + AP@0.5: 0.882 + AP@0.75: 0.746 + AR: 0.734 + AR@0.5: 0.920 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..d86bc2c2ada7c560e1e2770a1e864c27a8417d3c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md @@ -0,0 +1,42 @@ + + +
+MSPN (ArXiv'2019) + +```bibtex +@article{li2019rethinking, + title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, + author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, + journal={arXiv preprint arXiv:1901.00148}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [mspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.723 | 0.895 | 0.794 | 0.788 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192_20201123.log.json) | +| [2xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.903 | 0.826 | 0.816 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192_20201123.log.json) | +| [3xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.758 | 0.904 | 0.830 | 0.821 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192_20201123.log.json) | +| [4xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 0.826 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192_20201123.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..77eca18b6f50220650671a6f2b88eabd06a14baf --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml @@ -0,0 +1,72 @@ +Collections: +- Name: MSPN + Paper: + Title: Rethinking on Multi-Stage Networks for Human Pose Estimation + URL: https://arxiv.org/abs/1901.00148 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/mspn.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: &id001 + - MSPN + Training Data: COCO + Name: td-hm_mspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.723 + AP@0.5: 0.895 + AP@0.75: 0.794 + AR: 0.788 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_2xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.903 + AP@0.75: 0.826 + AR: 0.816 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + 
Training Data: COCO + Name: td-hm_3xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.904 + AP@0.75: 0.83 + AR: 0.821 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_4xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.765 + AP@0.5: 0.906 + AP@0.75: 0.835 + AR: 0.826 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..8a375a4c2022442f0ce1cb19820da0cea5a1e802 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md @@ -0,0 +1,57 @@ + + +
+PVT (ICCV'2021) + +```bibtex +@inproceedings{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={568--578}, + year={2021} +} +``` + +
+ +
+PVTV2 (CVMJ'2022) + +```bibtex +@article{wang2022pvt, + title={PVT v2: Improved baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={Computational Visual Media}, + pages={1--10}, + year={2022}, + publisher={Springer} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_pvt-s](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py) | 256x192 | 0.714 | 0.896 | 0.794 | 0.773 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192_20220501.log.json) | +| [pose_pvtv2-b2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py) | 256x192 | 0.737 | 0.905 | 0.812 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192_20220501.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..2b4303d7040486806f98e01edb6d296538c3089f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml @@ -0,0 +1,35 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - PVT + Training Data: COCO + Name: td-hm_pvt-s_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.714 + AP@0.5: 0.896 + AP@0.75: 0.794 + AR: 0.773 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_pvtv2-b2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.791 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..cb7ada4d6b72dfea2a029773148ed852ba00b1a8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md @@ -0,0 +1,46 @@ + + +
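The `ckpt` links in these tables are plain PyTorch checkpoint files, so a download can be sanity-checked before it is wired into a config. A minimal sketch, assuming PyTorch is installed; the `meta`/`state_dict` layout is the usual OpenMMLab convention and is an assumption here, not something this diff guarantees.

```python
# Minimal sketch: fetch one checkpoint from the PVT table above and inspect it.
# The 'meta'/'state_dict' layout is assumed (usual OpenMMLab convention); newer
# PyTorch releases may require an explicit weights_only=False to unpickle 'meta'.
from torch.hub import load_state_dict_from_url

URL = ('https://download.openmmlab.com/mmpose/top_down/pvt/'
       'pvt_small_coco_256x192-4324a49d_20220501.pth')

ckpt = load_state_dict_from_url(URL, map_location='cpu')
print(sorted(ckpt.keys()))                 # typically ['meta', 'state_dict', ...]
state_dict = ckpt.get('state_dict', ckpt)  # fall back if the weights are stored flat
print(len(state_dict), 'parameter tensors')
for name in list(state_dict)[:5]:          # peek at the first few parameter names
    print(name, tuple(state_dict[name].shape))
```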
+ResNeSt (ArXiv'2020) + +```bibtex +@article{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, + journal={arXiv preprint arXiv:2004.08955}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py) | 256x192 | 0.720 | 0.899 | 0.800 | 0.775 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192_20210320.log.json) | +| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py) | 384x288 | 0.737 | 0.900 | 0.811 | 0.789 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288_20210320.log.json) | +| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py) | 256x192 | 0.725 | 0.900 | 0.807 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192_20210320.log.json) | +| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py) | 384x288 | 0.745 | 0.905 | 0.818 | 0.798 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288_20210320.log.json) | +| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.905 | 0.812 | 0.787 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192_20210517.log.json) | +| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py) | 384x288 | 0.753 | 0.907 | 0.827 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288_20210517.log.json) | +| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.907 | 0.819 | 0.792 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192_20210517.log.json) | +| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py) | 384x288 | 0.754 | 0.908 | 0.828 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288_20210517.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..082c6a0aa278868876a472edbb747813b95281ac --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml @@ -0,0 +1,131 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNeSt + Training Data: COCO + Name: td-hm_resnest50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.720 + AP@0.5: 0.899 + AP@0.75: 0.8 + AR: 0.775 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.9 + AP@0.75: 0.811 + AR: 0.789 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.725 + AP@0.5: 0.9 + AP@0.75: 0.807 + AR: 0.781 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.745 + AP@0.5: 0.905 + AP@0.75: 0.818 + AR: 0.798 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest200_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.787 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest200_8xb16-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.907 + AP@0.75: 0.827 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest269_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 
0.737 + AP@0.5: 0.907 + AP@0.75: 0.819 + AR: 0.792 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest269_8xb16-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.908 + AP@0.75: 0.828 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..dbe14267edc6271c1b8cda43cbbfba6be17d4689 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md @@ -0,0 +1,68 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.718 | 0.898 | 0.796 | 0.774 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192_20220923.log) | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py) | 384x288 | 0.731 | 0.900 | 0.799 | 0.782 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288_20220923.log) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.904 | 0.809 | 0.783 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py) | 384x288 | 0.749 | 0.906 | 0.817 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py) | 256x192 | 0.736 | 0.904 | 0.818 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192_20220928.log) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py) | 384x288 | 0.750 | 0.908 | 0.821 | 0.800 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288_20220927.log) | + +The following model is equipped with a visibility prediction head and has been trained using COCO and AIC datasets. 
+ +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge.py) | 256x192 | 0.729 | 0.900 | 0.807 | 0.783 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge-21815b2c_20230726.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192_20220923.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..296be8898bc73d00923eef74d7275802ae9f7c9e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml @@ -0,0 +1,121 @@ +Collections: +- Name: SimpleBaseline2D + Paper: + Title: Simple baselines for human pose estimation and tracking + URL: http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simplebaseline2d.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: COCO + Name: td-hm_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.718 + AP@0.5: 0.898 + AP@0.75: 0.796 + AR: 0.774 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.9 + AP@0.75: 0.799 + AR: 0.782 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.728 + AP@0.5: 0.904 + AP@0.75: 0.809 + AR: 0.783 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.906 + AP@0.75: 0.817 + AR: 0.799 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.736 + AP@0.5: 0.904 + AP@0.75: 0.818 + AR: 0.791 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.75 + AP@0.5: 0.908 + AP@0.75: 0.821 + AR: 0.8 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_fp16-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.716 + AP@0.5: 0.898 + AP@0.75: 0.798 + AR: 0.772 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..6f1b0107f30e336df3004788a174d4bfd2f7aef0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md @@ -0,0 +1,79 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.724 | 0.897 | 0.797 | 0.777 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.735 | 0.902 | 0.801 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288_20220926.log) | +| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.733 | 0.900 | 0.810 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.905 | 0.818 | 0.799 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288_20220926.log) | +| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.819 | 0.796 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192_20221031.log) | +| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.755 | 0.907 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288_20221031.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..02e4a7f43f61b709a6ede2f3a42ab5ac91e56cd8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml @@ 
-0,0 +1,100 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + - DarkPose + Training Data: COCO + Name: td-hm_res50_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.897 + AP@0.75: 0.797 + AR: 0.777 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.735 + AP@0.5: 0.902 + AP@0.75: 0.801 + AR: 0.786 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.733 + AP@0.5: 0.9 + AP@0.75: 0.81 + AR: 0.786 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.905 + AP@0.75: 0.818 + AR: 0.799 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_dark-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.743 + AP@0.5: 0.906 + AP@0.75: 0.819 + AR: 0.796 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_dark-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..2731ca8534b509b694f6d5f6958ad6f080c171c1 --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md @@ -0,0 +1,73 @@ + + +
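Because `resnet_coco.yml` and `resnet_dark_coco.yml` above record the plain and DARK-decoded runs of the same backbones, the metafiles alone are enough to quantify what DarkPose adds. A minimal sketch, assuming PyYAML and the paths used in this diff; pairing entries via the `_dark-` → `_` name substitution is a heuristic for this sketch, not an official mapping.

```python
# Minimal sketch: compare the DARK-decoded ResNet entries against their plain
# counterparts using only the two metafiles added above. Name pairing is heuristic.
import yaml

BASE = 'mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/'

def ap_by_name(path):
    with open(path) as f:
        models = yaml.safe_load(f)['Models']
    return {m['Name']: m['Results'][0]['Metrics']['AP'] for m in models}

plain = ap_by_name(BASE + 'resnet_coco.yml')
dark = ap_by_name(BASE + 'resnet_dark_coco.yml')

for name, ap in dark.items():
    counterpart = name.replace('_dark-', '_')  # td-hm_res50_dark-8xb64-... -> td-hm_res50_8xb64-...
    if counterpart in plain:
        print(f'{name}: AP {ap:.3f} ({ap - plain[counterpart]:+.3f} vs plain decoding)')
```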
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+FP16 (ArXiv'2017) + +```bibtex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.716 | 0.898 | 0.798 | 0.772 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192_20220927.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..106720153251651f5ae1a53ace2a7333c5882898 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md @@ -0,0 +1,45 @@ + + +
+ResNetV1D (CVPR'2019) + +```bibtex +@inproceedings{he2019bag, + title={Bag of tricks for image classification with convolutional neural networks}, + author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={558--567}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.897 | 0.796 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192_20221020.log) | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py) | 384x288 | 0.730 | 0.899 | 0.800 | 0.782 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288_20221020.log) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py) | 256x192 | 0.732 | 0.901 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192_20221021.log) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py) | 384x288 | 0.748 | 0.906 | 0.817 | 0.798 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288_20221028.log) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.814 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192_20221021.log) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py) | 384x288 | 0.751 | 0.907 | 0.821 | 0.801 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288_20221022.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..765c8aaabc2fa08b5ca343a5f0bd8ac6a94c764b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml @@ 
-0,0 +1,99 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNetV1D + Training Data: COCO + Name: td-hm_resnetv1d50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.722 + AP@0.5: 0.897 + AP@0.75: 0.796 + AR: 0.777 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.899 + AP@0.75: 0.8 + AR: 0.782 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.732 + AP@0.5: 0.901 + AP@0.75: 0.808 + AR: 0.785 + AR@0.5: 0.940 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.906 + AP@0.75: 0.817 + AR: 0.798 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.904 + AP@0.75: 0.814 + AR: 0.790 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.907 + AP@0.75: 0.821 + AR: 0.801 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..8862fddf6ca5c3ffe0c9df407787a1d7a0312c36 --- /dev/null 
+++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md @@ -0,0 +1,45 @@ + + +
+ResNext (CVPR'2017) + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py) | 256x192 | 0.715 | 0.897 | 0.791 | 0.771 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192_20200727.log.json) | +| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py) | 384x288 | 0.724 | 0.899 | 0.794 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288_20200727.log.json) | +| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py) | 256x192 | 0.726 | 0.900 | 0.801 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192_20200727.log.json) | +| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py) | 384x288 | 0.744 | 0.903 | 0.815 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288_20200727.log.json) | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.903 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192_20200727.log.json) | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py) | 384x288 | 0.742 | 0.904 | 0.810 | 0.794 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288_20200727.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ebb616ecdcbfc4f94c2a0ded053e9ef18e66c45 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml @@ -0,0 +1,99 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNext + Training Data: COCO + Name: td-hm_resnext50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.715 + AP@0.5: 0.897 + AP@0.75: 0.791 + AR: 0.771 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.899 + AP@0.75: 0.794 + AR: 0.777 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.726 + AP@0.5: 0.9 + AP@0.75: 0.801 + AR: 0.781 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.744 + AP@0.5: 0.903 + AP@0.75: 0.815 + AR: 0.794 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.903 + AP@0.75: 0.808 + AR: 0.785 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.742 + AP@0.5: 0.904 + AP@0.75: 0.81 + AR: 0.794 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..40f570c3c142266e13024c30ebd35ca2fcc1d00f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md @@ -0,0 +1,44 @@ + + +
+RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rsn_18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py) | 256x192 | 0.704 | 0.887 | 0.781 | 0.773 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192_20221013.log) | +| [rsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.894 | 0.799 | 0.790 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192_20221013.log) | +| [2xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.748 | 0.900 | 0.821 | 0.810 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192_20221013.log) | +| [3xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.750 | 0.900 | 0.824 | 0.814 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192_20221013.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..2974aaf2c0d84117178ad5c017ba0acbea6b024f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml @@ -0,0 +1,72 @@ +Collections: +- Name: RSN + Paper: + Title: Learning Delicate Local Representations for Multi-Person Pose Estimation + URL: https://link.springer.com/chapter/10.1007/978-3-030-58580-8_27 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/rsn.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: &id001 + - RSN + Training Data: COCO + Name: td-hm_rsn18_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.704 + AP@0.5: 0.887 + AP@0.75: 0.781 + AR: 0.773 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_rsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + 
Metrics: + AP: 0.724 + AP@0.5: 0.894 + AP@0.75: 0.799 + AR: 0.79 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_2xrsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.9 + AP@0.75: 0.821 + AR: 0.81 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_3xrsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.75 + AP@0.5: 0.9 + AP@0.75: 0.824 + AR: 0.814 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..5fb5833e236c1136456a0f8cf4ada0ad47b3caa9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md @@ -0,0 +1,43 @@ + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.899 | 0.807 | 0.784 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192_20200709.log.json) | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py) | 384x288 | 0.751 | 0.906 | 0.818 | 0.802 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288_20200709.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py) | 256x192 | 0.733 | 0.902 | 0.811 | 0.789 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192_20200709.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py) | 384x288 | 0.752 | 0.906 | 0.823 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288_20200709.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..cf68c67f90621472b71916e876d79794df3d583c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml @@ -0,0 +1,66 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SCNet + Training Data: COCO + Name: td-hm_scnet50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.728 + AP@0.5: 0.899 + AP@0.75: 0.807 + AR: 0.784 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: topdown_heatmap_scnet50_coco_384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.906 + AP@0.75: 0.818 + AR: 0.802 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_scnet101_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.733 + AP@0.5: 0.902 + AP@0.75: 0.811 + AR: 0.789 + AR@0.5: 
0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_scnet101_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.752 + AP@0.5: 0.906 + AP@0.75: 0.823 + AR: 0.804 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..b704d9d1902f6dbdb6dd80517d4c44f35ae86097 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md @@ -0,0 +1,47 @@ + + +
+SEResNet (CVPR'2018) + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.729 | 0.903 | 0.807 | 0.784 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192_20200727.log.json) | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py) | 384x288 | 0.748 | 0.904 | 0.819 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288_20200727.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py) | 256x192 | 0.734 | 0.905 | 0.814 | 0.790 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192_20200727.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py) | 384x288 | 0.754 | 0.907 | 0.823 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288_20200727.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.899 | 0.810 | 0.787 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192_20200727.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py) | 384x288 | 0.753 | 0.906 | 0.824 | 0.806 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288_20200727.log.json) | + +Note that * means without imagenet pre-training. 
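The rows above pair each config with a released checkpoint, so a model can be loaded straight from the listed URLs. As a minimal sketch, assuming MMPose 1.x's `MMPoseInferencer` API, an MMPose checkout as the working directory, and a hypothetical local image `person.jpg`:

```python
from mmpose.apis import MMPoseInferencer

# Config and checkpoint taken from the pose_seresnet_50 (256x192) row above.
inferencer = MMPoseInferencer(
    pose2d='configs/body_2d_keypoint/topdown_heatmap/coco/'
           'td-hm_seresnet50_8xb64-210e_coco-256x192.py',
    pose2d_weights='https://download.openmmlab.com/mmpose/top_down/seresnet/'
                   'seresnet50_coco_256x192-25058b66_20200727.pth',
)

# Calling the inferencer returns a generator with one result per input image;
# in MMPose 1.x the predicted keypoints sit under the 'predictions' key.
result = next(inferencer('person.jpg'))
```

The same config/checkpoint pair can also be evaluated on COCO val2017 with MMPose's `tools/test.py` script, which is the usual way to reproduce the AP/AR numbers in the table.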
diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..945e84e223fc6ee0fa8820e331dea7df91bd8650 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml @@ -0,0 +1,98 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SEResNet + Training Data: COCO + Name: td-hm_seresnet50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.729 + AP@0.5: 0.903 + AP@0.75: 0.807 + AR: 0.784 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.904 + AP@0.75: 0.819 + AR: 0.799 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.734 + AP@0.5: 0.905 + AP@0.75: 0.814 + AR: 0.79 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.907 + AP@0.75: 0.823 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.899 + AP@0.75: 0.81 + AR: 0.787 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.906 + AP@0.75: 0.824 + AR: 0.806 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md new file mode 100644 index 
0000000000000000000000000000000000000000..0c8be860ab7d2a58b3ba813347d754b9f5a98268 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md @@ -0,0 +1,41 @@ + + +
+ShufflenetV1 (CVPR'2018) + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py) | 256x192 | 0.587 | 0.849 | 0.654 | 0.654 | 0.896 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192_20221013.log) | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py) | 384x288 | 0.626 | 0.862 | 0.696 | 0.687 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288_20221013.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..fbdc89936d59ecdbdfbc410f4b92f00070423145 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml @@ -0,0 +1,35 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ShufflenetV1 + Training Data: COCO + Name: td-hm_shufflenetv1_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.587 + AP@0.5: 0.849 + AP@0.75: 0.654 + AR: 0.654 + AR@0.5: 0.896 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_shufflenetv1_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.626 + AP@0.5: 0.862 + AP@0.75: 0.696 + AR: 0.687 + AR@0.5: 0.903 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..f613f4fef145e8444b207a608b661b11aba31983 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md @@ -0,0 +1,41 @@ + + +
+ShufflenetV2 (ECCV'2018) + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.602 | 0.857 | 0.672 | 0.668 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192_20221014.log) | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.638 | 0.866 | 0.707 | 0.699 | 0.910 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288_20221014.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..cdda3a8667ee0e22146a257dfd25c514a50dc6f2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml @@ -0,0 +1,35 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ShufflenetV2 + Training Data: COCO + Name: td-hm_shufflenetv2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.602 + AP@0.5: 0.857 + AP@0.75: 0.672 + AR: 0.668 + AR@0.5: 0.902 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_shufflenetv2_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.638 + AP@0.5: 0.866 + AP@0.75: 0.707 + AR: 0.699 + AR@0.5: 0.91 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..5bcc5bd187526b01e711fe3049e3007146409cd5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md @@ -0,0 +1,78 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Swin (ICCV'2021) + +```bibtex +@inproceedings{liu2021swin, + title={Swin transformer: Hierarchical vision transformer using shifted windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={10012--10022}, + year={2021} +} +``` + +
+ + + +
+FPN (CVPR'2017) + +```bibtex +@inproceedings{lin2017feature, + title={Feature pyramid networks for object detection}, + author={Lin, Tsung-Yi and Doll{\'a}r, Piotr and Girshick, Ross and He, Kaiming and Hariharan, Bharath and Belongie, Serge}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2117--2125}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_swin_t](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.901 | 0.806 | 0.782 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192_20220503.log.json) | +| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.820 | 0.794 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192_20220705.log.json) | +| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.759 | 0.910 | 0.832 | 0.811 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288_20220705.log.json) | +| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.821 | 0.798 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192_20220705.log.json) | +| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.763 | 0.912 | 0.830 | 0.814 | 0.949 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288_20220705.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..09ede5fa5c4ec01b77d997b4b318527fa0e27daf --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml @@ -0,0 +1,99 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - Swin + Training Data: COCO + Name: td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.901 + AP@0.75: 0.806 + AR: 0.782 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.904 + AP@0.75: 0.82 + AR: 0.794 + AR@0.5: 0.942 + Task: Body 2D Keypoint 
+ Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.759 + AP@0.5: 0.91 + AP@0.75: 0.832 + AR: 0.811 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.743 + AP@0.5: 0.906 + AP@0.75: 0.821 + AR: 0.798 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.763 + AP@0.5: 0.912 + AP@0.75: 0.83 + AR: 0.814 + AR@0.5: 0.949 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/swin_b_p4_w7_fpn_coco_256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: topdown_heatmap_swin_b_p4_w7_fpn_coco_256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.741 + AP@0.5: 0.907 + AP@0.75: 0.821 + AR: 0.798 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_fpn_coco_256x192-a3b91c45_20220705.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge.py new file mode 100644 index 0000000000000000000000000000000000000000..f5def39ed911b661b0a651a4c0b132b66bab934d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge.py @@ -0,0 +1,167 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='VisPredictHead', + loss=dict( + type='BCELoss', + use_target_weight=True, + use_sigmoid=True, + loss_weight=1e-3, + ), + pose_cfg=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec)), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + # score_mode='bbox', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7af125c24d81c4bfa81cdafa3cb95f9729511b66 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,152 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = 
[ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=2, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=2, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 2, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..0680f6995eee3dc9a345eab0353f5dc65c023f0f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=2, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=2, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ]) * 2, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..41162f01e5ac5c63977c11ea70b49372ef2b8476 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,152 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
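+# The list comprehension below builds one MegviiHeatmap codec per kernel size;
+# all codecs share the 192x256 input and 48x64 heatmap size and differ only in
+# the Gaussian kernel. With multilevel=True in GenerateTarget, a target is
+# encoded for every kernel size, and level_indices in the head maps each
+# stage/unit output to one of these levels: coarser kernels supervise earlier
+# units, while the finest kernel (5) supervises the last unit, whose codec
+# (codec[-1]) is also the decoder used for test-time predictions.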
+kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=3, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=3, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 3, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..99326451c6d05162bc3df0c8d71e8305baf574fd --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + 
type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=3, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=3, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 3, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..999245e74dfc87985e34e3122979fe02486c5b4f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,152 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=4, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ]) * 4, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..5a557805052048f2ccbb6c6dc89fc3e578922a36 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py @@ -0,0 +1,153 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + 
type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base_20230913.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..06522b7b911370b214cb0917f00b327c500194aa --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + 
optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..03ae669807ff5849aec01c37633669be790555e6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py @@ -0,0 +1,153 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge_20230913.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = 
dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6b8afcf0f4ba2cd9a60c760db428168b556f882d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', 
use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..2035e786dfe538f85b4cdcb19ed44dc11b4ba8f9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py @@ -0,0 +1,153 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + 
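+# UDP ('Unbiased Data Processing') heatmap codec: 192x256 input crops, 48x64 heatmaps (stride 4), Gaussian sigma 2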
type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large_20230913.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d0e90578cb654283de23e77f2353e94d0b0e42 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + 
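+# AdamW with layer-wise learning-rate decay over the 24 ViT-Large blocks (decay rate 0.8); gradients are clipped to an L2 norm of 1.0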
optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d8216089b79d8d97f59e592a04ab4fac5c448587 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_small_20230913.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', 
input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..5b77da96eba1e1fa83c93a5010609ec5cced0a5b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 
'v1/pretrained_models/mae_pretrain_vit_small_20230913.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4051f4c5ec52fe170d5a6a050e867fe5ebb255a3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(40, 56), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='AlexNet', num_classes=-1), + 
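+# heatmap head predicting the 17 COCO keypoints from the 256-channel AlexNet feature map (num_classes=-1 uses the backbone as a pure feature extractor, without its classifier)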
head=dict( + type='HeatmapHead', + in_channels=256, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..38b23cf7182c45a507b87c4a372fd2e174e32eb1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(36, 48), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=17, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=17, + out_channels=17, + num_stages=6, + deconv_out_channels=None, + 
final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..17f7eb9677fbf0d285628e059835a45f443caeef --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(24, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=17, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=17, + out_channels=17, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', 
use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..b9d49c8e6a7df8160db26ff6a0cbabe20b6f4a4a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=17, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + 
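+# flip test averages predictions from the original and horizontally flipped crops; shift_heatmap moves the flipped heatmap by one pixel to reduce flip misalignment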
shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py new file mode 100644 index 0000000000000000000000000000000000000000..d9932ff9e3773a591650ee94a95da2784bf562eb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=17, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 
'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8b81dbdaac0c4df6eed9f287379b25e81ab6ce7d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py @@ -0,0 +1,174 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.2, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + mlp_ratios=[4]), + stage2=dict( + num_modules=1, + 
num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(78, 156), + num_heads=[2, 4], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(78, 156, 312), + num_heads=[2, 4, 8], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(78, 156, 312, 624), + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_base-32815020_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=78, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..351685464c9560dd748da728372dbcf46a8dfc70 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py @@ -0,0 +1,174 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.2, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(78, 156), + num_heads=[2, 4], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(78, 156, 312), + num_heads=[2, 4, 8], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(78, 156, 312, 624), + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_base-32815020_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=78, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + 
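+# standard top-down evaluation: person boxes are taken from the pre-computed detection results above (bbox_file) rather than from ground-truth boxes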
data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6c59395c8ad5365285c3a26d9fbeb3855b050433 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py @@ -0,0 +1,174 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.1, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + num_mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(32, 64), + num_heads=[1, 2], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(32, 64, 128), + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(32, 64, 128, 256), + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_small-09516375_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
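+# randomly shift, rescale and rotate the person box before cropping (the main geometric augmentation of the top-down pipeline)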
dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..eee3521a7c617e30efa16224520bda00fe2e64e7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py @@ -0,0 +1,174 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.1, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + num_mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(32, 64), + num_heads=[1, 2], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + 
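+# stages 3 and 4 add wider branches (128 and 256 channels) while every HRFormer block keeps a 7x7 local attention window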
stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(32, 64, 128), + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(32, 64, 128, 256), + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_small-09516375_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ea486d830a5d397f0e65958c832933a3de6fee6d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size 
+auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..ae15d35ee11973169434b0b6d6b03ec46c9530a4 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py @@ -0,0 +1,150 @@ 
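# Illustrative aside (not part of the patch): each config added in this diff is a
# self-contained MMEngine config file. A minimal sketch of how one of them might be
# consumed for top-down inference with the MMPose 1.x Python API; the checkpoint path
# and the demo image below are placeholders, not files provided by this repository.
from mmpose.apis import inference_topdown, init_model

cfg_file = ('mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/'
            'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')
ckpt_file = 'work_dirs/hrnet_w32_coco_256x192.pth'  # placeholder checkpoint

# Build the TopdownPoseEstimator described by the config and load the weights.
model = init_model(cfg_file, ckpt_file, device='cpu')

# With no explicit person boxes the whole image is treated as a single bounding box;
# in practice the boxes would come from a detector and be passed via `bboxes=`.
data_samples = inference_topdown(model, 'demo.jpg')
print(data_samples[0].pred_instances.keypoints.shape)  # expected (1, 17, 2)

# Training goes through the standard MMEngine entry point, e.g.
# `python tools/train.py <config>`; `auto_scale_lr.base_batch_size` then rescales
# the learning rate if the actual total batch size differs from the tuned one.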
+_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d2ed0bfd422568e71aca13c7be56217dd5d381 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py @@ -0,0 +1,221 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=3)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=19, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + 
dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py new file mode 100644 index 0000000000000000000000000000000000000000..847a40da2f08516a24e8bb765aac454a5cf0dc5f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py @@ -0,0 +1,187 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + 
num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..a3ac0bd58901ec998641eec822561abb97779fc0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py @@ -0,0 +1,165 
@@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict( + type='CoarseDropout', + max_holes=8, + max_height=40, + max_width=40, + min_holes=1, + min_height=10, + min_width=10, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + 
bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7273a0503bd7e67505820de75a4be106922f43f0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..67b13b8babfe0ac672902f42212b66c5254433a2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..306d0aeb44b8014c3fa31743ff92b55b3b417927 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py @@ -0,0 +1,7 @@ +_base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'] + +# fp16 settings +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', +) diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d380ad243db94d0ef80a55cee830fe28954c3b0e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py @@ -0,0 +1,162 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 
57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict( + type='GridDropout', + unit_size_min=10, + unit_size_max=40, + random_offset=True, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f0bc7486ca27f2e58a41077527de9add9d9600b3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py @@ -0,0 +1,153 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = 
dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..143a686ef7536a6cfccdbdf431de9188062caa3e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..113a91e18ce1fd3a934199f872ee6989c1e7cf95 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d147de838a2fce6b0293ede36ecac81b51942036 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + heatmap_type='combined') + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=3 * 17, + deconv_out_channels=None, + loss=dict(type='CombinedTargetMSELoss', use_target_weight=True), + 
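+ # 'combined' UDP targets hold 3 channels per keypoint (1 heatmap + x/y offset maps),
+ # which is why out_channels is 3 * 17 and flip_mode is 'udp_combined' at test time.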
decoder=codec), + train_cfg=dict(compute_acc=False), + test_cfg=dict( + flip_test=True, + flip_mode='udp_combined', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5ff70ab47a0cf027c04983e6c1f3640ba56802 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + 
num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..f83b7d31a43bd0d84d55fbc2825438efa607fff0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), 
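+ # heatmap_size stays at 1/4 of input_size (72x96 for a 288x384 crop), with sigma increased to match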
heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..daf3cbaddc15d9ded726a3ce7183f2364ddb74c6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..eec52999c960c693c92b472cbff1d89d752dd2f1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b705cb7fb3b59f158be04b4496e2a49922213f4f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa17ef098e5b471aba21b9d1a53dc154d8125cb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..caa7c267a09ea1080980dfeba1f26c22b9655169 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py @@ -0,0 +1,140 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', 
use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5a564d115bf7c94b6706ce337acbbccd94fb34 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py @@ -0,0 +1,140 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + 
module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..663593552563dbe296ac3c780fda650dd8298c41 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py @@ -0,0 +1,140 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + 
mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5d347cd9537af2a690ee3c6d02323a8c53bbd8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py @@ -0,0 +1,140 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# 
hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ff8eaccb7e093a16416ea52983d6cb7feb6d7814 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( 
+ type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..d01e4c6c3dc9924079d35bde2445fb93b3541cba --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, 
start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e2e9893c6429c99b847747170690654411e68b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,152 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + 
type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py new file mode 100644 index 
0000000000000000000000000000000000000000..1b474b3f2fe7a5db3571846f7ab54c5c05c33136 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py @@ -0,0 +1,127 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='PyramidVisionTransformer', + num_layers=[3, 4, 6, 3], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_small.pth'), + ), + neck=dict(type='FeatureMapProcessor', select_index=3), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e8921e68030e89110afe8c44717b051b02616a13 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py @@ -0,0 +1,128 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='PyramidVisionTransformerV2', + embed_dims=64, + num_layers=[3, 4, 6, 3], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b2.pth'), + ), + neck=dict(type='FeatureMapProcessor', select_index=3), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') 
+test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..cd13e4a4222f21baa200c4c8ccb17986aacfc935 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..5486548481df742e6bc53bd32d65501971e356f5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..39b7b3220d64d2ac905288c6bf2c0dd1ca2be7f1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..f7c99503d46a7e0dc4402250e073b6ce9128d121 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..beccab1bd105b618b601d5d331cc0fc680df1bf7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..25d5039f05e3d9b2387be6bc0690e5d3904faded --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..acd91192447b4ef5f41745db0c4b93357b53b778 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..49bd2b224bea33419d392931391ba90806ee24a7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py @@ -0,0 +1,126 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True, + blur_kernel_size=17) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7dbe1b43f77f35fb6564b9d6322a1b8c08d93a60 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..d74cc1392d27911a1e3d2b3239840717da5a4fb5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fdec305b10c5aaa202957650a81975158d0d1b9c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..b34ad210f37ce883b21377192fbe035a7c1fcd56 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..66a6a27822fb72e7aef421bf1bf2230598c26125 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py @@ -0,0 +1,7 @@ +_base_ = ['./td-hm_res50_8xb64-210e_coco-256x192.py'] + +# fp16 settings +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', +) diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..5bfbace9f6313fe89201ba5c243e51b4aa90ca27 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..030ae95d634e40f172dae07eb2bef163084906a3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + 
bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..bdcdb6c75fb74e65ca53797eb33039f6d36357ce --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=200, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..1a5e1e8e4a570e09b9fd3a5f096584275bfb8858 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=200, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..b519e9d2ef951298da6f3d4794d5c8660e83159d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=269, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# 
evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b3588d1fa31e29ec960a35050ff8659e712712ec --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=269, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..43295bb41f1b4b2c87119baec30b7efc9ecb80d9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..e45320b036372894e9ddd0bcee6c457e86a8ecee --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc55228face0a1586627e3ffa823ffe645c812a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6c8cc4e808c2fff488bc4b5c977a34d7978a6d03 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..a85a7f80c43b090426182ab9c3acaa5659b0f4d5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..7a728ce806415f8da3afd036835171b64976a41a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c241cdd3ddbee8398c3da8d96d7d3d46bce99f24 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1cea135b49cf1d70e10194685c394dc2c8bc1a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..508233371b4a819fe0b14a01798cfe48e6b32303 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..eafed7f07526dce3b46bcd800272764a1614a051 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..27c2c263b05193b10f0c0af2235b81d86cca1bc4 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..c02caeb7461f1fb312d02cfe7496c57a8b9b11e2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b088a44ca6a043abac5e52596486362124d244c5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..9f97235218992e772298fcb74c1494331eeb50a7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..18d16bd26784ad9f706e39bd83c25fc913ef4b08 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-2, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 190, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[2, 2, 2, 2], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..069cb413123be20dee06dd8014b583dfa267fa46 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
+kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..544c87242f5f3e7e4a0b129aa927e21a8c5a4430 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..1af2e44ef013ea525d0d7cfe19312c07a1b5ae93 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy 
+param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..efa1ad924cf5da56fc0ab69cee89eee48355376d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( 
+ type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..9d784d80296e085f201da67e7f45732af6fe8938 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, 
end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..b515b744c4c9b43126b2e85b9c32b5663016be70 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, 
start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f6d9fab2eda60ecef464a645b572866d1954cbcf --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', 
+ begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..a0ef9bf5711f01625f3faf0d46122dae2eca8c35 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) 
+] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..13524c121772b7a73b79dd7d9c2fd4fd6e5ad882 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..93fb78fac56a697164a383229436c79de5392be5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), 
heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2002a70a94d7104d07ec2c921a6c1123f859ab --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..029f48d3d90bdc113066c67200cbe15772bd0b9b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..f06c325bd1213995bb51bd9c1e477de0604e4cb7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + 
std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..333998490e38105e4f73a55af6358e868943117a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='ShuffleNetV2', + widen_factor=1.0, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..e7be5484e8d56f6001a3c1e5de91dd1b8c32821f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV2', + widen_factor=1.0, + 
init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..81877b893f69b66a2263a4d5dfea8407d56668af --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py @@ -0,0 +1,139 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=128, + depths=[2, 
2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1d5fa12f97259031d65030e5abee8cb61d372d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py @@ -0,0 +1,139 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), 
sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..14d08a49f865a901b0832f40dd2819b8ee43d58c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py @@ -0,0 +1,148 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': 
dict(decay_mult=0.) + })) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.5, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1536, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py new file mode 100644 index 
0000000000000000000000000000000000000000..692c8df1a616dabbcb93a9be67f4626862eae172 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py @@ -0,0 +1,148 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.5, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1536, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' 
+ 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..068ee0649f4cf97f5887ff5b17f44d6e1e1609b3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py @@ -0,0 +1,139 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_tiny_patch4_window7_224.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + 
batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b85adb998bb5f2660ef00d1d395a6ca8bb4763c0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='VGG', + depth=16, + norm_cfg=dict(type='BN'), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://vgg16_bn'), + ), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + 
drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..04fcc1ad2ef3152e217fa20bc0a325d44b1e6f0d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=17, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8190d7ffd2ca650f939935487551f0a62a8bf078 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_ResNet', depth=50), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + 
type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..a2c19453f3e3e1be0490c6e55becd2ba4ae14f04 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md @@ -0,0 +1,39 @@ + + +
+VGG (ICLR'2015) + +```bibtex +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [vgg](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py) | 256x192 | 0.699 | 0.890 | 0.769 | 0.754 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192_20210517.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..166fa05fcddc05ffe60a996ec63bb747d58ea7dd --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - VGG + Training Data: COCO + Name: td-hm_vgg16-bn_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.699 + AP@0.5: 0.89 + AP@0.75: 0.769 + AR: 0.754 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..b6a178865bd6844c0f73fbe7db43aa4be795dc71 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md @@ -0,0 +1,40 @@ + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+&#10; + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.700 | 0.887 | 0.783 | 0.758 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192_20221010.log) | +| [S-ViPNAS-Res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.711 | 0.894 | 0.787 | 0.769 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192_20220917.log) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..cbdaa5bcabf800b60b14a044e5de0e71f753017a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml @@ -0,0 +1,40 @@ +Collections: +- Name: ViPNAS + Paper: + Title: 'ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search' + URL: https://arxiv.org/abs/2105.10154 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/vipnas.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + Training Data: COCO + Name: td-hm_vipnas-mbv3_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.887 + AP@0.75: 0.783 + AR: 0.758 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_vipnas-res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.711 + AP@0.5: 0.894 + AP@0.75: 0.787 + AR: 0.769 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..054a7b0f6ffe7e3167010cf1485d53e300a3ca97 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md @@ -0,0 +1,61 @@ +To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). &#10;
To install the required version, run the following command: + +```shell +mim install 'mmpretrain>=1.0.0' +``` + + + +
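+As a minimal usage sketch (assuming MMPose 1.x is installed and the snippet is run from the repository root), one of the configs listed in the tables below can be paired with its released checkpoint for top-down inference through the `mmpose.apis` helpers; the config path and checkpoint URL follow the ViTPose-B row, while `demo.jpg` and the example bounding box are placeholder inputs that would normally come from your own image and a person detector.
+
+```python
+import numpy as np
+
+from mmpose.apis import inference_topdown, init_model
+
+# Config and checkpoint URL taken from the ViTPose-B row of the table below.
+config = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
+          'td-hm_ViTPose-base_8xb64-210e_coco-256x192.py')
+checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
+              'topdown_heatmap/coco/'
+              'td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth')
+
+model = init_model(config, checkpoint, device='cpu')
+
+# Placeholder image and person bounding box in xyxy format; in practice the
+# boxes come from a separate person detector.
+bboxes = np.array([[0, 0, 192, 256]], dtype=np.float32)
+pose_results = inference_topdown(model, 'demo.jpg', bboxes=bboxes)
+print(pose_results[0].pred_instances.keypoints.shape)  # (1, 17, 2)
+```
+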
+ViTPose (NeurIPS'2022) + +```bibtex +@inproceedings{ + xu2022vitpose, + title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, + author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022}, +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +&#10;
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.790 | 0.916 | 0.857 | 0.840 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_3rdparty_coco-256x192-5b738c8e_20230314.pth) | - | + +*Models with * are converted from the [official repo](https://github.com/ViTAE-Transformer/ViTPose). 
The config files of these models are only for validation.* + +> With simple decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.736 | 0.900 | 0.811 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.json) | +| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.756 | 0.906 | 0.826 | 0.809 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.json) | +| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.780 | 0.914 | 0.851 | 0.833 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.json) | +| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.789 | 0.916 | 0.856 | 0.839 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..10cc7bf972d6c3e76d5f084e8187c97fdf529cb9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml @@ -0,0 +1,155 @@ +Collections: +- Name: ViTPose + Paper: + Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' + URL: https://arxiv.org/abs/2204.12484 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md + Metadata: + Training Resources: 8x A100 GPUs +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: &id001 + - ViTPose + - Classic Head + Model Size: Small + Training Data: COCO + Name: td-hm_ViTPose-small_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.739 + AP@0.5: 0.903 + AP@0.75: 0.816 + AR: 0.792 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Base + Training Data: COCO + Name: td-hm_ViTPose-base_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.905 + AP@0.75: 0.829 + AR: 0.81 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Large + Training Data: COCO + Name: td-hm_ViTPose-large_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.782 + AP@0.5: 0.914 + AP@0.75: 0.850 + AR: 0.834 + AR@0.5: 0.952 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Huge + Training Data: COCO + Name: td-hm_ViTPose-huge_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.788 + AP@0.5: 0.917 + AP@0.75: 0.855 + AR: 0.839 + AR@0.5: 0.954 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-s + Metadata: + Architecture: &id002 + - ViTPose + - Simple Head + Model Size: Small + Training Data: COCO + Name: td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.736 + AP@0.5: 0.900 + AP@0.75: 0.811 + AR: 0.790 + AR@0.5: 0.940 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: + - vitpose + - vitpose-b + Metadata: + Architecture: *id002 + Model Size: Base + Training Data: COCO + Name: td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.906 + AP@0.75: 0.826 + AR: 0.809 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-l + Metadata: + Architecture: *id002 + Model Size: Large + Training Data: COCO + Name: td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.780 + AP@0.5: 0.914 + AP@0.75: 0.851 + AR: 0.833 + AR@0.5: 0.952 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-h + Metadata: + Architecture: *id002 + Model Size: Huge + Training Data: COCO + Name: td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.789 + AP@0.5: 0.916 + AP@0.75: 0.856 + AR: 0.839 + AR@0.5: 0.953 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b083719303620be25ca2f2aa587ae85f15d6c613 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py @@ -0,0 +1,216 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..24c35348389b3f532fffa418a21b6edce6d21cb0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md @@ -0,0 +1,56 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_cspnext_m](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py) | 256x192 | 0.662 | 0.821 | 0.723 | 0.759 | 0.675 | 0.539 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..6e5b4cd691ccb083db97b33b3531b0e69f39af12 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml @@ -0,0 +1,20 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py + In Collection: UDP + Metadata: + Architecture: + - UDP + - CSPNeXt + Training Data: CrowdPose + Name: cspnext-m_udp_8xb64-210e_crowpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.662 + AP (E): 0.759 + AP (H): 0.539 + AP (M): 0.675 + AP@0.5: 0.821 + AP@0.75: 0.723 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..c0d24d47175d592b536d905881df852959406eed --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md @@ -0,0 +1,38 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.675 | 0.825 | 0.729 | 0.770 | 0.687 | 0.553 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192_20201227.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..c37fa9154feab028c1dd3d3511fccabcc2805042 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml @@ -0,0 +1,19 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py + In Collection: HRNet + Metadata: + Architecture: + - HRNet + Training Data: CrowdPose + Name: td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.675 + AP (E): 0.77 + AP (H): 0.553 + AP (M): 0.687 + AP@0.5: 0.825 + AP@0.75: 0.729 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md new file mode 100644 index 0000000000000000000000000000000000000000..56a771806d361061652b57e624b13169db1bb410 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md @@ -0,0 +1,58 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.637 | 0.808 | 0.692 | 0.738 | 0.650 | 0.506 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192_20201227.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.647 | 0.810 | 0.703 | 0.745 | 0.658 | 0.521 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192_20201227.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py) | 320x256 | 0.661 | 0.821 | 0.714 | 0.759 | 0.672 | 0.534 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256_20201227.log.json) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.656 | 0.818 | 0.712 | 0.754 | 0.666 | 0.533 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192_20201227.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..1477c28deb33691632ccdb33035ed8075e43e241 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml @@ -0,0 +1,71 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: CrowdPose + Name: td-hm_res50_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.637 + AP (E): 0.738 + AP (H): 0.506 + AP (M): 0.65 + AP@0.5: 0.808 + AP@0.75: 0.692 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res101_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.647 + AP (E): 0.745 + AP (H): 0.521 + AP (M): 0.658 + AP@0.5: 0.81 + AP@0.75: 0.703 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth +- Config: 
configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res101_8xb64-210e_crowdpose-320x256 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.661 + AP (E): 0.759 + AP (H): 0.534 + AP (M): 0.672 + AP@0.5: 0.821 + AP@0.75: 0.714 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res152_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.656 + AP (E): 0.754 + AP (H): 0.533 + AP (M): 0.666 + AP@0.5: 0.818 + AP@0.75: 0.712 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..3117314a43ab214da46a83c5621f1860bcb3f57f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py @@ -0,0 +1,152 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=14, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 
'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..79cae1d130a3713944069e37a3258811b068e655 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 
'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py new file mode 100644 index 0000000000000000000000000000000000000000..eac5caf859095d3867fdd45fde58774b8c5ce54e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 320), heatmap_size=(64, 80), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 
'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..5b99439535a54b4bc69ca5ee270aa5a0d7fa26bf --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 
'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d669b2e2670657a25def5234037e371bede0882d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' 
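+# The annotation files used below are the MMPose-converted CrowdPose JSONs;
+# `bbox_file` in the val/test dataloader supplies pre-computed person
+# detections that are used in place of ground-truth boxes at evaluation time.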
+ +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.md new file mode 100644 index 0000000000000000000000000000000000000000..3e387923d5ab435c8cd4428282043529f014d9cd --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.md @@ -0,0 +1,38 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+ExLPose (CVPR'2023)
+
+```bibtex
+@inproceedings{ExLPose_2023_CVPR,
+ title={Human Pose Estimation in Extremely Low-Light Conditions},
+ author={Sohyun Lee and Jaesung Rim and Boseung Jeong and Geonu Kim and ByungJu Woo and Haechan Lee and Sunghyun Cho and Suha Kwak},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ year={2023}
+}
+```
+
+ +Results on ExLPose-LLA val set with ground-truth bounding boxes + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py) | 256x192 | 0.401 | 0.64 | 0.40 | 0.452 | 0.693 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-ll-256x192.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-ll-256x192.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.yml new file mode 100644 index 0000000000000000000000000000000000000000..2b8637f5283e9e13bf613ec23a91222f16a37af1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.yml @@ -0,0 +1,18 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py + In Collection: HRNet + Metadata: + Architecture: + - HRNet + Training Data: ExLPose-LL + Name: td-hm_hrnet-w32_8xb64-210e_exlpose-256x192 + Results: + - Dataset: ExLPose + Metrics: + AP: 0.401 + AP@0.5: 0.64 + AP@0.75: 0.40 + AR: 0.452 + AR@0.5: 0.693 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-ll-256x192.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c1fea18a4a08fee42effafcd6424b4ab4822acca --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py @@ -0,0 +1,149 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), 
+ num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=14, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ExlposeDataset' +data_mode = 'topdown' +data_root = 'data/ExLPose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ExLPose/ExLPose_train_LL.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ExLPose/ExLPose_test_LL-A.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ExLPose/ExLPose_test_LL-A.json', + use_area=False) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md new file mode 100644 index 0000000000000000000000000000000000000000..6e5f3476cb12db673fc57504dc484565555e04ce --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md @@ -0,0 +1,80 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+ title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+ author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ year={2023}
+}
+```
+
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.252 | 0.397 | 0.255 | 0.321 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.399 | 0.545 | 0.420 | 0.466 | 0.613 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.271 | 0.413 | 0.277 | 0.339 | 0.499 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.417 | 0.553 | 0.442 | 0.481 | 0.617 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.533 | 0.771 | 0.562 | 0.574 | 0.792 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.754 | 0.906 | 0.812 | 0.783 | 0.916 | 
[ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.557 | 0.782 | 0.593 | 0.595 | 0.804 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.769 | 0.906 | 0.825 | 0.796 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.741 | 0.902 | 0.814 | 0.795 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.751 | 0.905 | 0.822 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml new file mode 100644 index 0000000000000000000000000000000000000000..08aa3f1f47d579b2e2696986554ee7072757f763 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml @@ -0,0 +1,74 @@ +Collections: +- Name: HRNet + Paper: + Title: Deep high-resolution representation learning for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: &id002 + - COCO + - Human-Art + Name: td-hm_hrnet-w32_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.741 + AP@0.5: 0.902 + AP@0.75: 0.814 + AR: 0.795 + AR@0.5: 0.941 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.399 + AP@0.5: 0.545 + AP@0.75: 0.420 + AR: 0.466 + AR@0.5: 0.613 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.754 + AP@0.5: 0.906 + AP@0.75: 0.812 + AR: 0.783 + AR@0.5: 0.916 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: td-hm_hrnet-w48_8xb32-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.905 + AP@0.75: 0.822 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.417 + AP@0.5: 0.553 + AP@0.75: 0.442 + AR: 0.481 + AR@0.5: 0.617 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.769 + AP@0.5: 0.906 + AP@0.75: 0.825 + AR: 0.796 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa431e044f7a6cbf8fcad8a25298b2e14fedfa2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': 
dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..925f68e3d18903e511a5c89426e5bd595aa4d1b6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmcls.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + output_cls_token=False, + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, 
+ )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea9dbf3952876d9c70a06b982bf29eb461cfa8e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmcls.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + output_cls_token=False, + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ed7817d2fe2f2d43c917f04c66bacf6b79f0a1f9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + 
backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_small.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9fa25beb8ed2e5bc4dd565ef35d56e031fb779 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks 
+default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6a5ae0707c2ac1973a293d41a51ac8bb471ae9fe --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py @@ -0,0 +1,150 @@ +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md new file mode 100644 index 0000000000000000000000000000000000000000..a4d2dd6c50fff926f0b099e46a2e81f4c16076b1 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md @@ -0,0 +1,97 @@ +To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). To install the required version, run the following command: + +```shell +mim install 'mmpretrain>=1.0.0' +``` + + + +
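+Once MMPreTrain is installed, a quick way to sanity-check one of the ViTPose Human-Art configs is MMPose's top-down inference API. The snippet below is a minimal sketch, assuming the MMPose 1.x `init_model` / `inference_topdown` interface; the config path is relative to the mmpose directory, the checkpoint URL is the ViTPose-S Human-Art weight from the results table, and `demo.jpg` is a placeholder test image.
+
+```python
+from mmpose.apis import inference_topdown, init_model
+
+cfg = ('configs/body_2d_keypoint/topdown_heatmap/humanart/'
+       'td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py')
+ckpt = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
+        'topdown_heatmap/human_art/'
+        'td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth')
+
+# Build the pose estimator and load the pretrained weights.
+model = init_model(cfg, ckpt, device='cpu')
+
+# Without explicit bboxes the whole image is treated as a single person box;
+# pass detector boxes via `bboxes=` for the usual top-down protocol.
+results = inference_topdown(model, 'demo.jpg')
+print(results[0].pred_instances.keypoints.shape)  # e.g. (1, 17, 2)
+```
+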
+ +ViTPose (NeurIPS'2022) + +```bibtex +@inproceedings{ + xu2022vitpose, + title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, + author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022}, +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +
+ Human-Art (CVPR'2023) + +```bibtex +@inproceedings{ju2023humanart, +  title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, +  author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei}, +  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, +  year={2023} +} +``` + +
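The next two tables report the same models under two box protocols: detections from a person detector (the `bbox_file` wired into the validation dataloader, as in the config at the top of this diff) and ground-truth boxes. A minimal sketch of toggling between the two, assuming mmengine's `Config` API and MMPose 1.x dataset behaviour (the dataset falls back to annotated boxes when no `bbox_file` is set); the numbers in the tables were not necessarily produced this way:

```python
# Illustrative only: switch a top-down config between detector boxes and
# ground-truth boxes by editing the val dataloader (assumes mmengine).
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_heatmap/humanart/'
    'td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py')

# Detector-box protocol: keep the bbox_file shipped with the config.
print(cfg.val_dataloader.dataset.get('bbox_file'))

# Ground-truth-box protocol: drop bbox_file so the dataset uses GT boxes.
cfg.val_dataloader.dataset.pop('bbox_file', None)
cfg.test_dataloader = cfg.val_dataloader
```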
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.228 | 0.371 | 0.229 | 0.298 | 0.467 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.381 | 0.532 | 0.405 | 0.448 | 0.602 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.270 | 0.423 | 0.272 | 0.340 | 0.510 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.410 | 0.549 | 0.434 | 0.475 | 0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.342 | 0.498 | 0.357 | 0.413 | 0.577 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.459 | 0.592 | 0.487 | 0.525 | 0.656 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.377 | 0.541 | 0.391 | 0.447 | 
0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.468 | 0.594 | 0.498 | 0.534 | 0.655 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.507 | 0.758 | 0.531 | 0.551 | 0.780 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.738 | 0.905 | 0.802 | 0.768 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.555 | 0.782 | 0.590 | 0.599 | 0.809 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.759 | 0.905 | 0.823 | 0.790 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.637 | 0.838 | 0.689 | 0.677 | 0.859 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.789 | 0.916 | 0.845 | 0.819 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.665 | 0.860 | 0.715 | 0.701 | 0.871 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.800 | 0.926 | 0.855 | 0.828 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.737 | 0.902 | 0.811 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| 
[ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.758 | 0.906 | 0.829 | 0.812 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.782 | 0.914 | 0.849 | 0.835 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.788 | 0.914 | 0.853 | 0.841 | 0.956 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml new file mode 100644 index 0000000000000000000000000000000000000000..cbbe965c2d9f2147de60d5b6a072934c8efa12ca --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml @@ -0,0 +1,145 @@ +Collections: +- Name: ViTPose + Paper: + Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' + URL: https://arxiv.org/abs/2204.12484 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md + Metadata: + Training Resources: 8x A100 GPUs +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: &id001 + - ViTPose + - Classic Head + Model Size: Small + Training Data: &id002 + - COCO + - Human-Art + Name: td-hm_ViTPose-small_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 
0.737 + AP@0.5: 0.902 + AP@0.75: 0.811 + AR: 0.792 + AR@0.5: 0.942 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.381 + AP@0.5: 0.532 + AP@0.75: 0.405 + AR: 0.448 + AR@0.5: 0.602 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.738 + AP@0.5: 0.905 + AP@0.75: 0.802 + AR: 0.768 + AR@0.5: 0.911 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Base + Training Data: *id002 + Name: td-hm_ViTPose-base_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.906 + AP@0.75: 0.829 + AR: 0.812 + AR@0.5: 0.946 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.410 + AP@0.5: 0.549 + AP@0.75: 0.434 + AR: 0.475 + AR@0.5: 0.615 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.759 + AP@0.5: 0.905 + AP@0.75: 0.823 + AR: 0.790 + AR@0.5: 0.917 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Large + Training Data: *id002 + Name: td-hm_ViTPose-large_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.782 + AP@0.5: 0.914 + AP@0.75: 0.849 + AR: 0.835 + AR@0.5: 0.953 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.459 + AP@0.5: 0.592 + AP@0.75: 0.487 + AR: 0.525 + AR@0.5: 0.656 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.789 + AP@0.5: 0.916 + AP@0.75: 0.845 + AR: 0.819 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Huge + Training Data: *id002 + Name: td-hm_ViTPose-huge_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.788 + AP@0.5: 0.914 + AP@0.75: 0.853 + AR: 0.841 + AR@0.5: 0.956 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.468 + AP@0.5: 0.594 + AP@0.75: 0.498 + AR: 0.534 + AR@0.5: 0.655 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.800 + AP@0.5: 0.926 + AP@0.75: 0.855 + AR: 0.828 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md new file mode 100644 index 0000000000000000000000000000000000000000..29df027e3f76c0801c1c89303e776f78d5c6047e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md @@ -0,0 +1,56 @@ + + +
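The model-index `.yml` files added here (e.g. `vitpose_humanart.yml` above and `cpm_jhmdb.yml` below) share the same `Models`/`Results`/`Weights` layout, so they can be consumed programmatically. A minimal sketch, assuming PyYAML is available; the path is one of the files from this diff:

```python
# Sketch: enumerate checkpoints and Human-Art AP from a model-index .yml.
import yaml

path = ('mmpose/configs/body_2d_keypoint/topdown_heatmap/humanart/'
        'vitpose_humanart.yml')
with open(path) as f:
    index = yaml.safe_load(f)

for model in index.get('Models', []):
    humanart_ap = next(
        (r['Metrics']['AP'] for r in model['Results']
         if r['Dataset'] == 'Human-Art'),
        None)
    print(model['Name'], humanart_ap, model['Weights'])
```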
+CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
+ + + +
+JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
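The Sub-JHMDB tables that follow report PCK@0.2 under two normalizations, matching the `JhmdbPCKAccuracy` evaluator used by the configs later in this diff (`norm_item=['bbox', 'torso']`): keypoint errors are divided either by the person bounding-box size or by a torso length before thresholding. A small NumPy sketch of that idea, written for illustration rather than copied from the MMPose metric:

```python
# Illustrative PCK@thr computation (not the MMPose implementation).
import numpy as np

def pck(pred, gt, visible, norm_size, thr=0.2):
    """pred, gt: (N, K, 2) keypoints; visible: (N, K) boolean mask;
    norm_size: (N,) per-person normalizer (bbox size or torso length)."""
    dist = np.linalg.norm(pred - gt, axis=-1)      # (N, K) pixel errors
    norm_dist = dist / norm_size[:, None]          # normalize per person
    correct = (norm_dist <= thr) & visible
    return correct.sum() / max(visible.sum(), 1)

# norm_item='bbox'  -> norm_size is derived from the person bounding box
# norm_item='torso' -> norm_size is a torso length (see the metric docs
#                      for the exact keypoint pair used)
```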
+ +Results on Sub-JHMDB dataset + +The models are pre-trained on MPII dataset only. NO test-time augmentation (multi-scale /rotation testing) is used. + +- Normalized by Person Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 96.1 | 91.9 | 81.0 | 78.9 | 96.6 | 90.8 | 87.3 | 89.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | +| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 98.1 | 93.6 | 77.1 | 70.9 | 94.0 | 89.1 | 84.7 | 87.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | +| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 97.9 | 94.9 | 87.3 | 84.0 | 98.6 | 94.4 | 86.2 | 92.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | +| Average | cpm | 368x368 | 97.4 | 93.5 | 81.5 | 77.9 | 96.4 | 91.4 | 86.1 | 89.8 | - | - | + +- Normalized by Torso Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 89.0 | 63.0 | 54.0 | 54.9 | 68.2 | 63.1 | 61.2 | 66.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | +| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 90.3 | 57.9 | 46.8 | 44.3 | 60.8 | 58.2 | 62.4 | 61.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | +| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 91.0 | 72.6 | 59.9 | 54.0 | 73.2 | 68.5 | 65.8 | 70.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | +| Average | cpm | 368x368 | 90.1 | 64.5 | 53.6 | 51.1 | 67.4 | 63.3 | 63.1 | 65.7 | - | - | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml new file mode 100644 index 0000000000000000000000000000000000000000..f9f6d7568b50b65bd8f5754539f861b15daca8c7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml @@ -0,0 +1,116 @@ +Models: +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py + In Collection: CPM + Metadata: + Architecture: &id001 + - CPM + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 87.3 + Elb: 81 + Head: 96.1 + Hip: 96.6 + Knee: 90.8 + Mean: 89.5 + Sho: 91.9 + Wri: 78.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 84.7 + Elb: 77.1 + Head: 98.1 + Hip: 94.0 + Knee: 89.1 + Mean: 87.4 + Sho: 93.6 + Wri: 70.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 86.2 + Elb: 87.3 + Head: 97.9 + Hip: 98.6 + Knee: 94.4 + Mean: 92.4 + Sho: 94.9 + Wri: 84.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 61.2 + Elb: 54.0 + Head: 89.0 + Hip: 68.2 + Knee: 63.1 + Mean: 66.0 + Sho: 63.0 + Wri: 54.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 62.4 + Elb: 46.8 + Head: 90.3 + Hip: 60.8 + Knee: 58.2 + Mean: 61.1 + Sho: 57.9 + Wri: 44.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 65.8 + Elb: 59.9 + Head: 91.0 + Hip: 73.2 + Knee: 68.5 + Mean: 70.3 + Sho: 72.6 + Wri: 54.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md new file mode 100644 index 
0000000000000000000000000000000000000000..22422e731653b1b01840feb0f79eb82459bb968f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md @@ -0,0 +1,81 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
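In the tables below, `pose_resnet_50 (2 Deconv.)` refers to the `*-2deconv` configs later in this diff, which replace the default deconvolutional stack of `HeatmapHead` (assumed here to be three 256-channel layers, giving 64x64 heatmaps for a 256x256 input) with two layers and 32x32 heatmaps. A side-by-side sketch of the two head/codec settings, paraphrased from those configs:

```python
# Paraphrased from the JHMDB configs in this diff; the three-layer default
# for HeatmapHead's deconv stack is an assumption stated above.
codec_default = dict(type='MSRAHeatmap', input_size=(256, 256),
                     heatmap_size=(64, 64), sigma=2)
head_default = dict(
    type='HeatmapHead',
    in_channels=2048,   # ResNet-50 output channels
    out_channels=15,    # 15 Sub-JHMDB keypoints
    # deconv_out_channels left at its default -> 8x upsampling to 64x64
)

codec_2deconv = dict(type='MSRAHeatmap', input_size=(256, 256),
                     heatmap_size=(32, 32), sigma=2)
head_2deconv = dict(
    type='HeatmapHead',
    in_channels=2048,
    out_channels=15,
    deconv_out_channels=(256, 256),   # two deconv layers -> 4x upsampling
    deconv_kernel_sizes=(4, 4),
)
```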
+ +Results on Sub-JHMDB dataset + +The models are pre-trained on MPII dataset only. *NO* test-time augmentation (multi-scale /rotation testing) is used. + +- Normalized by Person Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.0 | 93.8 | 91.3 | 99.4 | 96.5 | 92.8 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.1 | 90.6 | 87.0 | 98.9 | 96.3 | 94.1 | 95.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 99.0 | 97.9 | 94.0 | 91.6 | 99.7 | 98.0 | 94.7 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 | 256x256 | 99.2 | 97.7 | 92.8 | 90.0 | 99.3 | 96.9 | 93.9 | 96.0 | - | - | +| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.5 | 94.6 | 92.0 | 99.4 | 94.6 | 92.5 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.8 | 91.0 | 87.0 | 99.1 | 96.5 | 93.8 | 95.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 98.8 | 98.4 | 94.3 | 92.1 | 99.8 | 97.5 | 93.8 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 (2 Deconv.) 
| 256x256 | 99.1 | 98.2 | 93.3 | 90.4 | 99.4 | 96.2 | 93.4 | 96.0 | - | - | + +- Normalized by Torso Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 93.3 | 83.2 | 74.4 | 72.7 | 85.0 | 81.2 | 78.9 | 81.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 94.1 | 74.9 | 64.5 | 62.5 | 77.9 | 71.9 | 78.6 | 75.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 97.0 | 82.2 | 74.9 | 70.7 | 84.7 | 83.7 | 84.2 | 82.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 | 256x256 | 94.8 | 80.1 | 71.3 | 68.6 | 82.5 | 78.9 | 80.6 | 80.1 | - | - | +| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 92.4 | 80.6 | 73.2 | 70.5 | 82.3 | 75.4 | 75.0 | 79.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 93.4 | 73.6 | 63.8 | 60.5 | 75.1 | 68.4 | 75.5 | 73.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 96.1 | 81.2 | 72.6 | 67.9 | 83.6 | 80.9 | 81.5 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 (2 Deconv.) 
| 256x256 | 94.0 | 78.5 | 69.9 | 66.3 | 80.3 | 74.9 | 77.3 | 78.0 | - | - | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml new file mode 100644 index 0000000000000000000000000000000000000000..d7480d12a0ce45db7ea28af0b02e2493b34c04b8 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml @@ -0,0 +1,231 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 92.8 + Elb: 93.8 + Head: 99.1 + Hip: 99.4 + Knee: 96.5 + Mean: 96.1 + Sho: 98.0 + Wri: 91.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 94.1 + Elb: 90.6 + Head: 99.3 + Hip: 98.9 + Knee: 96.3 + Mean: 95.0 + Sho: 97.1 + Wri: 87.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 94.7 + Elb: 94.0 + Head: 99.0 + Hip: 99.7 + Knee: 98.0 + Mean: 96.7 + Sho: 97.9 + Wri: 91.6 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 92.5 + Elb: 94.6 + Head: 99.1 + Hip: 99.4 + Knee: 94.6 + Mean: 96.1 + Sho: 98.5 + Wri: 92.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 93.8 + Elb: 91.0 + Head: 99.3 + Hip: 99.1 + Knee: 96.5 + Mean: 95.2 + Sho: 97.8 + Wri: 87.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 93.8 + Elb: 94.3 + Head: 98.8 + Hip: 99.8 + Knee: 97.5 + Mean: 96.7 + Sho: 98.4 + Wri: 92.1 + Task: Body 
2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 78.9 + Elb: 74.4 + Head: 93.3 + Hip: 85.0 + Knee: 81.2 + Mean: 81.9 + Sho: 83.2 + Wri: 72.7 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 78.6 + Elb: 64.5 + Head: 94.1 + Hip: 77.9 + Knee: 71.9 + Mean: 75.5 + Sho: 74.9 + Wri: 62.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 84.2 + Elb: 74.9 + Head: 97.0 + Hip: 84.7 + Knee: 83.7 + Mean: 82.9 + Sho: 82.2 + Wri: 70.7 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 75.0 + Elb: 73.2 + Head: 92.4 + Hip: 82.3 + Knee: 75.4 + Mean: 79.2 + Sho: 80.6 + Wri: 70.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 75.5 + Elb: 63.8 + Head: 93.4 + Hip: 75.1 + Knee: 68.4 + Mean: 73.7 + Sho: 73.6 + Wri: 60.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 81.5 + Elb: 72.6 + Head: 96.1 + Hip: 83.6 + Knee: 80.9 + Mean: 81.2 + Sho: 81.2 + Wri: 67.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py new file mode 100644 
index 0000000000000000000000000000000000000000..479039f5428f7f5e736beb4cfe9c7b88c986e4ed --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py @@ -0,0 +1,127 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py new file mode 100644 index 0000000000000000000000000000000000000000..88b60e9f87dfc783610aa8222a4256d9625efc60 --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py @@ -0,0 +1,127 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py new file mode 100644 index 0000000000000000000000000000000000000000..602b2bcfd6aac7df667da5d71ea9d8ea233778ad --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py @@ -0,0 +1,127 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime 
+train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..8d104e1e86e0818947f86612dbbe8b4c9b30e31f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy 
+param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..6135ce29ab3b070586d0324f95c37b272002459e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, 
+ by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..44d95b15b2a0e73eb93deefc32e5e3f093212648 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + 
milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..9578a66c18b3b58a9cd85ecb4941913eac6175ea --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual 
training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..856c89e660b7e2e866c4bd48eff32bf9faff731d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec 
settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..73065968848a063b462504c55e4a2ac85ffd49d9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + 
data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..0c2888bb88431497c30d7a046595488c7edaa87b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md @@ -0,0 +1,39 @@ + + +
+CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
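+For a quick sanity check, the checkpoint listed in the table below can be loaded with the high-level `mmpose.apis` helpers. This is a minimal sketch, assuming mmpose 1.x is installed and the script is run from the repository root; the image path and the person bounding box are placeholders, not part of this PR:
+
+```python
+# Minimal single-image inference sketch for the CPM MPII model listed below.
+# Assumptions: mmpose >= 1.0 installed, run from the repo root, 'person.jpg' exists.
+import numpy as np
+from mmpose.apis import inference_topdown, init_model
+
+config = ('mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/'
+          'td-hm_cpm_8xb64-210e_mpii-368x368.py')
+checkpoint = ('https://download.openmmlab.com/mmpose/top_down/cpm/'
+              'cpm_mpii_368x368-116e62b8_20200822.pth')
+
+model = init_model(config, checkpoint, device='cpu')  # or 'cuda:0'
+
+# One person box in xyxy format; in practice this would come from a detector.
+bboxes = np.array([[50, 50, 250, 400]], dtype=np.float32)
+results = inference_topdown(model, 'person.jpg', bboxes=bboxes, bbox_format='xyxy')
+print(results[0].pred_instances.keypoints)  # (1, 16, 2) array of (x, y) per MPII joint
+```
+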
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py) | 368x368 | 0.876 | 0.285 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368_20200822.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e2e439253e2eccb96d42ec887d6889abe520d65 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py + In Collection: CPM + Metadata: + Architecture: + - CPM + Training Data: MPII + Name: td-hm_cpm_8xb64-210e_mpii-368x368 + Results: + - Dataset: MPII + Metrics: + Mean: 0.876 + Mean@0.1: 0.285 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8d6fdcea8d717c9ecbc70fb966364dea14257e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py @@ -0,0 +1,210 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=16, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..80aec4c28e443d64a5db78d81508b182695f487d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md @@ -0,0 +1,57 @@ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
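+The practical effect of UDP is in how coordinates are rescaled between image and heatmap space: lengths are measured in units of `size - 1`, so corner pixels map exactly onto corner pixels. The tiny numeric sketch below illustrates the idea from the paper; it is not mmpose's `UDPHeatmap` implementation:
+
+```python
+# Biased vs. unbiased (UDP-style) coordinate mapping between resolutions.
+# Illustration of the idea from the UDP paper, not mmpose's codec code.
+
+def resize_biased(p, src, dst):
+    return p * dst / src              # common but biased: corner pixels drift
+
+def resize_unbiased(p, src, dst):
+    return p * (dst - 1) / (src - 1)  # UDP: the last pixel maps exactly to the last pixel
+
+src_w, dst_w = 256, 64
+corner = float(src_w - 1)             # x-coordinate of the last pixel (255)
+print(resize_biased(corner, src_w, dst_w))    # 63.75 -> not a valid pixel centre
+print(resize_unbiased(corner, src_w, dst_w))  # 63.0  -> exactly the last pixel
+```
+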
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
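+Note that the backbone of this config is registered under the `mmdet` scope, so mmdet has to be installed before the model can be built. The following is a rough sketch of loading the config and instantiating the model, assuming mmpose 1.x and mmdet 3.x; it is not an officially documented snippet:
+
+```python
+# Sketch: build the CSPNeXt-m + UDP pose estimator directly from this config.
+# Assumes mmpose >= 1.0 and mmdet >= 3.0 are installed (CSPNeXt lives in mmdet).
+from mmengine.config import Config
+from mmpose.registry import MODELS
+from mmpose.utils import register_all_modules
+
+register_all_modules()  # register mmpose modules and set the default registry scope
+
+cfg = Config.fromfile(
+    'mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/'
+    'cspnext-m_udp_8xb64-210e_mpii-256x256.py')
+model = MODELS.build(cfg.model)   # the backbone is resolved via _scope_='mmdet'
+print(type(model).__name__)       # TopdownPoseEstimator
+```
+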
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [cspnext-m_udp](/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py) | 256x256 | 0.902 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..7256f3b15443a4f51a68a32c6f271ef6d8c58089 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py + In Collection: UDP + Metadata: + Architecture: + - UDP + - CSPNeXt + Training Data: MPII + Name: cspnext-m_udp_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.902 + Mean@0.1: 0.303 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..98e795de4ff558bf49270e9c1ed6890e9de1f311 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md @@ -0,0 +1,41 @@ + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.317 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256_20200812.log.json) | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py) | 384x384 | 0.894 | 0.367 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..eb22cd98ce6536a4280cb188694d47d51c47e050 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml @@ -0,0 +1,28 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py + In Collection: Hourglass + Metadata: + Architecture: &id001 + - Hourglass + Training Data: MPII + Name: td-hm_hourglass52_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.889 + Mean@0.1: 0.317 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py + In Collection: Hourglass + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hourglass52_8xb32-210e_mpii-384x384 + Results: + - Dataset: MPII + Metrics: + Mean: 0.894 + Mean@0.1: 0.367 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..a03a96ba2e5aa9a39bd1849ec933b4ac58e4f438 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md @@ -0,0 +1,57 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
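+The `dark` variants keep the same training setup and only change how the final coordinates are decoded: the arg-max is refined with a second-order Taylor expansion of the (log-)heatmap around the peak. The numpy sketch below is only an illustration of that decoding idea, not mmpose's codec implementation:
+
+```python
+# Rough sketch of DarkPose-style sub-pixel decoding for one 2D heatmap.
+# Illustration of the Taylor-expansion idea only, not mmpose's codec code.
+import numpy as np
+
+def dark_refine(heatmap: np.ndarray) -> np.ndarray:
+    """Return a sub-pixel (x, y) peak estimate for a single heatmap."""
+    h = np.log(np.maximum(heatmap, 1e-10))      # work on the log-heatmap
+    y, x = np.unravel_index(np.argmax(h), h.shape)
+    # Keep a 1-pixel border so the finite differences stay in bounds.
+    y = int(np.clip(y, 1, h.shape[0] - 2))
+    x = int(np.clip(x, 1, h.shape[1] - 2))
+    # First derivatives (central differences) and Hessian at the peak.
+    dx = 0.5 * (h[y, x + 1] - h[y, x - 1])
+    dy = 0.5 * (h[y + 1, x] - h[y - 1, x])
+    dxx = h[y, x + 1] - 2 * h[y, x] + h[y, x - 1]
+    dyy = h[y + 1, x] - 2 * h[y, x] + h[y - 1, x]
+    dxy = 0.25 * (h[y + 1, x + 1] - h[y + 1, x - 1] - h[y - 1, x + 1] + h[y - 1, x - 1])
+    hess = np.array([[dxx, dxy], [dxy, dyy]])
+    grad = np.array([dx, dy])
+    if abs(np.linalg.det(hess)) > 1e-12:
+        offset = np.clip(-np.linalg.solve(hess, grad), -1.0, 1.0)  # Newton step
+    else:
+        offset = np.zeros(2)
+    return np.array([x, y], dtype=float) + offset
+
+# Tiny usage example: a Gaussian blob whose true centre is off-grid.
+yy, xx = np.mgrid[0:64, 0:64]
+hm = np.exp(-(((xx - 20.3) ** 2 + (yy - 31.7) ** 2) / (2 * 2.0 ** 2)))
+print(dark_refine(hm))   # close to (20.3, 31.7)
+```
+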
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.904 | 0.354 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark_20200927.log.json) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.905 | 0.360 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark_20200927.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..0283b5c827de5a2f170460ddb7e159328faf9e76 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml @@ -0,0 +1,29 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: MPII + Name: td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.904 + Mean@0.1: 0.354 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.905 + Mean@0.1: 0.36 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..7e8a69f64f5165db52c2d672464c080a73427215 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md @@ -0,0 +1,40 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
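+In the tables below, `Mean` and `Mean@0.1` are PCKh values at thresholds 0.5 and 0.1: a joint counts as correct when its prediction error, normalised by a head size derived from the ground-truth head box, stays under the threshold. The sketch below is a minimal illustration of the metric; the real `MpiiPCKAccuracy` evaluator additionally applies the standard head-box scaling and per-joint reporting:
+
+```python
+# Minimal PCKh sketch: fraction of visible joints whose error is below thr * head_size.
+# Illustration only; see MpiiPCKAccuracy for the metric actually used in these tables.
+import numpy as np
+
+def pckh(pred, gt, head_size, visible, thr=0.5):
+    """pred, gt: (N, K, 2); head_size: (N,); visible: (N, K) bool."""
+    dist = np.linalg.norm(pred - gt, axis=-1)      # (N, K) pixel errors
+    norm_dist = dist / head_size[:, None]          # normalise by head size
+    correct = (norm_dist <= thr) & visible
+    return correct.sum() / max(visible.sum(), 1)
+
+# Toy example: 1 image, 3 joints, head size 60 px.
+pred = np.array([[[10., 10.], [50., 52.], [100., 140.]]])
+gt   = np.array([[[12., 11.], [50., 50.], [100., 100.]]])
+vis  = np.array([[True, True, True]])
+print(pckh(pred, gt, np.array([60.]), vis, thr=0.5))   # 2 of 3 joints within 30 px
+```
+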
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py) | 256x256 | 0.900 | 0.334 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_20200812.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py) | 256x256 | 0.901 | 0.337 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..f32129742da5306e1d9f1ea7b8048480a7efa12c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml @@ -0,0 +1,28 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: MPII + Name: td-hm_hrnet-w32_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.9 + Mean@0.1: 0.334 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hrnet-w48_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.901 + Mean@0.1: 0.337 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..e6647569508429fe88683ddcb16c0eaebd3309a6 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md @@ -0,0 +1,39 @@ + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py) | 256x256 | 0.859 | 0.260 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256_20210623.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py) | 256x256 | 0.869 | 0.271 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256_20210622.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..c4314b7a74d57bbe45cd635aba06313043cc912e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml @@ -0,0 +1,28 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: &id001 + - LiteHRNet + Training Data: MPII + Name: td-hm_litehrnet-18_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.859 + Mean@0.1: 0.26 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_litehrnet-30_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.869 + Mean@0.1: 0.271 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..8bb280a8210042fb5d5f7a3f085e7a7b98ad651b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md @@ -0,0 +1,39 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
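+This config follows the same SimpleBaseline recipe as the ResNet variants and differs mainly in the backbone and the head's `in_channels`. As a purely hypothetical illustration of how such a variant could be expressed through mmengine config inheritance (the file sketched below is not part of this PR; the actual config is written out in full):
+
+```python
+# Hypothetical child config illustrating a backbone swap via mmengine inheritance.
+# The real td-hm_mobilenetv2 config does not use _base_ this way; this is a sketch.
+_base_ = ['./td-hm_res50_8xb64-210e_mpii-256x256.py']
+
+model = dict(
+    # _delete_ replaces the inherited ResNet dict instead of merging into it.
+    backbone=dict(
+        _delete_=True,
+        type='MobileNetV2',
+        widen_factor=1.0,
+        init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
+    # MobileNetV2's final feature map has 1280 channels instead of ResNet-50's 2048.
+    head=dict(in_channels=1280),
+)
+```
+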
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.854 | 0.234 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..afc54f79340332a73b079284f6920d9476972e7f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: MPII + Name: td-hm_mobilenetv2_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.854 + Mean@0.1: 0.234 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..b8d98c4d6e9b6e716ce543b7ddb3101eef58eb20 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md @@ -0,0 +1,58 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
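+These SimpleBaseline configs supervise the network with MSRA-style heatmaps: each keypoint becomes a small Gaussian (sigma=2) on a 64x64 grid for a 256x256 input. Below is a short numpy sketch of that target encoding; it follows the standard recipe and leaves out the visibility weights and border handling of mmpose's `MSRAHeatmap` codec:
+
+```python
+# Sketch of MSRA-style heatmap target generation for one keypoint.
+# Standard recipe only; mmpose's MSRAHeatmap codec handles edge cases and weights.
+import numpy as np
+
+def gaussian_target(kpt_xy, input_size=(256, 256), heatmap_size=(64, 64), sigma=2):
+    """Render an (H, W) Gaussian heatmap for a keypoint given in input-image pixels."""
+    w_in, h_in = input_size
+    w_hm, h_hm = heatmap_size
+    # Map the keypoint from input resolution to heatmap resolution.
+    cx = kpt_xy[0] * w_hm / w_in
+    cy = kpt_xy[1] * h_hm / h_in
+    xs = np.arange(w_hm, dtype=np.float32)
+    ys = np.arange(h_hm, dtype=np.float32)[:, None]
+    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))
+
+hm = gaussian_target((128.0, 96.0))
+print(hm.shape, np.unravel_index(hm.argmax(), hm.shape))  # (64, 64), peak at row 24, col 32
+```
+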
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.882 | 0.286 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256_20200812.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256_20200812.log.json) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..ff92c4f7ce78a07c141c049902d6f2cd320e0dcb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml @@ -0,0 +1,42 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: MPII + Name: td-hm_res50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.882 + Mean@0.1: 0.286 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_res101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_res152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.889 + Mean@0.1: 0.303 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..23362650980ca29784bb769d3bd55538272b79d5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md @@ -0,0 +1,41 @@ + + +
+ResNetV1D (CVPR'2019) + +```bibtex +@inproceedings{he2019bag, + title={Bag of tricks for image classification with convolutional neural networks}, + author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={558--567}, + year={2019} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.881 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256_20200812.log.json) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.883 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256_20200812.log.json) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.300 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..e98e722db1428a622c82c53255fb962c890d3f58 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml @@ -0,0 +1,42 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNetV1D + Training Data: MPII + Name: td-hm_resnetv1d50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.881 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_resnetv1d101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.883 + Mean@0.1: 0.295 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_resnetv1d152_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..bf9d5acf8c1e48aca593f1162c35e32e73b6774d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md 
@@ -0,0 +1,39 @@ + + +
+ResNext (CVPR'2017) + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.294 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256_20200927.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..580dda77b013b7eae4ed288925741cb8d3f6f246 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNext + Training Data: MPII + Name: td-hm_resnext152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.887 + Mean@0.1: 0.294 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..cf0e4befffc4b023dfe84a23d7880cb959154f1e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md @@ -0,0 +1,40 @@ + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256_20200812.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.293 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256_20200812.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..b1ec80fd8005dd50847171cd7c916cece02dd94d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml @@ -0,0 +1,29 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - SCNet + Training Data: MPII + Name: td-hm_scnet50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_scnet101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.887 + Mean@0.1: 0.293 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..1c92ecf9ea19a05665faf7ff2feb62a1ec5753a2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md @@ -0,0 +1,43 @@ + + +
+SEResNet (CVPR'2018) + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
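+Like the other MPII configs in this folder, these models are evaluated with flip test (`flip_test=True`, `shift_heatmap=True` in `test_cfg`): the image and its mirror are both run through the network, the mirrored heatmaps are flipped back with left/right joint channels swapped, then averaged. The rough numpy sketch below illustrates that test-time averaging; the joint pairs assume the standard MPII ordering, and mmpose implements this inside the heads' prediction logic:
+
+```python
+# Rough sketch of heatmap flip-test averaging (what flip_test / shift_heatmap do).
+# Illustration only; joint indices assume the standard 16-joint MPII ordering.
+import numpy as np
+
+FLIP_PAIRS = [(0, 5), (1, 4), (2, 3), (10, 15), (11, 14), (12, 13)]
+
+def flip_back(heatmaps: np.ndarray, shift: bool = True) -> np.ndarray:
+    """heatmaps: (K, H, W) predicted from the horizontally flipped image."""
+    out = heatmaps[:, :, ::-1].copy()          # undo the horizontal flip
+    for a, b in FLIP_PAIRS:                    # swap left/right joint channels
+        out[[a, b]] = out[[b, a]]
+    if shift:                                  # 1-px shift, as with shift_heatmap=True
+        out[:, :, 1:] = out[:, :, :-1]
+    return out
+
+def flip_test(model_fn, img: np.ndarray) -> np.ndarray:
+    original = model_fn(img)
+    flipped = model_fn(img[:, ::-1].copy())    # img: (H, W, 3)
+    return 0.5 * (original + flip_back(flipped))
+
+# Tiny demo with a dummy "model" that returns random (16, 64, 64) heatmaps.
+rng = np.random.default_rng(0)
+dummy = lambda img: rng.random((16, 64, 64))
+print(flip_test(dummy, rng.random((256, 256, 3))).shape)   # (16, 64, 64)
+```
+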
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.292 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256_20200927.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256_20200927.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.287 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256_20200927.log.json) | + +Note that * means without imagenet pre-training. diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..e71050811a4a028626e7cbbb4563533c9fdd6e57 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml @@ -0,0 +1,42 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - SEResNet + Training Data: MPII + Name: td-hm_seresnet50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.292 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_seresnet101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.295 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_seresnet152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.287 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..3cdaaaf5eaa1b2d9f311b9c63dc6ec37cdac2cdc --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md @@ -0,0 +1,39 @@ + + +
+ShufflenetV1 (CVPR'2018) + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
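+The Mean and Mean@0.1 columns below are PCKh scores on the MPII validation set: keypoint
+error is normalized by head size and thresholded at 0.5 and 0.1, respectively. The toy
+`pckh` helper below is only an illustrative sketch of that idea; the actual `MpiiPCKAccuracy`
+evaluator used by these configs additionally handles joint visibility, the official head-box
+scaling and per-joint reporting:
+
+```python
+# Simplified PCKh sketch (illustrative only, not the exact MpiiPCKAccuracy implementation).
+import numpy as np
+
+def pckh(pred, gt, head_size, thr=0.5):
+    """pred, gt: (N, K, 2) keypoints in pixels; head_size: (N,) per-instance head sizes."""
+    dist = np.linalg.norm(pred - gt, axis=-1)      # (N, K) Euclidean errors
+    norm_dist = dist / head_size[:, None]          # normalize by head size
+    return float((norm_dist <= thr).mean())        # fraction of keypoints within threshold
+
+pred = np.random.rand(4, 16, 2) * 256
+gt = pred + np.random.randn(4, 16, 2) * 5.0
+heads = np.full(4, 60.0)
+print(pckh(pred, gt, heads, 0.5), pckh(pred, gt, heads, 0.1))
+```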
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py) | 256x256 | 0.824 | 0.195 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256_20200925.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..b9edecc42838b43aec08941d4d87c37c72d74de0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ShufflenetV1 + Training Data: MPII + Name: td-hm_shufflenetv1_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.824 + Mean@0.1: 0.195 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..8ab7b026ba02ec179e07ec772f12b46966748fd9 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md @@ -0,0 +1,39 @@ + + +
+ShufflenetV2 (ECCV'2018) + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
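+The config names in the table below encode the training setup (`8xb64-210e` = 8 GPUs x 64
+samples per GPU for 210 epochs). The corresponding Python configs further down in this patch
+set `auto_scale_lr = dict(base_batch_size=512)`; assuming MMEngine's usual linear scaling rule
+(which is opt-in, e.g. enabled from the training entry point, not applied unconditionally),
+the effective learning rate would change with the actual total batch size roughly as in the
+hypothetical `scaled_lr` helper below:
+
+```python
+# Hedged sketch of the linear LR scaling rule; the real behaviour lives in MMEngine.
+def scaled_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
+    actual_batch_size = num_gpus * samples_per_gpu
+    return base_lr * actual_batch_size / base_batch_size
+
+# The MPII top-down configs use Adam with lr=5e-4 and base_batch_size=512 (8 x 64).
+print(scaled_lr(5e-4, 512, num_gpus=8, samples_per_gpu=64))  # 0.0005, unchanged
+print(scaled_lr(5e-4, 512, num_gpus=2, samples_per_gpu=64))  # 0.000125 on a 2-GPU setup
+```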
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.828 | 0.205 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256_20200925.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..efa6e14f51b13c14c8a25b3929f0150e69a13589 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ShufflenetV2 + Training Data: MPII + Name: td-hm_shufflenetv2_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.828 + Mean@0.1: 0.205 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py new file mode 100644 index 0000000000000000000000000000000000000000..794c49420ab69ae202685bb70c6d8ec8e1b2a02b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=16, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=16, + out_channels=16, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py new file mode 100644 index 0000000000000000000000000000000000000000..e9546504e0d3ead0b6977c33a4172a2581532a7f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=16, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..cd854a40a3f5d6def990488b5967058997d2348f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=16, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders 
+train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..459f24f3bdbbdc4a93e43e16382b023d6ff76e50 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', 
shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..5d47ed6fdc161e019c94b1ab64751a096a0d4537 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + 
decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3fce96000a2ff5e1165a88a322e1cfd1226c0a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py @@ -0,0 +1,146 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + 
num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..18b31539a33d542ae8a0f83b42835a7cf97ec5c2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + 
block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..bdab446f5038c6d86231d27284aee3b3723bea14 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py @@ -0,0 +1,137 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), 
heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..84089add2a0d2c6c7d7d8b75275cf097a9b68e7f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py @@ -0,0 +1,137 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# 
hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..41b9d3ba9ba964f34f1204d185e36dcbcb3821e0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], 
+ gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..def5d2fd1681262689afd40b20a0299e64118136 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) 
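+# `save_best='PCK'` with `rule='greater'` keeps the checkpoint that achieves the
+# highest validation PCK reported by the MpiiPCKAccuracy evaluator below.
+# The MSRAHeatmap codec that follows encodes each keypoint as a Gaussian heatmap
+# at 1/4 of the 256x256 input resolution (64x64 heatmaps, sigma=2).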
+ +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..bf515d0d21e6796af7fc79fb39ec27cd0fb0c7b0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 
116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..dee56ae77b0c7b7fa40690e712e7c7ad4648f279 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + 
out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0cbf684e38c1358cd939621294765249e1e5d68e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' 
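+# Expects the standard mmpose MPII layout under data/mpii/: images/ plus
+# annotations/mpii_{train,val}.json and the head-box file mpii_gt_val.mat
+# referenced by the val/test dataloaders for PCKh evaluation.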
+data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..24653a9e56b982b150ced4157c486428a34f9d04 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', 
shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..48bcfec5eb5017036168fae73396d809fcb3f567 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..30afb101037cc31d9dd51ac02487e5ef749921c7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', 
shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5c6b702c28300525db4137973889967af9d09c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + 
pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..c2f7723724b80d730f70d00f7649adb5935a10fc --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, 
round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..56b7fccb2e121fdd9734f9a43963f7fe1cc7511c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..79bb29e4b34fba243bca0635df2d8548e19ed76b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py @@ -0,0 +1,116 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..257dc360ad1ea41cec56d57bd4de19a59146a7a5 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..83eaca208f237d6eff8b7930e36bc91213af4fdf --- /dev/null +++ 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..cd05c23596c21c7aa2f491c7e95399f2ec1126c7 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# 
optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV2', + widen_factor=1.0, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md new file mode 100644 index 0000000000000000000000000000000000000000..5d26a103db205eca0a9466a2f362ed29b1c64d0f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md @@ -0,0 +1,55 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +Results on PoseTrack2018 val with ground-truth bounding boxes + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.2 | 89.0 | 84.5 | 79.2 | 82.3 | 82.5 | 78.7 | 83.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.1 | 89.0 | 85.1 | 80.2 | 80.6 | 82.8 | 79.6 | 83.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 88.3 | 90.2 | 86.0 | 81.0 | 80.7 | 83.3 | 80.6 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.8 | 90.0 | 86.2 | 81.3 | 81.0 | 83.4 | 80.9 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. 
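The COCO-to-PoseTrack18 fine-tuning described above is exactly what the configs later in this diff implement: a COCO-pretrained top-down checkpoint is loaded through `load_from`, and the schedule is shortened to 20 epochs with LR drops at epochs 10 and 15. A condensed sketch of that recipe (the checkpoint URL is the one used by the w32 256x192 config in this diff; treat the snippet as illustrative rather than a standalone config):

```python
# Fine-tuning recipe shared by the PoseTrack18 configs in this diff:
# initialize from a COCO-pretrained HRNet-W32 pose checkpoint, then train
# for 20 epochs with a warm-up and step decay at epochs 10 and 15.
load_from = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/'
             'coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth')

train_cfg = dict(max_epochs=20, val_interval=1)
param_scheduler = [
    dict(type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),  # warm-up
    dict(type='MultiStepLR', begin=0, end=20, milestones=[10, 15], gamma=0.1, by_epoch=True),
]
```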
+ +Results on PoseTrack2018 val with [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) human detector + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 78.0 | 82.9 | 79.5 | 73.8 | 76.9 | 76.6 | 70.2 | 76.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 79.9 | 83.6 | 80.4 | 74.5 | 74.8 | 76.1 | 70.5 | 77.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 80.1 | 83.4 | 80.6 | 74.8 | 74.3 | 76.8 | 70.5 | 77.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 80.2 | 83.8 | 80.9 | 75.2 | 74.7 | 76.7 | 71.7 | 77.8 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. 
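The two tables above differ only in where the person boxes come from. In the PoseTrack18 configs added below, detector-box evaluation is the default: `bbox_file` points at the pre-computed Cascade R-CNN detections and `filter_cfg` drops boxes scoring below 0.4; commenting those two keys out, as the inline comment in each config notes, switches the same config to ground-truth boxes. A minimal sketch of the toggle (dataset fields abridged):

```python
# Detector boxes (default in the configs below): read boxes from the
# pre-computed detection file and keep only confident ones.
val_dataset_det = dict(
    ann_file='annotations/posetrack18_val.json',
    bbox_file='data/posetrack18/annotations/posetrack18_val_human_detections.json',
    filter_cfg=dict(bbox_score_thr=0.4),
)

# Ground-truth boxes: omit `bbox_file` and `filter_cfg`, so boxes come
# straight from the annotation file.
val_dataset_gt = dict(
    ann_file='annotations/posetrack18_val.json',
)
```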
diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml new file mode 100644 index 0000000000000000000000000000000000000000..a0dcc78f7c65b0e712caa6e4f4204bfc8a3d8626 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml @@ -0,0 +1,154 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 78.7 + Elb: 84.5 + Head: 86.2 + Hip: 82.3 + Knee: 82.5 + Shou: 89 + Total: 83.4 + Wri: 79.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 79.6 + Elb: 84.5 + Head: 87.1 + Hip: 80.6 + Knee: 82.8 + Shou: 89 + Total: 83.7 + Wri: 80.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 79.6 + Elb: 85.1 + Head: 88.3 + Hip: 80.6 + Knee: 82.8 + Shou: 90.2 + Total: 84.6 + Wri: 81 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 80.6 + Elb: 86.2 + Head: 87.8 + Hip: 81 + Knee: 83.4 + Shou: 90 + Total: 84.6 + Wri: 81.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.2 + Elb: 79.5 + Head: 78.0 + Hip: 76.9 + Knee: 76.6 + Shou: 82.9 + Total: 76.9 + Wri: 73.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.5 + Elb: 80.4 + Head: 79.9 + Hip: 74.8 + Knee: 76.1 + Shou: 83.6 + Total: 77.3 + Wri: 74.5 + Task: Body 
2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.4 + Elb: 80.6 + Head: 80.1 + Hip: 74.3 + Knee: 76.8 + Shou: 83.4 + Total: 77.4 + Wri: 74.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 71.7 + Elb: 80.9 + Head: 80.2 + Hip: 74.7 + Knee: 76.7 + Shou: 83.8 + Total: 77.8 + Wri: 75.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md new file mode 100644 index 0000000000000000000000000000000000000000..86f476e5b7d0cbfef712e822d660ca6a91f78849 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md @@ -0,0 +1,58 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +Results on PoseTrack2018 val with ground-truth bounding boxes + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.5 | 87.7 | 82.5 | 75.8 | 80.1 | 78.8 | 74.2 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192_20201028.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml new file mode 100644 index 0000000000000000000000000000000000000000..478ffa247e660611a0f4eca6dbf594188ff9b7c2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml @@ -0,0 +1,22 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: PoseTrack18 + Name: td-hm_res50_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 74.2 + Elb: 82.5 + Head: 86.5 + Hip: 80.1 + Knee: 78.8 + Shou: 87.7 + Total: 81.2 + Wri: 75.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fe8e385f1daac0ac4df7a805203a88e87f487730 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 
116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..513207441068ff0dcf37a98e995d3be47baf4817 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + 
by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py 
b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cac23f14e47b4ba1f6ed5cb6c43ea6c11c5e89ad --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee99469fed8ae914e7aa91b3a32281f9f18ca1b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py @@ -0,0 +1,155 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e529d120733235c82e8088cb983127cf35f95d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py @@ -0,0 +1,126 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_256x192-ec54d7f3_20200709.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' 
+data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/README.md b/mmpose/configs/body_2d_keypoint/topdown_regression/README.md new file mode 100644 index 0000000000000000000000000000000000000000..adc278ce0e5363f1c7afdaac6ee6d3b05ef3a9d3 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/README.md @@ -0,0 +1,32 @@ +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). + +
+ +
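To make the two-stage regression paradigm concrete, here is a minimal sketch of the kind of head the configs below use: backbone features from a person crop are globally pooled, a single linear layer predicts K normalized (x, y) coordinates, and training minimizes a visibility-masked smooth-L1 loss. This is an illustration only, not the MMPose `RegressionHead`/`GlobalAveragePooling` implementation; all class and function names are made up for the sketch.

```python
# Minimal sketch of DeepPose-style coordinate regression (illustration, not MMPose code).
# Assumes a backbone that returns a (B, C, H, W) feature map for a cropped person box.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyRegressionHead(nn.Module):
    """Global average pooling + one linear layer -> K normalized (x, y) joints."""

    def __init__(self, in_channels: int = 2048, num_joints: int = 17):
        super().__init__()
        self.fc = nn.Linear(in_channels, num_joints * 2)
        self.num_joints = num_joints

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        pooled = feats.mean(dim=(2, 3))                 # (B, C) global average pooling
        coords = self.fc(pooled)                        # (B, K * 2)
        return coords.view(-1, self.num_joints, 2)      # (B, K, 2) normalized coordinates


def regression_loss(pred, target, target_weight):
    """Smooth-L1 loss masked by per-joint visibility, as in the SmoothL1Loss configs."""
    loss = F.smooth_l1_loss(pred, target, reduction='none')      # (B, K, 2)
    return (loss * target_weight.unsqueeze(-1)).mean()


# Toy usage: 17 COCO joints regressed from a ResNet-50-like feature map.
feats = torch.randn(2, 2048, 8, 6)
head = ToyRegressionHead()
pred = head(feats)
loss = regression_loss(pred, torch.rand(2, 17, 2), torch.ones(2, 17))
```

The RLE variants benchmarked below keep this head shape but swap the point-wise loss for a likelihood-based objective (see the note next to the RLE citation further down).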
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :--------------: | :--------: | :---: | :---: | :-------------------------------------------------------: | +| ResNet-152+RLE | 256x192 | 0.731 | 0.805 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| ResNet-101+RLE | 256x192 | 0.722 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| ResNet-50+RLE | 256x192 | 0.706 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| MobileNet-v2+RLE | 256x192 | 0.593 | 0.644 | [mobilenetv2_rle_coco.md](./coco/mobilenetv2_rle_coco.md) | +| ResNet-152 | 256x192 | 0.584 | 0.688 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-101 | 256x192 | 0.562 | 0.670 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-50 | 256x192 | 0.528 | 0.639 | [resnet_coco.md](./coco/resnet_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :-----------: | :--------: | :------: | :------: | :---------------------------------------------: | +| ResNet-50+RLE | 256x256 | 0.861 | 0.277 | [resnet_rle_mpii.md](./mpii/resnet_rle_mpii.md) | +| ResNet-152 | 256x256 | 0.850 | 0.208 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNet-101 | 256x256 | 0.841 | 0.200 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNet-50 | 256x256 | 0.826 | 0.180 | [resnet_mpii.md](./mpii/resnet_mpii.md) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..eddf5a79d31d974bf9a0e1d0fe128b32f5fa6065 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md @@ -0,0 +1,74 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
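As intuition for the RLE results reported below, a simplified sketch: rather than penalizing a point estimate with an L1/L2 loss, the head predicts a distribution per keypoint and minimizes its negative log-likelihood. The actual `RLELoss` additionally learns the residual error density with a normalizing flow; this toy version assumes a fixed Laplace likelihood with a predicted scale, and all names are illustrative.

```python
# Simplified per-keypoint negative log-likelihood regression (illustration only).
# The real RLE loss learns the residual density with a normalizing flow; here we
# assume a fixed Laplace likelihood with a predicted per-joint scale sigma.
import torch


def laplace_nll(mu, log_sigma, target, target_weight):
    """mu, log_sigma, target: (B, K, 2); target_weight: (B, K) visibility mask."""
    sigma = log_sigma.exp()
    nll = torch.log(2 * sigma) + (target - mu).abs() / sigma   # Laplace NLL per coordinate
    nll = nll * target_weight.unsqueeze(-1)                    # ignore unlabeled joints
    return nll.mean()


mu = torch.zeros(2, 17, 2, requires_grad=True)
log_sigma = torch.zeros(2, 17, 2, requires_grad=True)
loss = laplace_nll(mu, log_sigma, torch.rand(2, 17, 2), torch.ones(2, 17))
loss.backward()
```

Predicting a scale alongside the coordinates lets uncertain keypoints contribute a softer gradient, which is the basic effect the full flow-based RLE formulation refines.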
+ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
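For background on the MobileNetV2 backbone cited above, here is a generic sketch of its inverted residual block (1x1 expansion, 3x3 depthwise convolution, linear 1x1 projection, identity skip when the shapes match). It is an illustration only, not the mmcv/MMPose `MobileNetV2` implementation.

```python
# Generic inverted residual block in the MobileNetV2 style (illustration only).
import torch
import torch.nn as nn


class InvertedResidual(nn.Module):
    def __init__(self, in_ch: int, out_ch: int, stride: int = 1, expand: int = 6):
        super().__init__()
        hidden = in_ch * expand
        self.use_skip = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),             # 1x1 expansion
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride, 1,
                      groups=hidden, bias=False),                # 3x3 depthwise conv
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),            # linear 1x1 projection
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_skip else out


x = torch.randn(1, 32, 64, 48)
print(InvertedResidual(32, 32)(x).shape)   # torch.Size([1, 32, 64, 48])
```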
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
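The configs in this patch read annotations such as `person_keypoints_val2017.json` in the standard COCO keypoint format, where each person annotation stores 17 joints as a flat `[x, y, v]` list plus a box; the `bbox_file` used for evaluation holds detector outputs in the matching COCO results format. A small unpacking sketch (paths and variable names are placeholders):

```python
# Unpacking a COCO person-keypoints annotation (paths are placeholders).
import json

import numpy as np

with open('data/coco/annotations/person_keypoints_val2017.json') as f:
    coco = json.load(f)

ann = coco['annotations'][0]
kpts = np.array(ann['keypoints'], dtype=np.float32).reshape(17, 3)  # (x, y, visibility)
visible = kpts[:, 2] > 0          # v: 0 = not labeled, 1 = labeled but occluded, 2 = visible
x, y, w, h = ann['bbox']          # person box in (x, y, width, height) format
print(ann['image_id'], int(visible.sum()), (x, y, w, h))
```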
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_mobilenetv2_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.593 | 0.836 | 0.660 | 0.644 | 0.877 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0f470432b444bdc5dced66291bc91d7a8bd18a2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml @@ -0,0 +1,20 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: &id001 + - DeepPose + - RLE + - MobileNet + Training Data: COCO + Name: td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.593 + AP@0.5: 0.836 + AP@0.75: 0.66 + AR: 0.644 + AR@0.5: 0.877 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..77ed459aeda6a43d01b0219812c90509b8414282 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md @@ -0,0 +1,59 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
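As a quick sanity check on the `in_channels=2048` used by the ResNet-based heads in this patch, the sketch below builds a plain torchvision ResNet-50 feature extractor and inspects its output for a 256x192 crop. This is only an assumed stand-in; MMPose constructs its own `ResNet` backbone via `init_cfg` rather than through torchvision modules.

```python
# ResNet-50 feature extractor with the classifier head removed (torchvision stand-in).
# weights=None keeps the sketch offline; the configs load pretrained weights instead.
import torch
import torch.nn as nn
import torchvision

resnet = torchvision.models.resnet50(weights=None)
backbone = nn.Sequential(*list(resnet.children())[:-2])   # drop avgpool + fc

crop = torch.randn(1, 3, 256, 192)                         # one 256x192 person crop
feats = backbone(crop)
print(feats.shape)                                         # torch.Size([1, 2048, 8, 6])
```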
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.541 | 0.824 | 0.601 | 0.649 | 0.893 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.log.json) | +| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.562 | 0.831 | 0.629 | 0.670 | 0.900 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_20210205.log.json) | +| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py) | 256x192 | 0.584 | 0.842 | 0.659 | 0.688 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_20210205.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..e66b3043c6dfe8f9171e21b31cb6d3ae6d283932 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml @@ -0,0 +1,57 @@ +Collections: +- Name: DeepPose + Paper: + Title: "Deeppose: Human pose estimation via deep neural networks" + URL: http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/deeppose.md +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: &id001 + - DeepPose + - ResNet + Training Data: COCO + Name: td-reg_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.541 + AP@0.5: 0.824 + AP@0.75: 0.601 + AR: 0.649 + AR@0.5: 0.893 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.562 + AP@0.5: 0.831 + AP@0.75: 0.629 + AR: 0.67 + AR@0.5: 0.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training 
Data: COCO + Name: td-reg_res152_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.584 + AP@0.5: 0.842 + AP@0.75: 0.659 + AR: 0.688 + AR@0.5: 0.907 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..d3f4f5a2883ef69ddbfd11921a630758151a0be2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md @@ -0,0 +1,78 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
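The configs in this patch enable `flip_test` at inference. Below is a hedged sketch of that test-time augmentation for coordinate outputs, assuming the standard 17-keypoint COCO ordering; `model`, `flip_back`, and `predict_with_flip` are illustrative names, not the MMPose API, and the sub-pixel shift applied by `shift_coords`/`shift_heatmap` is omitted here.

```python
# Test-time horizontal-flip averaging for normalized (x, y) keypoint outputs.
# Assumes COCO ordering: nose, then (left, right) pairs for eyes/ears/shoulders/
# elbows/wrists/hips/knees/ankles. `model` is any callable image -> (17, 2) coords.
import numpy as np

FLIP_PAIRS = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16)]


def flip_back(coords: np.ndarray) -> np.ndarray:
    """Mirror x and swap left/right joints so the flipped prediction aligns again."""
    out = coords.copy()
    out[:, 0] = 1.0 - out[:, 0]               # x assumed normalized to [0, 1]
    for left, right in FLIP_PAIRS:
        out[[left, right]] = out[[right, left]]
    return out


def predict_with_flip(model, image: np.ndarray) -> np.ndarray:
    coords = model(image)                      # (17, 2)
    coords_flipped = model(image[:, ::-1])     # flip the width axis of an (H, W, C) image
    return 0.5 * (coords + flip_back(coords_flipped))
```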
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.706 | 0.888 | 0.776 | 0.753 | 0.924 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.log.json) | +| [deeppose_resnet_50_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.719 | 0.891 | 0.788 | 0.764 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.log.json) | +| [deeppose_resnet_101_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.894 | 0.794 | 0.768 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle_20220615.log.json) | +| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.897 | 0.805 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle_20220615.log.json) | +| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.901 | 0.815 | 0.793 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle_20220624.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..97ae41b8f2af552b5f0e77264baf86f308912ecc --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml @@ -0,0 +1,90 @@ +Collections: +- Name: RLE + Paper: + Title: Human pose regression with residual log-likelihood estimation + URL: https://arxiv.org/abs/2107.11291 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/rle.md +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: &id001 + - DeepPose + - RLE + - ResNet + Training Data: COCO + Name: td-reg_res50_rle-8xb64-210e_coco-256x192 + 
Results: + - Dataset: COCO + Metrics: + AP: 0.706 + AP@0.5: 0.888 + AP@0.75: 0.776 + AR: 0.753 + AR@0.5: 0.924 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.719 + AP@0.5: 0.891 + AP@0.75: 0.788 + AR: 0.764 + AR@0.5: 0.925 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res101_rle-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.722 + AP@0.5: 0.894 + AP@0.75: 0.794 + AR: 0.768 + AR@0.5: 0.93 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res152_rle-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.897 + AP@0.75: 0.805 + AR: 0.777 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res152_rle-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.901 + AP@0.75: 0.815 + AR: 0.793 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..97f5d926c66be84ef1bc8fb8f1f187730cebd46d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py @@ -0,0 +1,126 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/top_down/' + 'mobilenetv2/mobilenetv2_coco_256x192-d1e58e7b_20200727.pth')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=1280, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + ), +) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..94f35d0fc36c749638ff397f5af5eb50a006894f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = 
dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..21b4a3cdcbab80fa080ca90581a6ab3ee44fdbe4 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# 
model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fa56fba4987e9f4c6c4f0e284e5949c0c6f46d6c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', 
+ data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e2a832b652b33aaa629fdb4a07863f223051461f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + 
mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..6d319e927eb21ddcb71e40ecae1050c3421871d2 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(288, 384)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 
57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7e487acf470dfbd988979ffb7570f72d409df0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..db530f6ec4f065fa16228ee66fee33db5afddc4f --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', 
checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..6b74aba7f3c138901d35652c9b7f19bebf23cceb --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + 
prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..150fd48020f0e47e63e5e2356bc91ae29499c546 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md @@ -0,0 +1,58 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.826 | 0.180 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_20210203.log.json) | +| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.841 | 0.200 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256_20210205.log.json) | +| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.850 | 0.208 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256_20210205.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..a744083e97a054b07c89b0d283189ef51f236bf0 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml @@ -0,0 +1,42 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: &id001 + - DeepPose + - ResNet + Training Data: MPII + Name: td-reg_res50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.826 + Mean@0.1: 0.18 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-reg_res101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.841 + Mean@0.1: 0.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-reg_res152_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.85 + Mean@0.1: 0.208 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md new file mode 100644 index 0000000000000000000000000000000000000000..bf3a67a49a41be72076fa2831902aa194d17d346 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md @@ -0,0 +1,73 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py) | 256x256 | 0.861 | 0.277 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle_20220504.log.json) | diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml new file mode 100644 index 0000000000000000000000000000000000000000..a03586d42cd7690154d336444e948ea317dbfc9c --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py + In Collection: RLE + Metadata: + Architecture: + - DeepPose + - RLE + - ResNet + Training Data: MPII + Name: td-reg_res50_rle-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.861 + Mean@0.1: 0.277 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..6c7821f91b1161491ad2166b36bd582e194f384b --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py @@ -0,0 +1,116 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a19b0d6e720c9f60e19d62a8712e532390cc84 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py @@ -0,0 +1,118 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +file_client_args = dict(backend='disk') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', file_client_args=file_client_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + 
dict(type='LoadImage', file_client_args=file_client_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..901fd4b8d61c2aa7a5cc920d1590acf8a4ece88d --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py @@ -0,0 +1,116 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + 
num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..9d46484755dec533fe5519a782de86404bf9986e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py @@ -0,0 +1,116 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=16, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + 
pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/README.md b/mmpose/configs/body_2d_keypoint/yoloxpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8195b1e23673183aedee37f4b060d05f792e885e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/README.md @@ -0,0 +1,22 @@ +# YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss + + + +
+YOLO-Pose (CVPRW'2022) + +```bibtex +@inproceedings{maji2022yolo, + title={Yolo-pose: Enhancing yolo for multi person pose estimation using object keypoint similarity loss}, + author={Maji, Debapriya and Nagori, Soyeb and Mathew, Manu and Poddar, Deepak}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2637--2646}, + year={2022} +} +``` + +
+ +YOLO-Pose is a bottom-up pose estimation approach that simultaneously detects all person instances and regresses keypoint locations in a single pass. + +We implement **YOLOX-Pose** based on the **YOLOX** object detection framework and inherit the benefits of unified pose estimation and object detection from YOLO-Pose. To predict keypoint locations more accurately, separate branches with adaptive convolutions are used to regress the offsets for different joints. This allows the feature extraction to be optimized for each keypoint. diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.md b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.md new file mode 100644 index 0000000000000000000000000000000000000000..fc98239e135dc4ee82038e4a09bcb40c7ddd923a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.md @@ -0,0 +1,59 @@ + + +
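Because YOLOX-Pose is bottom-up, a single forward pass returns every person in the image. Below is a minimal inference sketch, assuming an MMPose 1.x installation; it uses the `yoloxpose_s` config and checkpoint listed in the results table further down, `demo.jpg` is a placeholder image path, and the config path is relative to the MMPose root:

```python
# Minimal bottom-up inference sketch (assumes mmpose>=1.0 is installed;
# 'demo.jpg' is a placeholder image path).
from mmpose.apis import init_model, inference_bottomup

config = 'configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py'
checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/'
              'yoloxpose_s_8xb32-300e_coco-640-56c79c1f_20230829.pth')

model = init_model(config, checkpoint, device='cpu')

# One pass over the whole image: no separate person detector is needed.
results = inference_bottomup(model, 'demo.jpg')
instances = results[0].pred_instances
print(instances.keypoints.shape)  # (num_persons, 17, 2)
```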
+YOLO-Pose (CVPRW'2022) + +```bibtex +@inproceedings{maji2022yolo, + title={Yolo-pose: Enhancing yolo for multi person pose estimation using object keypoint similarity loss}, + author={Maji, Debapriya and Nagori, Soyeb and Mathew, Manu and Poddar, Deepak}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2637--2646}, + year={2022} +} +``` + +
+ + + +
+YOLOX (arXiv'2021) + +```bibtex +@article{ge2021yolox, + title={Yolox: Exceeding yolo series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [yoloxpose_tiny](/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_tiny_4xb64-300e_coco-416.py) | 416x416 | 0.526 | 0.793 | 0.556 | 0.571 | 0.833 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_tiny_4xb64-300e_coco-416-76eb44ca_20230829.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_tiny_4xb64-300e_coco-416-20230829.json) | +| [yoloxpose_s](/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py) | 640x640 | 0.641 | 0.872 | 0.702 | 0.682 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_s_8xb32-300e_coco-640-56c79c1f_20230829.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_s_8xb32-300e_coco-640-20230829.json) | +| [yoloxpose_m](/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py) | 640x640 | 0.695 | 0.899 | 0.766 | 0.733 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_m_8xb32-300e_coco-640-84e9a538_20230829.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_m_8xb32-300e_coco-640-20230829.json) | +| [yoloxpose_l](/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py) | 640x640 | 0.712 | 0.901 | 0.782 | 0.749 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_l_8xb32-300e_coco-640-de0f8dee_20230829.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_l_8xb32-300e_coco-640-20230829.json) | diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.yml b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.yml new file mode 100644 index 0000000000000000000000000000000000000000..378ae5dbfefce0821645147a971842142bbd1570 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.yml @@ -0,0 +1,72 @@ +Collections: +- Name: YOLOXPose + Paper: + Title: 'YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss' + URL: https://arxiv.org/abs/2204.06806 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/yolopose.md +Models: +- Config: configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_tiny_4xb64-300e_coco-416.py + In Collection: YOLOXPose + Metadata: + Architecture: &id001 + - YOLOXPose + Training Data: COCO + Name: yoloxpose_tiny_4xb64-300e_coco-416 + Results: + - Dataset: COCO + Metrics: + AP: 0.526 + AP@0.5: 0.793 + AP@0.75: 0.556 + AR: 0.571 + AR@0.5: 0.833 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_tiny_4xb64-300e_coco-416-76eb44ca_20230829.pth +- Config: configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py + In Collection: YOLOXPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: yoloxpose_s_8xb32-300e_coco-640 + Results: + - Dataset: COCO + Metrics: + AP: 0.641 + AP@0.5: 0.872 + AP@0.75: 0.702 + AR: 0.682 + AR@0.5: 0.902 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_s_8xb32-300e_coco-640-56c79c1f_20230829.pth +- Config: configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py + In Collection: YOLOXPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: yoloxpose_m_8xb32-300e_coco-640 + Results: + - Dataset: COCO + Metrics: + AP: 0.695 + AP@0.5: 0.899 + AP@0.75: 0.766 + AR: 0.733 + AR@0.5: 0.926 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_m_8xb32-300e_coco-640-84e9a538_20230829.pth +- Config: configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py + In Collection: YOLOXPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: yoloxpose_l_8xb32-300e_coco-640 + Results: + - Dataset: COCO + Metrics: + AP: 0.712 + AP@0.5: 0.901 + AP@0.75: 0.782 + AR: 0.749 + AR@0.5: 0.926 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/yolox_pose/yoloxpose_l_8xb32-300e_coco-640-de0f8dee_20230829.pth diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py new file mode 100644 index 0000000000000000000000000000000000000000..db61ea854ac674b9d5e441e5620cca6042f0a3aa --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py @@ -0,0 +1,17 @@ +_base_ = './yoloxpose_s_8xb32-300e_coco-640.py' + +widen_factor = 1 +deepen_factor = 1 +checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_' \ + 'l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth' + +# model settings +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint), + ), + neck=dict( + in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), + head=dict(head_module_cfg=dict(widen_factor=widen_factor))) diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa895bc54c05e47d713609313b5b8d43220765a --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py @@ -0,0 +1,16 @@ +_base_ = './yoloxpose_s_8xb32-300e_coco-640.py' + +widen_factor = 0.75 +deepen_factor = 0.67 +checkpoint = 'https://download.openmmlab.com/mmpose/v1/pretrained_models/' \ + 'yolox_m_8x8_300e_coco_20230829.pth' + +# model settings +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint), + ), + neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), + head=dict(head_module_cfg=dict(widen_factor=widen_factor))) diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py new file mode 100644 index 0000000000000000000000000000000000000000..948a916b06707cfc73c1c9f1ac97a9ef928e23c4 --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py @@ -0,0 +1,266 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict( + _delete_=True, + type='EpochBasedTrainLoop', + max_epochs=300, + val_interval=10, + dynamic_intervals=[(280, 1)]) 
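+# NOTE: the tiny / m / l variants in this folder inherit from this file and
+# mainly override `widen_factor` / `deepen_factor`, the matching neck channels
+# and the pretrained YOLOX checkpoint (the tiny variant additionally switches
+# to 416x416 test input and a batch size of 64), so the schedule and losses
+# below are shared across model sizes.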
+ +auto_scale_lr = dict(base_batch_size=256) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.004, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, + bias_decay_mult=0, + bypass_duplicate=True, + ), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0002, + begin=5, + T_max=280, + end=280, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=280, end=300), +] + +# model +widen_factor = 0.5 +deepen_factor = 0.33 + +model = dict( + type='BottomupPoseEstimator', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + data_preprocessor=dict( + type='PoseDataPreprocessor', + pad_size_divisor=32, + mean=[0, 0, 0], + std=[1, 1, 1], + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1), + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v2.0/' + 'yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_' + '20211121_095711-4592a793.pth', + prefix='backbone.', + )), + neck=dict( + type='YOLOXPAFPN', + in_channels=[128, 256, 512], + out_channels=128, + num_csp_blocks=1, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + head=dict( + type='YOLOXPoseHead', + num_keypoints=17, + featmap_strides=(8, 16, 32), + head_module_cfg=dict( + num_classes=1, + in_channels=256, + feat_channels=256, + widen_factor=widen_factor, + stacked_convs=2, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + prior_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]), + assigner=dict(type='SimOTAAssigner', dynamic_k_indicator='oks'), + overlaps_power=0.5, + loss_cls=dict(type='BCELoss', reduction='sum', loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='BCELoss', + use_target_weight=True, + reduction='sum', + loss_weight=1.0), + loss_oks=dict( + type='OKSLoss', + reduction='none', + metainfo='configs/_base_/datasets/coco.py', + norm_target_weight=True, + loss_weight=30.0), + loss_vis=dict( + type='BCELoss', + use_target_weight=True, + reduction='mean', + loss_weight=1.0), + loss_bbox_aux=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + ), + test_cfg=dict( + score_thr=0.01, + nms_thr=0.65, + )) + +# data +input_size = (640, 640) +codec = dict(type='YOLOXPoseAnnotationProcessor', input_size=input_size) + +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + 
distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict( + type='YOLOXMixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict( + type='BottomupRandomAffine', + input_size=(640, 640), + shift_prob=0, + rotate_prob=0, + scale_prob=0, + scale_type='long', + pad_val=(114, 114, 114), + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] + +data_mode = 'bottomup' +data_root = 'data/' + +dataset_coco = dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='coco/train2017/'), + pipeline=train_pipeline_stage1, +) + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dataset_coco) + +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json', + score_mode='bbox', + nms_mode='none', +) +test_evaluator = val_evaluator + +custom_hooks = [ + dict( + type='YOLOXPoseModeSwitchHook', + num_last_epochs=20, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), +] diff --git a/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_tiny_4xb64-300e_coco-416.py b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_tiny_4xb64-300e_coco-416.py new file mode 100644 index 0000000000000000000000000000000000000000..d13d104e02bbff1497bb1629ff94b0172672984e --- /dev/null +++ b/mmpose/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_tiny_4xb64-300e_coco-416.py @@ -0,0 +1,77 @@ +_base_ = './yoloxpose_s_8xb32-300e_coco-640.py' + +# model settings +widen_factor = 0.375 +deepen_factor = 0.33 +checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_' \ + 'tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth' + +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='BatchSyncRandomResize', + 
random_size_range=(320, 640), + size_divisor=32, + interval=1), + ]), + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint), + ), + neck=dict( + in_channels=[96, 192, 384], + out_channels=96, + ), + head=dict(head_module_cfg=dict(widen_factor=widen_factor), )) + +# dataset settings +train_pipeline_stage1 = [ + dict(type='LoadImage', backend_args=None), + dict( + type='Mosaic', + img_scale=_base_.input_size, + pad_val=114.0, + pre_transform=[dict(type='LoadImage', backend_args=None)]), + dict( + type='BottomupRandomAffine', + input_size=_base_.input_size, + shift_factor=0.1, + rotate_factor=10, + scale_factor=(0.75, 1.0), + pad_val=114, + distribution='uniform', + transform_mode='perspective', + bbox_keep_corner=False, + clip_border=True, + ), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip'), + dict(type='FilterAnnotations', by_kpt=True, by_box=True, keep_empty=False), + dict(type='GenerateTarget', encoder=_base_.codec), + dict( + type='PackPoseInputs', + extra_mapping_labels={ + 'bbox': 'bboxes', + 'bbox_labels': 'labels', + 'keypoints': 'keypoints', + 'keypoints_visible': 'keypoints_visible', + 'area': 'areas' + }), +] +train_dataloader = dict( + batch_size=64, dataset=dict(pipeline=train_pipeline_stage1)) + +input_size = (416, 416) +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', input_size=input_size, pad_val=(114, 114, 114)), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale')) +] + +val_dataloader = dict(dataset=dict(pipeline=val_pipeline, )) +test_dataloader = val_dataloader diff --git a/mmpose/configs/body_3d_keypoint/README.md b/mmpose/configs/body_3d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..61cacfb94f75ced615ddb25ae8f8f0489dca1527 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/README.md @@ -0,0 +1,13 @@ +# Human Body 3D Pose Estimation + +3D pose estimation is the detection and analysis of X, Y, Z coordinates of human body joints from RGB images. For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: (1) from 2D poses to 3D poses (2D-to-3D pose lifting) (2) jointly learning 2D and 3D poses, and (3) directly regressing 3D poses from images. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_body_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/3d_human_pose_demo.md) to run demos. + +
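As a quick alternative to the demo scripts, here is a minimal sketch of running 2D-to-3D lifting through the high-level inferencer. It assumes an MMPose >= 1.1 installation; the `'human3d'` model alias is an assumption and may differ between versions, and `demo.jpg` is a placeholder path:

```python
# Minimal 3D human pose demo sketch (assumes mmpose>=1.1; the 'human3d'
# model alias is an assumption, check the mmpose.apis documentation).
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(pose3d='human3d')

# Detects 2D keypoints and lifts them to 3D in a single call.
result_generator = inferencer('demo.jpg', show=False)
result = next(result_generator)
predictions = result['predictions']  # per-instance 3D keypoints and scores
```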
diff --git a/mmpose/configs/body_3d_keypoint/image_pose_lift/README.md b/mmpose/configs/body_3d_keypoint/image_pose_lift/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36b8bfe48699f4c7a1a109b8996f134b74b71c74 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/image_pose_lift/README.md @@ -0,0 +1,13 @@ +# A simple yet effective baseline for 3d human pose estimation + +Simple 3D baseline proposes to break down the task of 3d human pose estimation into 2 stages: (1) Image → 2D pose (2) 2D pose → 3D pose. + +The authors find that "lifting" ground truth 2D joint locations to 3D space is a task that can be solved with a low error rate. Based on the success of 2d human pose estimation, it directly "lifts" 2d joint locations to 3d space. + +## Results and Models + +### Human3.6m Dataset + +| Arch | MPJPE | P-MPJPE | ckpt | log | Details and Download | +| :------------------------------------------ | :---: | :-----: | :-----------------------------------------: | :-----------------------------------------: | :---------------------------------------------------------: | +| [SimpleBaseline3D](/configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py) | 43.4 | 34.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | [simplebaseline3d_h36m.md](./h36m/simplebaseline3d_h36m.md) | diff --git a/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c1c2db806fc0c5b0a0d726f1ff066bb2bd1313 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py @@ -0,0 +1,168 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=200, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='StepLR', step_size=100000, gamma=0.96, end=80, by_epoch=False) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1)) + +# codec settings +# 3D keypoint normalization parameters +# From file: '{data_root}/annotation_body3d/fps50/joint3d_rel_stats.pkl' +target_mean = [[-2.55652589e-04, -7.11960570e-03, -9.81433052e-04], + [-5.65463051e-03, 3.19636009e-01, 7.19329269e-02], + [-1.01705840e-02, 6.91147892e-01, 1.55352986e-01], + [2.55651315e-04, 7.11954606e-03, 9.81423866e-04], + [-5.09729780e-03, 3.27040413e-01, 7.22258095e-02], + [-9.99656606e-03, 7.08277383e-01, 1.58016408e-01], + [2.90583676e-03, -2.11363307e-01, -4.74210915e-02], + [5.67537804e-03, -4.35088906e-01, -9.76974016e-02], + [5.93884964e-03, -4.91891970e-01, -1.10666618e-01], + [7.37352083e-03, -5.83948619e-01, -1.31171400e-01], + [5.41920653e-03, -3.83931702e-01, -8.68145417e-02], + [2.95964662e-03, -1.87567488e-01, -4.34536934e-02], + [1.26585822e-03, -1.20170579e-01, -2.82526049e-02], + [4.67186639e-03, -3.83644089e-01, -8.55125784e-02], + [1.67648571e-03, -1.97007177e-01, -4.31368364e-02], + 
[8.70569015e-04, -1.68664569e-01, -3.73902498e-02]], +target_std = [[0.11072244, 0.02238818, 0.07246294], + [0.15856311, 0.18933832, 0.20880479], + [0.19179935, 0.24320062, 0.24756193], + [0.11072181, 0.02238805, 0.07246253], + [0.15880454, 0.19977188, 0.2147063], + [0.18001944, 0.25052739, 0.24853247], + [0.05210694, 0.05211406, 0.06908241], + [0.09515367, 0.10133032, 0.12899733], + [0.11742458, 0.12648469, 0.16465091], + [0.12360297, 0.13085539, 0.16433336], + [0.14602232, 0.09707956, 0.13952731], + [0.24347532, 0.12982249, 0.20230181], + [0.2446877, 0.21501816, 0.23938235], + [0.13876084, 0.1008926, 0.1424411], + [0.23687529, 0.14491219, 0.20980829], + [0.24400695, 0.23975028, 0.25520584]] +# 2D keypoint normalization parameters +# From file: '{data_root}/annotation_body3d/fps50/joint2d_stats.pkl' +keypoints_mean = [[532.08351635, 419.74137558], [531.80953144, 418.2607141], + [530.68456967, 493.54259285], [529.36968722, 575.96448516], + [532.29767646, 421.28483336], [531.93946631, 494.72186795], + [529.71984447, 578.96110365], [532.93699382, 370.65225054], + [534.1101856, 317.90342311], [534.55416813, 304.24143901], + [534.86955004, 282.31030885], [534.11308566, 330.11296796], + [533.53637525, 376.2742511], [533.49380107, 391.72324565], + [533.52579142, 330.09494668], [532.50804964, 374.190479], + [532.72786934, 380.61615716]], +keypoints_std = [[107.73640054, 63.35908715], [119.00836213, 64.1215443], + [119.12412107, 50.53806215], [120.61688045, 56.38444891], + [101.95735275, 62.89636486], [106.24832897, 48.41178119], + [108.46734966, 54.58177071], [109.07369806, 68.70443672], + [111.20130351, 74.87287863], [111.63203838, 77.80542514], + [113.22330788, 79.90670556], [105.7145833, 73.27049436], + [107.05804267, 73.93175781], [107.97449418, 83.30391802], + [121.60675105, 74.25691526], [134.34378973, 77.48125087], + [131.79990652, 89.86721124]] +codec = dict( + type='ImagePoseLifting', + num_keypoints=17, + root_index=0, + remove_root=True, + target_mean=target_mean, + target_std=target_std, + keypoints_mean=keypoints_mean, + keypoints_std=keypoints_std) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(1, 1, 1), + dropout=0.5, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=16, + loss=dict(type='MSELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root', 'target_root_index', 'target_mean', + 'target_std')) +] +val_pipeline = train_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + causal=True, + keypoint_2d_src='gt', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + causal=True, + keypoint_2d_src='gt', + data_root=data_root, + 
data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.md b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.md new file mode 100644 index 0000000000000000000000000000000000000000..0f741b90e3f65222576cf8bc110f1fe7528aa454 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.md @@ -0,0 +1,44 @@ + + +
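The long `target_mean`/`target_std` and `keypoints_mean`/`keypoints_std` arrays in the config above are Human3.6M dataset statistics that the `ImagePoseLifting` codec uses to normalize the lifter's 2D inputs and root-relative 3D targets. A schematic NumPy illustration (not the actual codec implementation; the config path is relative to the MMPose root):

```python
import numpy as np
from mmengine.config import Config

# Schematic of the standardization implied by the codec statistics above
# (illustration only; see mmpose.codecs.ImagePoseLifting for the real logic).
cfg = Config.fromfile('configs/body_3d_keypoint/image_pose_lift/h36m/'
                      'image-pose-lift_tcn_8xb64-200e_h36m.py')

kpt_mean = np.asarray(cfg.codec['keypoints_mean']).reshape(17, 2)
kpt_std = np.asarray(cfg.codec['keypoints_std']).reshape(17, 2)

kpts_2d = np.random.rand(17, 2) * 1000        # fake 2D detections in pixels
normalized = (kpts_2d - kpt_mean) / kpt_std   # standardized input to the TCN
```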
+SimpleBaseline3D (ICCV'2017) + +```bibtex +@inproceedings{martinez_2017_3dbaseline, + title={A simple yet effective baseline for 3d human pose estimation}, + author={Martinez, Julieta and Hossain, Rayat and Romero, Javier and Little, James J.}, + booktitle={ICCV}, + year={2017} +} +``` + +
+ + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
+ +Results on Human3.6M dataset with ground truth 2D detections + +| Arch | MPJPE | P-MPJPE | ckpt | log | +| :-------------------------------------------------------------- | :---: | :-----: | :-------------------------------------------------------------: | :------------------------------------------------------------: | +| [SimpleBaseline3D1](/configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py) | 43.4 | 34.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | + +1 Differing from the original paper, we didn't apply the `max-norm constraint` because we found this led to a better convergence and performance. diff --git a/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.yml b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.yml new file mode 100644 index 0000000000000000000000000000000000000000..17894ee3b1d2e6058e92fe57d3c0bff02bcdc817 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/image_pose_lift/h36m/simplebaseline3d_h36m.yml @@ -0,0 +1,21 @@ +Collections: +- Name: SimpleBaseline3D + Paper: + Title: A simple yet effective baseline for 3d human pose estimation + URL: http://openaccess.thecvf.com/content_iccv_2017/html/Martinez_A_Simple_yet_ICCV_2017_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/simplebaseline3d.md +Models: +- Config: configs/body_3d_keypoint/image_pose_lift/h36m/image-pose-lift_tcn_8xb64-200e_h36m.py + In Collection: SimpleBaseline3D + Metadata: + Architecture: &id001 + - SimpleBaseline3D + Training Data: Human3.6M + Name: image-pose-lift_tcn_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 43.4 + P-MPJPE: 34.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth diff --git a/mmpose/configs/body_3d_keypoint/motionbert/README.md b/mmpose/configs/body_3d_keypoint/motionbert/README.md new file mode 100644 index 0000000000000000000000000000000000000000..562ce7612a8975bcca6bfae668552eb1123b0ff8 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/README.md @@ -0,0 +1,23 @@ +# MotionBERT: A Unified Perspective on Learning Human Motion Representations + +Motionbert proposes a pretraining stage in which a motion encoder is trained to recover the underlying 3D motion from noisy partial 2D observations. The motion representations acquired in this way incorporate geometric, kinematic, and physical knowledge about human motion, which can be easily transferred to multiple downstream tasks. 
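A minimal sketch of loading the converted MotionBERT lifter from the config and checkpoint listed in the tables below (assumes an MMPose >= 1.1 installation; the config path is relative to the MMPose root):

```python
# Build the converted MotionBERT pose lifter (sketch; assumes mmpose>=1.1).
from mmpose.apis import init_model

config = ('configs/body_3d_keypoint/motionbert/h36m/'
          'motionbert_dstformer-243frm_8xb32-240e_h36m.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/'
              'pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth')

# The loaded model is configured for 243-frame 2D keypoint sequences
# (seq_len=243 in the DSTFormer backbone of the config below).
lifter = init_model(config, checkpoint, device='cpu')
```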
+ +## Results and Models + +### Human3.6m Dataset + +| Arch | MPJPE | P-MPJPE | ckpt | log | Details and Download | +| :-------------------------------------------------------------------- | :---: | :-----: | :-------------------------------------------------------------------: | :-: | :---------------------------------------------: | +| [MotionBERT\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py) | 35.3 | 27.7 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth) | / | [motionbert_h36m.md](./h36m/motionbert_h36m.md) | +| [MotionBERT-finetuned\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py) | 27.5 | 21.6 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_ft_h36m-d80af323_20230531.pth) | / | [motionbert_h36m.md](./h36m/motionbert_h36m.md) | + +### Human3.6m Dataset from official repo 1 + +| Arch | MPJPE | Average MPJPE | P-MPJPE | ckpt | log | Details and Download | +| :------------------------------------------------------------- | :---: | :-----------: | :-----: | :-------------------------------------------------------------: | :-: | :---------------------------------------------: | +| [MotionBERT\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m-original.py) | 39.8 | 39.2 | 33.4 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth) | / | [motionbert_h36m.md](./h36m/motionbert_h36m.md) | +| [MotionBERT-finetuned\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py) | 37.7 | 37.2 | 32.2 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_ft_h36m-d80af323_20230531.pth) | / | [motionbert_h36m.md](./h36m/motionbert_h36m.md) | + +1 Please refer to the [doc](./h36m/motionbert_h36m.md) for more details. + +*Models with * are converted from the official repo. The config files of these models are only for validation. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m-original.py b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m-original.py new file mode 100644 index 0000000000000000000000000000000000000000..caf2e56530384f118062055711305881fa5505c2 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m-original.py @@ -0,0 +1,137 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=240, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.01)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.99, end=120, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +train_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, mode='train') +val_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, rootrel=True) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='DSTFormer', + in_channels=3, + feat_size=512, + depth=5, + num_heads=8, + mlp_ratio=2, + seq_len=243, + att_fuse=True, + ), + head=dict( + type='MotionRegressionHead', + in_channels=512, + out_channels=3, + embedding_size=512, + loss=dict(type='MPJPEVelocityJointLoss'), + decoder=val_codec, + ), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=train_codec), + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(center_mode='static', center_x=0.), + target_flip_cfg=dict(center_mode='static', center_x=0.), + flip_label=True), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=val_codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] + +# data loaders +train_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train_original.npz', + seq_len=1, + multiple_target=243, + multiple_target_step=81, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test_original.npz', + factor_file='annotation_body3d/fps50/h36m_factors.npy', + seq_len=1, + seq_step=1, + multiple_target=243, + 
camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +skip_list = [ + 'S9_Greet', 'S9_SittingDown', 'S9_Wait_1', 'S9_Greeting', 'S9_Waiting_1' +] +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe', skip_list=skip_list), + dict(type='MPJPE', mode='p-mpjpe', skip_list=skip_list) +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..ea91556198fd56f978e988311ad803a4a2193ab5 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py @@ -0,0 +1,136 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=240, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.01)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.99, end=120, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +train_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, mode='train') +val_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, rootrel=True) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='DSTFormer', + in_channels=3, + feat_size=512, + depth=5, + num_heads=8, + mlp_ratio=2, + seq_len=243, + att_fuse=True, + ), + head=dict( + type='MotionRegressionHead', + in_channels=512, + out_channels=3, + embedding_size=512, + loss=dict(type='MPJPEVelocityJointLoss'), + decoder=val_codec, + ), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=train_codec), + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(center_mode='static', center_x=0.), + target_flip_cfg=dict(center_mode='static', center_x=0.), + flip_label=True), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=val_codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] + +# data loaders +train_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + multiple_target=243, + multiple_target_step=81, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, 
+ sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + seq_step=1, + multiple_target=243, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +skip_list = [ + 'S9_Greet', 'S9_SittingDown', 'S9_Wait_1', 'S9_Greeting', 'S9_Waiting_1' +] +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe', skip_list=skip_list), + dict(type='MPJPE', mode='p-mpjpe', skip_list=skip_list) +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py new file mode 100644 index 0000000000000000000000000000000000000000..555fd8ae0e7b9a9a0b4e6b2743ff581f382096d6 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py @@ -0,0 +1,142 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=120, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.01)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.99, end=60, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +train_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, mode='train') +val_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, rootrel=True) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='DSTFormer', + in_channels=3, + feat_size=512, + depth=5, + num_heads=8, + mlp_ratio=2, + seq_len=243, + att_fuse=True, + ), + head=dict( + type='MotionRegressionHead', + in_channels=512, + out_channels=3, + embedding_size=512, + loss=dict(type='MPJPEVelocityJointLoss'), + decoder=val_codec, + ), + test_cfg=dict(flip_test=True), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/' + 'pose_lift/h36m/motionbert_pretrain_h36m-29ffebf5_20230719.pth'), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=train_codec), + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(center_mode='static', center_x=0.), + target_flip_cfg=dict(center_mode='static', center_x=0.), + flip_label=True), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=val_codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] + +# data loaders +train_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', 
shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train_original.npz', + seq_len=1, + multiple_target=243, + multiple_target_step=81, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test_original.npz', + factor_file='annotation_body3d/fps50/h36m_factors.npy', + seq_len=1, + seq_step=1, + multiple_target=243, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +skip_list = [ + 'S9_Greet', 'S9_SittingDown', 'S9_Wait_1', 'S9_Greeting', 'S9_Waiting_1' +] +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe', skip_list=skip_list), + dict(type='MPJPE', mode='p-mpjpe', skip_list=skip_list) +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..256a765539674749d5fa5d67f33a4468454fe4b8 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py @@ -0,0 +1,141 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=120, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.01)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.99, end=60, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +train_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, mode='train') +val_codec = dict( + type='MotionBERTLabel', num_keypoints=17, concat_vis=True, rootrel=True) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='DSTFormer', + in_channels=3, + feat_size=512, + depth=5, + num_heads=8, + mlp_ratio=2, + seq_len=243, + att_fuse=True, + ), + head=dict( + type='MotionRegressionHead', + in_channels=512, + out_channels=3, + embedding_size=512, + loss=dict(type='MPJPEVelocityJointLoss'), + decoder=val_codec, + ), + test_cfg=dict(flip_test=True), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/' + 'pose_lift/h36m/motionbert_pretrain_h36m-29ffebf5_20230719.pth'), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=train_codec), + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(center_mode='static', center_x=0.), + target_flip_cfg=dict(center_mode='static', center_x=0.), + flip_label=True), + dict( + type='PackPoseInputs', + 
meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=val_codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'factor', 'camera_param')) +] + +# data loaders +train_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + multiple_target=243, + multiple_target_step=81, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + batch_size=32, + prefetch_factor=4, + pin_memory=True, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + seq_step=1, + multiple_target=243, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +skip_list = [ + 'S9_Greet', 'S9_SittingDown', 'S9_Wait_1', 'S9_Greeting', 'S9_Waiting_1' +] +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe', skip_list=skip_list), + dict(type='MPJPE', mode='p-mpjpe', skip_list=skip_list) +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.md b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.md new file mode 100644 index 0000000000000000000000000000000000000000..8d8f1b57842ce27b0e56615b1a8dbaa3bf00558f --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.md @@ -0,0 +1,55 @@ + + +
+MotionBERT (2022) + +```bibtex + @misc{Zhu_Ma_Liu_Liu_Wu_Wang_2022, + title={Learning Human Motion Representations: A Unified Perspective}, + author={Zhu, Wentao and Ma, Xiaoxuan and Liu, Zhaoyang and Liu, Libin and Wu, Wayne and Wang, Yizhou}, + year={2022}, + month={Oct}, + language={en-US} + } +``` + +
+ + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, +author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, +title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, +journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, +publisher = {IEEE Computer Society}, +volume = {36}, +number = {7}, +pages = {1325-1339}, +month = {jul}, +year = {2014} +} +``` + +
+ +Results on Human3.6M dataset with ground truth 2D detections + +| Arch | MPJPE | average MPJPE | P-MPJPE | ckpt | +| :-------------------------------------------------------------------------------------- | :---: | :-----------: | :-----: | :--------------------------------------------------------------------------------------: | +| [MotionBERT\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py) | 34.5 | 34.6 | 27.1 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth) | +| [MotionBERT-finetuned\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py) | 26.9 | 26.8 | 21.0 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_ft_h36m-d80af323_20230531.pth) | + +Results on Human3.6M dataset converted from the [official repo](https://github.com/Walter0807/MotionBERT)1 with ground truth 2D detections + +| Arch | MPJPE | average MPJPE | P-MPJPE | ckpt | log | +| :------------------------------------------------------------------------------------- | :---: | :-----------: | :-----: | :------------------------------------------------------------------------------------: | :-: | +| [MotionBERT\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m-original.py) | 39.8 | 39.2 | 33.4 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth) | / | +| [MotionBERT-finetuned\*](/configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py) | 37.7 | 37.2 | 32.2 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_ft_h36m-d80af323_20230531.pth) | / | + +1 By default, we test models with [Human 3.6m dataset](/docs/en/dataset_zoo/3d_body_keypoint.md#human3-6m) processed by MMPose. The official repo's dataset includes more data and applies a different pre-processing technique. To achieve the same result with the official repo, please download the [test annotation file](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/h36m_test_original.npz), [train annotation file](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/h36m_train_original.npz) and [factors](https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/h36m_factors.npy) under `$MMPOSE/data/h36m/annotation_body3d/fps50` and test with the configs we provided. + +*Models with * are converted from the [official repo](https://github.com/Walter0807/MotionBERT). The config files of these models are only for validation. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* diff --git a/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.yml b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.yml new file mode 100644 index 0000000000000000000000000000000000000000..2dc285426cee847d590b78714c1809a161d0378e --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/motionbert/h36m/motionbert_h36m.yml @@ -0,0 +1,45 @@ +Collections: +- Name: MotionBERT + Paper: + Title: "Learning Human Motion Representations: A Unified Perspective" + URL: https://arxiv.org/abs/2210.06551 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/motionbert.md +Models: +- Config: configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-243frm_8xb32-240e_h36m.py + In Collection: MotionBERT + Metadata: + Architecture: &id001 + - MotionBERT + Training Data: Human3.6M (MotionBERT) + Name: motionbert_dstformer-243frm_8xb32-240e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 34.5 + P-MPJPE: 27.1 + Task: Body 3D Keypoint + - Dataset: Human3.6M (MotionBERT) + Metrics: + MPJPE: 39.8 + P-MPJPE: 33.4 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_h36m-f554954f_20230531.pth +- Config: configs/body_3d_keypoint/motionbert/h36m/motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py + In Collection: MotionBERT + Alias: human3d + Metadata: + Architecture: *id001 + Training Data: Human3.6M (MotionBERT) + Name: motionbert_dstformer-ft-243frm_8xb32-120e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 26.9 + P-MPJPE: 21.0 + Task: Body 3D Keypoint + - Dataset: Human3.6M (MotionBERT) + Metrics: + MPJPE: 37.7 + P-MPJPE: 32.2 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_3d_keypoint/pose_lift/h36m/motionbert_ft_h36m-d80af323_20230531.pth diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/README.md b/mmpose/configs/body_3d_keypoint/video_pose_lift/README.md new file mode 100644 index 0000000000000000000000000000000000000000..faf92b789931fa9a6b9709b28c2271f077303c7a --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/README.md @@ -0,0 +1,17 @@ +# 3D human pose estimation in video with temporal convolutions and semi-supervised training + +Based on the success of 2d human pose estimation, it directly "lifts" a sequence of 2d keypoints to 3d keypoints. 
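As a rough illustration of the lifting convention the configs in this folder assume (a sketch with invented array names, not the mmpose API): a temporal window of 2D keypoints is flattened to `2 * 17` channels per frame for the TCN backbone, and the regression head predicts a single root-relative 3D pose for that window.

```python
import numpy as np

# Illustrative shapes only -- the names below are invented for this sketch.
num_keypoints = 17       # Human3.6M joints
receptive_field = 27     # 27 / 81 / 243 frames, depending on the config

# A temporal window of 2D keypoints (x, y) around one target frame.
keypoints_2d = np.random.rand(receptive_field, num_keypoints, 2)

# The TCN backbone consumes the window with keypoints flattened into
# channels, which is why the configs set `in_channels=2 * 17`.
tcn_input = keypoints_2d.reshape(receptive_field, num_keypoints * 2)

# The TemporalRegressionHead outputs one 3D pose (x, y, z) per window,
# root-relative because the codec uses `zero_center=True, root_index=0`.
predicted_3d = np.zeros((num_keypoints, 3))

print(tcn_input.shape, predicted_3d.shape)  # (27, 34) (17, 3)
```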
+ +## Results and Models + +### Human3.6m Dataset + +| Arch | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | Details and Download | +| :-------------------------------------------- | :---: | :-----: | :-----: | :-------------------------------------------: | :------------------------------------------: | :---------------------------------------------: | +| [VideoPose3D-supervised-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py) | 40.1 | 30.1 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-supervised-81frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py) | 39.1 | 29.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-supervised-243frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py) | 37.6 | 28.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-supervised-CPN-1frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py) | 53.0 | 41.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-supervised-CPN-243frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 47.9 | 38.0 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-semi-supervised-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py) | 57.2 | 42.4 | 54.2 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | +| [VideoPose3D-semi-supervised-CPN-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py) | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | 
[log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | [videpose3d_h36m.md](./h36m/videpose3d_h36m.md) | diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..c1190fe83ef95895726dadd9314db8907be9559e --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py @@ -0,0 +1,132 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=160, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.98, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(1, 1, 1, 1, 1), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + causal=False, + pad_video_seq=False, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + causal=False, + pad_video_seq=False, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + 
test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..3ef3df570b0bab3b66027c5c54acb0edd3ef694f --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py @@ -0,0 +1,132 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=200, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.98, end=200, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + 
pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..0d241c498f98e3f2e5e10c4e5434a82d218ab371 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py @@ -0,0 +1,128 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=160, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = 
val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..08bcda8ed76ebd08b8e525f904c41abb91d9a21e --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py @@ -0,0 +1,119 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = None + +# optimizer + +# learning policy + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + ), + traj_backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + traj_head=dict( + type='TrajectoryRegressionHead', + in_channels=1024, + num_joints=1, + loss=dict(type='MPJPELoss', use_target_weight=True), + decoder=codec, + ), + semi_loss=dict( + type='SemiSupervisionLoss', + joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], + warmup_iterations=1311376 // 64 // 8 * 5), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe'), + dict(type='MPJPE', mode='n-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..d145f05b17e917885bf76e7c51ed628b5b096d27 --- /dev/null +++ 
b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py @@ -0,0 +1,117 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = None + +# optimizer + +# learning policy + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + ), + traj_backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + traj_head=dict( + type='TrajectoryRegressionHead', + in_channels=1024, + num_joints=1, + loss=dict(type='MPJPELoss', use_target_weight=True), + decoder=codec, + ), + semi_loss=dict( + type='SemiSupervisionLoss', + joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], + warmup_iterations=1311376 // 64 // 8 * 5), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe'), + dict(type='MPJPE', mode='n-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..803f907b7bdc1d4cb0fe3496ad05322c48533cf9 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py @@ -0,0 +1,128 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=160, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + 
+auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py new file mode 100644 index 0000000000000000000000000000000000000000..4b370fe76eb80b292ef59a435c0cc0aa2d48f4b3 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py @@ -0,0 +1,128 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=160, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) 
+ +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=3, + kernel_sizes=(3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=81, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=81, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.md b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.md new file mode 100644 index 0000000000000000000000000000000000000000..069b8de2da41641bac980e8621c4ac47b00c0457 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.md @@ -0,0 +1,67 @@ + + +
+ +VideoPose3D (CVPR'2019) + +```bibtex +@inproceedings{pavllo20193d, +title={3d human pose estimation in video with temporal convolutions and semi-supervised training}, +author={Pavllo, Dario and Feichtenhofer, Christoph and Grangier, David and Auli, Michael}, +booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, +pages={7753--7762}, +year={2019} +} +``` + +
+ + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, +author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, +title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, +journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, +publisher = {IEEE Computer Society}, +volume = {36}, +number = {7}, +pages = {1325-1339}, +month = {jul}, +year = {2014} +} +``` + +
+ +Testing results on Human3.6M dataset with ground truth 2D detections, supervised training + +| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | +| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [VideoPose3D-supervised-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py) | 27 | 40.1 | 30.1 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | +| [VideoPose3D-supervised-81frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py) | 81 | 39.1 | 29.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | +| [VideoPose3D-supervised-243frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py) | 243 | 37.6 | 28.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | + +Testing results on Human3.6M dataset with CPN 2D detections1, supervised training + +| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | +| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [VideoPose3D-supervised-CPN-1frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py) | 1 | 53.0 | 41.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | +| [VideoPose3D-supervised-CPN-243frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 243 | 47.9 | 38.0 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | + +Testing results on Human3.6M dataset with ground truth 2D detections, semi-supervised training + +| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | +| :------------ | :-------------------------------------------------: | :-------------: | :---: | :-----: | :-----: | :-------------------------------------------------: | :-------------------------------------------------: | +| 10% S1 | [VideoPose3D-semi-supervised-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py) | 27 | 57.2 | 42.4 | 54.2 | 
[ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | + +Testing results on Human3.6M dataset with CPN 2D detections1, semi-supervised training + +| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | +| :------------ | :-------------------------------------------------: | :-------------: | :---: | :-----: | :-----: | :-------------------------------------------------: | :-------------------------------------------------: | +| 10% S1 | [VideoPose3D-semi-supervised-CPN-27frm](/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py) | 27 | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | + +1 CPN 2D detections are provided by [official repo](https://github.com/facebookresearch/VideoPose3D/blob/master/DATASETS.md). The reformatted version used in this repository can be downloaded from [train_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_train.npy) and [test_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_test.npy). diff --git a/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.yml b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.yml new file mode 100644 index 0000000000000000000000000000000000000000..818fe0483b6f455002d3600d61fa429c0db72ab3 --- /dev/null +++ b/mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/videopose3d_h36m.yml @@ -0,0 +1,102 @@ +Collections: +- Name: VideoPose3D + Paper: + Title: 3d human pose estimation in video with temporal convolutions and semi-supervised + training + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Pavllo_3D_Human_Pose_Estimation_in_Video_With_Temporal_Convolutions_and_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/videopose3d.md +Models: +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: &id001 + - VideoPose3D + Training Data: Human3.6M + Name: video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 40.0 + P-MPJPE: 30.1 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 38.9 + P-MPJPE: 29.2 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: 
video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 37.6 + P-MPJPE: 28.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 52.9 + P-MPJPE: 41.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 47.9 + P-MPJPE: 38.0 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 58.1 + N-MPJPE: 54.7 + P-MPJPE: 42.8 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth +- Config: configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 67.4 + N-MPJPE: 63.2 + P-MPJPE: 50.1 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth diff --git a/mmpose/configs/face_2d_keypoint/README.md b/mmpose/configs/face_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9f9370a754902883013e479779a5db7acb2c9699 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/README.md @@ -0,0 +1,16 @@ +# 2D Face Landmark Detection + +2D face landmark detection (also referred to as face alignment) is defined as the task of detecting the face keypoints from an input image. + +Normally, the input images are cropped face images, where the face locates at the center; +or the rough location (or the bounding box) of the hand is provided. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_face_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_face_demo.md) to run demos. + +
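For a quick start, a minimal inference sketch is shown below. It assumes the high-level `MMPoseInferencer` API and its `'face'` model alias are available in your installed mmpose version, and the image path is only a placeholder; refer to the demo document linked above for the supported command-line tools.

```python
from mmpose.apis import MMPoseInferencer

# Assumes the 'face' alias exists in your mmpose version; alternatively,
# pass an explicit config path from this folder via `pose2d=` together
# with matching weights via `pose2d_weights=`.
inferencer = MMPoseInferencer(pose2d='face')

# 'face.jpg' is a placeholder path to an image containing a face.
result_generator = inferencer('face.jpg', show=False)
result = next(result_generator)  # a dict holding the predicted keypoints
print(result['predictions'])
```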
diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/README.md b/mmpose/configs/face_2d_keypoint/rtmpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d0c7f55fb42d9501dbfac7511e1097d4a5aa8c1d --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/README.md @@ -0,0 +1,32 @@ +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. + +## Results and Models + +### COCO-WholeBody-Face Dataset + +Results on COCO-WholeBody-Face val set + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :----: | :------------------------------------------------------------------------------------: | +| RTMPose-m | 256x256 | 0.0466 | [rtmpose_coco_wholebody_face.md](./coco_wholebody_face/rtmpose_coco_wholebody_face.md) | + +### WFLW Dataset + +Results on WFLW dataset + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :--: | :---------------------------------------: | +| RTMPose-m | 256x256 | 4.01 | [rtmpose_wflw.md](./wflw/rtmpose_wflw.md) | + +### LaPa Dataset + +Results on LaPa dataset + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :--: | :---------------------------------------: | +| RTMPose-m | 256x256 | 1.29 | [rtmpose_lapa.md](./lapa/rtmpose_lapa.md) | diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..958a361c07a9dbfc45daabcab2fb08ba889e9525 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,231 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 60 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings 
+codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=68, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + 
dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..77d99bc63f7452b80e2983341794326a20c80fa1 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md @@ -0,0 +1,39 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0466 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.json) | diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..fdc2599e713aa710c102a71c67906090600ef6d6 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml @@ -0,0 +1,14 @@ +Models: +- Config: configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: COCO-WholeBody-Face + Name: rtmpose-m_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0466 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..abbb2ce985129538b7ecef8e5b1995bee1effa3a --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py @@ -0,0 +1,690 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, 
+ out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + 
((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), + (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + (90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + (96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + 
pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + 
pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..62fa305115e48a619966cdaa2ac9f03cce38bfa9 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py @@ -0,0 +1,691 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth') + ), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', 
direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + ((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), + (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + (90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 
1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + (96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + 
num_workers=10, + pin_memory=True, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..751bedffe77aa1dc08bf5360a1f3b5ea9781f209 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py @@ -0,0 +1,689 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = 
dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=90, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', 
input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + ((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), + (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + (90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + 
(96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( 
+ type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md new file mode 100644 index 0000000000000000000000000000000000000000..5f989fa7831ca3ebde51626f13c8e4db69a179ab --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md @@ -0,0 +1,71 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. +- `Face6` and `*` denote model trained on 6 public datasets: + - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) + - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) + - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) + - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) + - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) + - [LaPa](https://github.com/JDAI-CV/lapa-dataset) + +| Config | Input Size | NME
(LaPa) | FLOPS
(G) | Download | +| :--------------------------------------------------------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_face6-256x256.py) | 256x256 | 1.67 | 0.652 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | +| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_face6-256x256.py) | 256x256 | 1.59 | 1.119 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | +| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_face6-256x256.py) | 256x256 | 1.44 | 2.852 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml new file mode 100644 index 0000000000000000000000000000000000000000..38b8395bd90b23d25f4ad29f953c67ffb60cdfb2 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml @@ -0,0 +1,51 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO-Wholebody-Face + - WFLW + - 300W + - COFW + - Halpe + - LaPa + Name: rtmpose-t_8xb256-120e_face6-256x256 + Results: + - Dataset: Face6 + Metrics: + NME: 1.67 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-120e_face6-256x256 + Results: + - Dataset: Face6 + Metrics: + NME: 1.59 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-120e_face6-256x256 + Alias: face + Results: + - Dataset: Face6 + Metrics: + NME: 1.44 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..fee1201db1f56efd162292dbb2b6155b7865dced --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py @@ -0,0 +1,246 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) 
+randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/LaPa/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/', +# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + 
type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md new file mode 100644 index 0000000000000000000000000000000000000000..9638de7551c0e0cabaa2ca1ba606bf8abc42b311 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md @@ -0,0 +1,40 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+LaPa (AAAI'2020) + +```bibtex +@inproceedings{liu2020new, + title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, + author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, + booktitle={AAAI}, + pages={11637--11644}, + year={2020} +} +``` + +
+ +Results on LaPa val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py) | 256x256 | 1.29 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.json) | diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml new file mode 100644 index 0000000000000000000000000000000000000000..96acff8de6c25f064622a9711565ed0ffc594912 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py + In Collection: RTMPose + Alias: face + Metadata: + Architecture: + - RTMPose + Training Data: LaPa + Name: rtmpose-m_8xb64-120e_lapa-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 1.29 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..cbfd788d6062dc70aa3716920a189e681a393497 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py @@ -0,0 +1,231 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 60 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + 
head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=98, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/', +# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + 
momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..b0070258da1b81c6ee5bd7ebe198eae968067f80 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md @@ -0,0 +1,42 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py) | 256x256 | 4.01 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.json) | diff --git a/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..1112fdf69dc000a7c03d7d331b10d9649d173df8 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml @@ -0,0 +1,14 @@ +Models: +- Config: configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: WFLW + Name: rtmpose-m_8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.01 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md new file mode 100644 index 0000000000000000000000000000000000000000..ace8776c4e28e66559e4dcecec0785e6ef5a0771 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md @@ -0,0 +1,44 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+300W (IMAVIS'2016) + +```bibtex +@article{sagonas2016300, + title={300 faces in-the-wild challenge: Database and results}, + author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, + journal={Image and vision computing}, + volume={47}, + pages={3--18}, + year={2016}, + publisher={Elsevier} +} +``` + +
+ +Results on 300W dataset + +The model is trained on 300W train. + +| Arch | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | ckpt | log | +| :--------------------------------- | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------: | :--------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py) | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256_20211019.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml new file mode 100644 index 0000000000000000000000000000000000000000..58dcb4832ac5824e375f5d8dc66f6648626528f8 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml @@ -0,0 +1,23 @@ +Collections: +- Name: HRNetv2 + Paper: + Title: Deep High-Resolution Representation Learning for Visual Recognition + URL: https://ieeexplore.ieee.org/abstract/document/9052469/ + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnetv2.md +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: 300W + Name: td-hm_hrnetv2-w18_8xb64-60e_300w-256x256 + Results: + - Dataset: 300W + Metrics: + NME challenge: 5.64 + NME common: 2.92 + NME full: 3.45 + NME test: 4.1 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..52473a4664cca8266f603729d1a631aa6dc5b4ca --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py @@ -0,0 +1,161 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=1.5) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + 
block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Face300WDataset' +data_mode = 'topdown' +data_root = 'data/300w/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300w_valid.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.md new file mode 100644 index 0000000000000000000000000000000000000000..773bc602ae701884088dc372e4e8fd10202dfb81 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.md @@ -0,0 +1,42 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+300WLP (IEEE'2017) + +```bibtex +@article{zhu2017face, + title={Face alignment in full pose range: A 3d total solution}, + author={Zhu, Xiangyu and Liu, Xiaoming and Lei, Zhen and Li, Stan Z}, + journal={IEEE transactions on pattern analysis and machine intelligence}, + year={2017}, + publisher={IEEE} +} +``` + +
+ +Results on 300W-LP dataset + +The model is trained on 300W-LP train. + +| Arch | Input Size | NME*full* | NME*test* | ckpt | log | +| :------------------------------------------------- | :--------: | :------------------: | :------------------: | :------------------------------------------------: | :------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py) | 256x256 | 0.0413 | 0.04125 | [ckpt](https://download.openmmlab.com/mmpose/v1/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_w18_300wlp_256x256-fb433d21_20230922.pth) | [log](https://download.openmmlab.com/mmpose/v1/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_w18_300wlp_256x256-fb433d21_20230922.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.yml new file mode 100644 index 0000000000000000000000000000000000000000..844c15df6d77f726c49465b668dbed852204a36b --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.yml @@ -0,0 +1,20 @@ +Collections: +- Name: HRNetv2 + Paper: + Title: Deep High-Resolution Representation Learning for Visual Recognition + URL: https://ieeexplore.ieee.org/abstract/document/9052469/ + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnetv2.md +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: 300W-LP + Name: td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256 + Results: + - Dataset: 300W-LP + Metrics: + NME full: 0.0413 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_w18_300wlp_256x256-fb433d21_20230922.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..e96a6bf0ebbc5055d6f19ca7803eec647dc28448 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py @@ -0,0 +1,160 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=1.5) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( 
+ num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Face300WLPDataset' +data_mode = 'topdown' +data_root = 'data/300wlp/' +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300wlp_train.json', + data_prefix=dict(img='train/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300wlp_valid.json', + data_prefix=dict(img='val/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a8b7cf98fa119c4a1065484b24bd768196a3622d --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,57 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
+ +
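+As a rough illustration of this decoding step, here is a minimal NumPy sketch (the `decode_heatmaps` helper is hypothetical and is not MMPose's codec API): the configs in this folder predict `K` heatmaps of size 64x64 for a 256x256 input crop, so taking each channel's argmax and rescaling by the heatmap-to-input ratio recovers keypoint coordinates in the crop; MMPose then maps these back to the original image using the detected bounding box, and the real codecs add refinements such as unbiased (DARK) decoding.
+
+```python
+import numpy as np
+
+
+def decode_heatmaps(heatmaps, input_size=(256, 256)):
+    """Toy decoder: (K, H, W) heatmaps -> (K, 2) xy coords in the input crop.
+
+    Mirrors the idea of the MSRAHeatmap-style codecs used below
+    (64x64 heatmaps for a 256x256 crop); illustration only.
+    """
+    num_kpts, h, w = heatmaps.shape
+    flat = heatmaps.reshape(num_kpts, -1)
+    # per-keypoint peak location and confidence
+    ys, xs = np.unravel_index(flat.argmax(axis=1), (h, w))
+    scores = flat.max(axis=1)
+    # rescale from heatmap resolution to the network input resolution
+    coords = np.stack(
+        [xs * input_size[0] / w, ys * input_size[1] / h], axis=1)
+    return coords.astype(np.float32), scores
+
+
+# e.g. a random 68-channel map shaped like a 300W-style head output
+kpts, conf = decode_heatmaps(np.random.rand(68, 64, 64))
+print(kpts.shape, conf.shape)  # (68, 2) (68,)
+```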
+ +## Results and Models + +### 300W Dataset + +Results on 300W dataset + +| Model | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | Details and Download | +| :---------: | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------------: | +| HRNetv2-w18 | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [hrnetv2_300w.md](./300w/hrnetv2_300w.md) | + +### AFLW Dataset + +Results on AFLW dataset + +| Model | Input Size | NME*full* | NME*frontal* | Details and Download | +| :--------------: | :--------: | :------------------: | :---------------------: | :-------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 1.35 | 1.19 | [hrnetv2_dark_aflw.md](./aflw/hrnetv2_dark_aflw.md) | +| HRNetv2-w18 | 256x256 | 1.41 | 1.27 | [hrnetv2_aflw.md](./aflw/hrnetv2_aflw.md) | + +### COCO-WholeBody-Face Dataset + +Results on COCO-WholeBody-Face val set + +| Model | Input Size | NME | Details and Download | +| :--------------: | :--------: | :----: | :----------------------------------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.0513 | [hrnetv2_dark_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md) | +| SCNet-50 | 256x256 | 0.0567 | [scnet_coco_wholebody_face.md](./coco_wholebody_face/scnet_coco_wholebody_face.md) | +| HRNetv2-w18 | 256x256 | 0.0569 | [hrnetv2_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_coco_wholebody_face.md) | +| ResNet-50 | 256x256 | 0.0582 | [resnet_coco_wholebody_face.md](./coco_wholebody_face/resnet_coco_wholebody_face.md) | +| HourglassNet | 256x256 | 0.0587 | [hourglass_coco_wholebody_face.md](./coco_wholebody_face/hourglass_coco_wholebody_face.md) | +| MobileNet-v2 | 256x256 | 0.0611 | [mobilenetv2_coco_wholebody_face.md](./coco_wholebody_face/mobilenetv2_coco_wholebody_face.md) | + +### COFW Dataset + +Results on COFW dataset + +| Model | Input Size | NME | Details and Download | +| :---------: | :--------: | :--: | :---------------------------------------: | +| HRNetv2-w18 | 256x256 | 3.48 | [hrnetv2_cofw.md](./cofw/hrnetv2_cofw.md) | + +### WFLW Dataset + +Results on WFLW dataset + +| Model | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | Details and Download | +| :-----: | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------------------: | +| HRNetv2-w18+Dark | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [hrnetv2_dark_wflw.md](./wflw/hrnetv2_dark_wflw.md) | +| HRNetv2-w18+AWing | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [hrnetv2_awing_wflw.md](./wflw/hrnetv2_awing_wflw.md) | +| HRNetv2-w18 | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [hrnetv2_wflw.md](./wflw/hrnetv2_wflw.md) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md new file mode 100644 index 0000000000000000000000000000000000000000..70c59ac2e4ca7a58db9057f01d2af3c17ce5785d --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md @@ -0,0 +1,43 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
+ +Results on AFLW dataset + +The model is trained on AFLW train and evaluated on AFLW full and frontal. + +| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | +| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) | 256x256 | 1.41 | 1.27 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256_20210125.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..06d2d43b9c1983c2c4d43d715b08721a822ffed3 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: AFLW + Name: td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256 + Results: + - Dataset: AFLW + Metrics: + NME frontal: 1.27 + NME full: 1.41 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md new file mode 100644 index 0000000000000000000000000000000000000000..a51c473d3b243f7f773a851bb69c425b14443767 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md @@ -0,0 +1,60 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
+ +Results on AFLW dataset + +The model is trained on AFLW train and evaluated on AFLW full and frontal. + +| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | +| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py) | 256x256 | 1.35 | 1.19 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark_20210125.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..54c09538974835c5a701de61f41c812d2813940a --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: AFLW + Name: td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256 + Results: + - Dataset: AFLW + Metrics: + NME frontal: 1.19 + NME full: 1.34 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..a157a01442f155d34f7fd330014028bc77c4f888 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py @@ -0,0 +1,156 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 
4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=19, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AFLWDataset' +data_mode = 'topdown' +data_root = 'data/aflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', norm_mode='use_norm_item', norm_item='bbox_size') +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..44100cebe60bbe023837dba7586f2c913b731918 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py @@ -0,0 +1,160 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=19, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AFLWDataset' +data_mode = 'topdown' +data_root = 'data/aflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', norm_mode='use_norm_item', norm_item='bbox_size') +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..6099dcf06dcf8a9e988b77623bd6f3a7ee7883a7 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md @@ -0,0 +1,39 @@ + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hourglass_52](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0587 | [ckpt](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..704c01983e4ab53f87a0a1ec798b49bf4b8b5e6f --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml @@ -0,0 +1,14 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: Hourglass + Metadata: + Architecture: + - Hourglass + Training Data: COCO-WholeBody-Face + Name: td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0587 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..d16ea2bc7fa50b3b8df57219bc5e3fada52c3558 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md @@ -0,0 +1,39 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0569 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..0a4a38d5b78e5390d60c71ba43d663fafb51d279 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml @@ -0,0 +1,14 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COCO-WholeBody-Face + Name: td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0569 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..fd059ee23cc17a82ed71cfc0ca089785ea6e150e --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md @@ -0,0 +1,56 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0513 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..cedc4950f9d2fba11e9a18ce2ca5942dcc2492eb --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: COCO-WholeBody-Face + Name: td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0513 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..d551a6c9abc3a4da60aaa90dac1ddbb9802ddd83 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md @@ -0,0 +1,38 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_mobilenetv2](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0611 | [ckpt](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..2bd4352119546e2670f4b6bd16c12d37213b099b --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO-WholeBody-Face + Name: td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0611 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..e4609385bdc230558581a05f8ee0fe73b6b248b2 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md @@ -0,0 +1,55 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_res50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0582 | [ckpt](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..ef91a3da21c316335caf8d88c0ebde9a6e1bd4d7 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: COCO-WholeBody-Face + Name: td-hm_res50_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0582 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md new file mode 100644 index 0000000000000000000000000000000000000000..2710c2ff39bb02fa46949f89592c8c116234a63b --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md @@ -0,0 +1,38 @@ + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_scnet_50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0567 | [ckpt](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml new file mode 100644 index 0000000000000000000000000000000000000000..d3b052ffc51e133706d36246caae80563ac7edcb --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml @@ -0,0 +1,15 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - SCNet + Training Data: COCO-WholeBody-Face + Name: td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0567 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0e6f5c5c9084bf03ec95e203c57bad4a91ce7179 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=68, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset 
settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..dfeac90ced1307eeaa8fe9c83c59a3ae67b1cb23 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,156 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + 
block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..3c34f9aa5dc733f6dd1363212791b7f2c5b7f447 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,160 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + 
+# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1a8629fc7448a4edc5e3a98b554b615efb7102 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# 
optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=68, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0070e55d69d26b5e50edfef7868dc4faa5b0b5f4 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=68, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..8f79f4b1d362b527cd684ae927e61cf17ec821cd --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + 
by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=68, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md new file mode 100644 index 0000000000000000000000000000000000000000..b99f91f3d180287081543a27c6e61818092b3b1c --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md @@ -0,0 +1,42 @@ + + +
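Every config above closes with `val_evaluator = dict(type='NME', norm_mode='keypoint_distance')`. As a rough sketch of what such a metric computes (this is the usual normalized-mean-error definition; the actual normalization landmarks are dataset-specific and selected inside MMPose, so the indices below are placeholders, not the library's choice):

```python
import numpy as np


def normalized_mean_error(pred, gt, norm_idx=(0, 1)):
    """Rough NME sketch: mean L2 keypoint error divided by the distance
    between two reference landmarks (e.g. the outer eye corners).
    `norm_idx` is a placeholder; the real indices depend on the dataset.

    pred, gt: arrays of shape (K, 2).
    """
    per_kpt_err = np.linalg.norm(pred - gt, axis=1)            # (K,)
    norm_dist = np.linalg.norm(gt[norm_idx[0]] - gt[norm_idx[1]])
    return per_kpt_err.mean() / norm_dist


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    gt = rng.uniform(0, 256, size=(29, 2))                     # e.g. 29 COFW-style landmarks
    pred = gt + rng.normal(scale=2.0, size=gt.shape)           # small perturbation
    print(f'NME: {normalized_mean_error(pred, gt):.4f}')
```

Lower is better, which is why the checkpoint hook above uses `save_best='NME', rule='less'`.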
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
+ +Results on COFW dataset + +The model is trained on COFW train. + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py) | 256x256 | 3.48 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256_20211019.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml new file mode 100644 index 0000000000000000000000000000000000000000..733e275685de62a483d48e2ec7eedf347d6d0e51 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml @@ -0,0 +1,14 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COFW + Name: td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256 + Results: + - Dataset: COFW + Metrics: + NME: 3.48 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..7c52342e950246755a9e5c0ed60302da936bb6fe --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py @@ -0,0 +1,161 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=50, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=1.5) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + 
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=29, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'COFWDataset' +data_mode = 'topdown' +data_root = 'data/cofw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..53d5c3b36d7c61e9f6db3542b047adda70e70a86 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md @@ -0,0 +1,59 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+AdaptiveWingloss (ICCV'2019) + +```bibtex +@inproceedings{wang2019adaptive, + title={Adaptive wing loss for robust face alignment via heatmap regression}, + author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, + booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, + pages={6971--6981}, + year={2019} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18_awing](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py) | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing_20211212.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..6ba45c82b7499f09e0664ef42589e46ec298aca9 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml @@ -0,0 +1,21 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + - AdaptiveWingloss + Training Data: WFLW + Name: td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.59 + NME expression: 4.28 + NME illumination: 3.97 + NME makeup: 3.87 + NME occlusion: 4.78 + NME pose: 6.94 + NME test: 4.02 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..476afb6c01c1a2f57c2030f673a913654b5a4698 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md @@ -0,0 +1,59 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py) | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark_20210125.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..bbb82185cf0f4be034cb31f4a8166128d522938e --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml @@ -0,0 +1,21 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: WFLW + Name: td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.56 + NME expression: 4.29 + NME illumination: 3.96 + NME makeup: 3.89 + NME occlusion: 4.78 + NME pose: 6.98 + NME test: 3.98 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..c9b8eec0669a73d60b2023eefa84204e71c7d1d0 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md @@ -0,0 +1,42 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py) | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_20210125.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..9124324f8b8fe8ca7d1835471b130013cee13efa --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml @@ -0,0 +1,20 @@ +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: WFLW + Name: td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.58 + NME expression: 4.33 + NME illumination: 3.99 + NME makeup: 3.94 + NME occlusion: 4.83 + NME pose: 6.97 + NME test: 4.06 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ae373c816aec3e09f7780f304ce11687e48b8e32 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + 
num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ada24a97bb7954d42b2d300ea9e8a14b494da938 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# 
model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..973a850f3fdf2ab6300e8e56c4e1b92b15d3f63a --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py @@ -0,0 +1,162 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + 
+# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/README.md 
b/mmpose/configs/face_2d_keypoint/topdown_regression/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5d20cb9a311c033eef1b8668b2d9d0e5c56e6514 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/README.md @@ -0,0 +1,19 @@ +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. In the second stage, regression-based methods directly regress the keypoint coordinates from the features extracted within the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). + +
+ +
+ +## Results and Models + +### WFLW Dataset + +Result on WFLW test set + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) | +| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) | +| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..f1d9629d0ad0caced74cb3b0f4781080c302588f --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md @@ -0,0 +1,75 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+SoftWingloss (TIP'2021) + +```bibtex +@article{lin2021structure, + title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, + author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, + journal={IEEE Transactions on Image Processing}, + year={2021}, + publisher={IEEE} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..bf7c8436e09a88b4cb8c42a560b567c1b51862a7 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml @@ -0,0 +1,22 @@ +Collections: +- Name: SoftWingloss + Paper: + Title: Structure-Coherent Deep Feature Learning for Robust Face Alignment + URL: https://ieeexplore.ieee.org/document/9442331/ + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/softwingloss.md +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py + In Collection: SoftWingloss + Metadata: + Architecture: + - DeepPose + - ResNet + - SoftWingloss + Training Data: WFLW + Name: td-reg_res50_softwingloss_8xb64-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.44 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..1ec3e76dbad52d30e8ce1c458592ec13e6c8ee31 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md @@ -0,0 +1,58 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..f59689a7a9977e9599c96c41021b980f6d78555f --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml @@ -0,0 +1,21 @@ +Collections: +- Name: ResNet + Paper: + Title: Deep residual learning for image recognition + URL: http://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/resnet.md +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py + In Collection: ResNet + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: WFLW + Name: td-reg_res50_8xb64-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.88 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md new file mode 100644 index 0000000000000000000000000000000000000000..51477143d11c1755f9280026641ba68b954ec99e --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md @@ -0,0 +1,76 @@ + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+Wingloss (CVPR'2018) + +```bibtex +@inproceedings{feng2018wing, + title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, + author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, + booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, + year={2018}, + pages ={2235-2245}, + organization={IEEE} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) | diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml new file mode 100644 index 0000000000000000000000000000000000000000..da737de7cd76afd4e8ee962730b30f1961793de0 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml @@ -0,0 +1,23 @@ +Collections: +- Name: Wingloss + Paper: + Title: Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural + Networks + URL: http://openaccess.thecvf.com/content_cvpr_2018/html/Feng_Wing_Loss_for_CVPR_2018_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/wingloss.md +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py + In Collection: Wingloss + Metadata: + Architecture: + - DeepPose + - ResNet + - WingLoss + Training Data: WFLW + Name: td-reg_res50_wingloss_8xb64-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.67 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..2742f497b8fbdd7889281c660b9ccd804ccf754d --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + 
train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..eb4199073d712024f0495746ad902f4ea4dd9052 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='SoftWingLoss', use_target_weight=True), + decoder=codec), + train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 
'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ab519cd401bbd07212e4834c9a5d655418b49fb1 --- /dev/null +++ b/mmpose/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='WingLoss', use_target_weight=True), + decoder=codec), + train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + 
type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/README.md b/mmpose/configs/fashion_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e7d761067afc34b6a7249faa187752b39ca24ffd --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/README.md @@ -0,0 +1,7 @@ +# 2D Fashion Landmark Detection + +2D fashion landmark detection (also referred to as fashion alignment) aims to detect the key-point located at the functional region of clothes, for example the neckline and the cuff. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_fashion_landmark.md) to prepare data. diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..865a3b823ebd65f3abd707a0db4c931cd99abcd4 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,42 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
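For intuition, a keypoint location can be read off a predicted heatmap by taking its peak. The minimal NumPy sketch below is only illustrative; the configs that follow delegate this to MMPose codecs (e.g. `MSRAHeatmap`, `UDPHeatmap`), which additionally apply sub-pixel refinement and map the result back to input-image coordinates.

```python
import numpy as np


def decode_heatmaps(heatmaps):
    """Toy decoder: use the argmax of each (K, H, W) heatmap as the keypoint.

    Returns integer (x, y) coordinates in heatmap space and the peak values as
    confidence scores.
    """
    num_keypoints, height, width = heatmaps.shape
    flat = heatmaps.reshape(num_keypoints, -1)
    ys, xs = np.unravel_index(flat.argmax(axis=1), (height, width))
    return np.stack([xs, ys], axis=1), flat.max(axis=1)


# Example: random 8-channel heatmaps at the 64x48 (H x W) resolution used below.
coords, scores = decode_heatmaps(np.random.rand(8, 64, 48))
print(coords.shape, scores.shape)  # (8, 2) (8,)
```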
+ +
+ +## Results and Models + +### DeepFashion Dataset + +Results on DeepFashion dataset with ResNet backbones: + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-----------------: | :--------: | :-----: | :--: | :--: | :----------------------------------------------------------: | +| HRNet-w48-UDP-Upper | 256x192 | 96.1 | 60.9 | 15.1 | [hrnet_deepfashion.md](./deepfashion/hrnet_deepfashion.md) | +| HRNet-w48-UDP-Lower | 256x192 | 97.8 | 76.1 | 8.9 | [hrnet_deepfashion.md](./deepfashion/hrnet_deepfashion.md) | +| HRNet-w48-UDP-Full | 256x192 | 98.3 | 67.3 | 11.7 | [hrnet_deepfashion.md](./deepfashion/hrnet_deepfashion.md) | +| ResNet-50-Upper | 256x192 | 95.4 | 57.8 | 16.8 | [resnet_deepfashion.md](./deepfashion/resnet_deepfashion.md) | +| ResNet-50-Lower | 256x192 | 96.5 | 74.4 | 10.5 | [resnet_deepfashion.md](./deepfashion/resnet_deepfashion.md) | +| ResNet-50-Full | 256x192 | 97.7 | 66.4 | 12.7 | [resnet_deepfashion.md](./deepfashion/resnet_deepfashion.md) | + +### DeepFashion2 Dataset + +Results on DeepFashion2 dataset + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-----------------------------: | :--------: | :-----: | :---: | :--: | :-----------------------------------------------------------: | +| ResNet-50-Short-Sleeved-Shirt | 256x192 | 0.988 | 0.703 | 10.2 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Long-Sleeved-Shirt | 256x192 | 0.973 | 0.587 | 16.6 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Short-Sleeved-Outwear | 256x192 | 0.966 | 0.408 | 24.0 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Long-Sleeved-Outwear | 256x192 | 0.987 | 0.517 | 18.1 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Vest | 256x192 | 0.981 | 0.643 | 12.7 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Sling | 256x192 | 0.940 | 0.557 | 21.6 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Shorts | 256x192 | 0.975 | 0.682 | 12.4 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Trousers | 256x192 | 0.973 | 0.625 | 14.8 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Skirt | 256x192 | 0.952 | 0.653 | 16.6 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Short-Sleeved-Dress | 256x192 | 0.980 | 0.603 | 15.6 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Long-Sleeved-Dress | 256x192 | 0.976 | 0.518 | 20.1 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Vest-Dress | 256x192 | 0.980 | 0.600 | 16.0 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | +| ResNet-50-Sling-Dress | 256x192 | 0.967 | 0.544 | 19.5 | [res50_deepfashion2.md](./deepfashion2/res50_deepfashion2.md) | diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.md b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.md new file mode 100644 index 0000000000000000000000000000000000000000..2d5e382c927535caafb94b3c51fb1cc7b9290c04 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.md @@ -0,0 +1,77 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+DeepFashion (CVPR'2016) + +```bibtex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` + +
+ + + +
+DeepFashion (ECCV'2016) + +```bibtex +@inproceedings{liuYLWTeccv16FashionLandmark, + author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, + title = {Fashion Landmark Detection in the Wild}, + booktitle = {European Conference on Computer Vision (ECCV)}, + month = {October}, + year = {2016} + } +``` + +
+ +Results on DeepFashion val set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :---- | :-------------------------------------------------------: | :--------: | :-----: | :--: | :--: | :-------------------------------------------------------: | :------------------------------------------------------: | +| upper | [pose_hrnet_w48_udp](td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py) | 256x192 | 96.1 | 60.9 | 15.1 | [ckpt](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192-de7c0eb1_20230810.pth) | [log](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192-de7c0eb1_20230810.log) | +| lower | [pose_hrnet_w48_udp](td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py) | 256x192 | 97.8 | 76.1 | 8.9 | [ckpt](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192-ddaf747d_20230810.pth) | [log](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192-ddaf747d_20230810.log) | +| full | [pose_hrnet_w48_udp](td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py) | 256x192 | 98.3 | 67.3 | 11.7 | [ckpt](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192-7ab504c7_20230810.pth) | [log](https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192-7ab504c7_20230810.log) | + +Note: Due to time constraints, we have only trained the models listed above. We warmly welcome any contributions if you can successfully reproduce the results from the paper!
diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.yml b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.yml new file mode 100644 index 0000000000000000000000000000000000000000..06c297ef8ed5409ecae5c0595fa9d4e8c5f4696a --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/hrnet_deepfashion.yml @@ -0,0 +1,45 @@ +Models: +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - HRNet + - UDP + Training Data: DeepFashion + Name: td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 76.1 + EPE: 8.9 + PCK@0.2: 97.8 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192-ddaf747d_20230810.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: DeepFashion + Name: td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 60.9 + EPE: 15.1 + PCK@0.2: 96.1 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192-de7c0eb1_20230810.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: DeepFashion + Name: td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 67.3 + EPE: 11.7 + PCK@0.2: 98.3 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192-7ab504c7_20230810.pth diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.md b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.md new file mode 100644 index 0000000000000000000000000000000000000000..cb5c3c1c84fb6640bf427c39c559c98d75fb274c --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.md @@ -0,0 +1,77 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+DeepFashion (CVPR'2016) + +```bibtex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` + +
+ + + +
+DeepFashion (ECCV'2016) + +```bibtex +@inproceedings{liuYLWTeccv16FashionLandmark, + author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, + title = {Fashion Landmark Detection in the Wild}, + booktitle = {European Conference on Computer Vision (ECCV)}, + month = {October}, + year = {2016} + } +``` + +
+ +Results on DeepFashion val set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :---- | :-------------------------------------------------------: | :--------: | :-----: | :--: | :--: | :-------------------------------------------------------: | :------------------------------------------------------: | +| upper | [pose_resnet_50](td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py) | 256x192 | 95.4 | 57.8 | 16.8 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_upper_256x192-41794f03_20210124.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_upper_256x192_20210124.log.json) | +| lower | [pose_resnet_50](td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py) | 256x192 | 96.5 | 74.4 | 10.5 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_lower_256x192-1292a839_20210124.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_lower_256x192_20210124.log.json) | +| full | [pose_resnet_50](td-hm_res50_8xb64-210e_deepfashion_full-256x192.py) | 256x192 | 97.7 | 66.4 | 12.7 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_full_256x192-0dbd6e42_20210124.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_full_256x192_20210124.log.json) | + +Note: Due to time constraints, we have only trained resnet50 models. We warmly welcome any contributions if you can successfully reproduce the results from the paper! diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.yml b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c382ee2d5cb792cbf1e2d135a27a435dbd52390 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/resnet_deepfashion.yml @@ -0,0 +1,51 @@ +Collections: +- Name: SimpleBaseline2D + Paper: + Title: Simple baselines for human pose estimation and tracking + URL: http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html + README: https://github.com/open-mmlab/mmpose/blob/master/docs/en/papers/algorithms/simplebaseline2d.md +Models: +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: DeepFashion + Name: td-hm_res50_8xb64-210e_deepfashion_upper-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 57.8 + EPE: 16.8 + PCK@0.2: 95.4 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_upper_256x192-41794f03_20210124.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion + Name: td-hm_res50_8xb64-210e_deepfashion_lower-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 74.4 + EPE: 10.5 + PCK@0.2: 96.5 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_lower_256x192-1292a839_20210124.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_full-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion + Name:
td-hm_res50_8xb64-210e_deepfashion_full-256x192 + Results: + - Dataset: DeepFashion + Metrics: + AUC: 66.4 + EPE: 12.7 + PCK@0.2: 97.7 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion_full_256x192-0dbd6e42_20210124.pth diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4a30ead782415117e309c6bba4da904740e6c884 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py @@ -0,0 +1,169 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=8, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + 
dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..0a86c38ba8161bdb129db09e87d64c964242877a --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py @@ -0,0 +1,169 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=4, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + 
test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_lower_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_lower_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_lower_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7d6af18fd947fb480b6977aa9f3b28ee0e6c1e30 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py @@ -0,0 +1,169 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=6, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8977c25b56930ca89774a4147edc385b994eeb7f --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_full-256x192.py @@ -0,0 +1,26 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py' + +# codec settings +codec 
= dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..595035b132f954cfcd58c8b23de410ef6f710e8b --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_lower-256x192.py @@ -0,0 +1,26 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py' + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..777ffddb22047daf7b1183f530f8509495fc92ce --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_upper-256x192.py @@ -0,0 +1,26 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py' + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] diff --git 
a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7a80d59f8c81ffdbd2f8e3764c32019561812b --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_full-256x192.py @@ -0,0 +1,42 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict(in_channels=48)) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..a26e3f0cd43950ec6e56f1ef02b8c726f2abce4b --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_lower-256x192.py @@ -0,0 +1,42 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py' # noqa + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict(in_channels=48)) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cd619bd96307420cbf6885a73df2b4f5b2635783 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_8xb32-210e_deepfashion_upper-256x192.py @@ -0,0 +1,42 @@ +_base_ = './td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py' # noqa + +# 
automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict(in_channels=48)) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..5445d7d377ddae776760b81c6e12249d697a1928 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py @@ -0,0 +1,31 @@ +_base_ = './td-hm_hrnet-w48_8xb32-210e_deepfashion_full-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c5c0966653b59f4a5e84f188bd226793ec8ab6 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py @@ -0,0 +1,31 @@ +_base_ = './td-hm_hrnet-w48_8xb32-210e_deepfashion_lower-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + 
dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..706a87da84d006582078f34774b70a70e38d553f --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py @@ -0,0 +1,31 @@ +_base_ = './td-hm_hrnet-w48_8xb32-210e_deepfashion_upper-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +model = dict( + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=False)) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..57e9558f7602b07b26fbb198e4d6fb3233e2e9e8 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_full-256x192.py @@ -0,0 +1,8 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_full-256x192.py' + +model = dict( + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..0073adfdfbcc9cd1695f6fc28776da6df4fa110b --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_lower-256x192.py @@ -0,0 +1,8 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py' + +model = dict( + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_upper-256x192.py 
b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cf2198fa2804547f17afd037e4f4e282f4ca2b63 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_upper-256x192.py @@ -0,0 +1,8 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py' + +model = dict( + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..04dee6d3a5f4811588fa42d6fa821e9d1883b52e --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_full-256x192.py @@ -0,0 +1,13 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_full-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet152'))) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ef4b3d57d300f7645c08ba5e3f4de378b449c80f --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_lower-256x192.py @@ -0,0 +1,13 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet152'))) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..122ad6817ac9d7d763b99d72f01e4f69b1721953 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_upper-256x192.py @@ -0,0 +1,13 @@ +_base_ = './td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py' + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +model = dict( + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet152'))) + +train_dataloader = dict(batch_size=32) diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_full-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_full-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..292e83cb12dfcfeb4193d2be8d9844cf11816a4f 
--- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_full-256x192.py @@ -0,0 +1,140 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=8, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='full', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_full_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..51e4ddfcbd251a47925682edd4017a18a8af0f03 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py @@ -0,0 +1,140 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=4, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_lower_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_lower_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='lower', + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/fld_lower_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..29663966904fbd994d274b4a5ecf8c6393ec8ad5 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py @@ -0,0 +1,140 @@ +_base_ = '../../../_base_/default_runtime.py' + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=6, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashionDataset' +data_mode = 'topdown' +data_root = 'data/fld/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +test_pipeline = val_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_train.json', + data_prefix=dict(img='img/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_val.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=val_pipeline, + 
)) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + subset='upper', + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/fld_upper_test.json', + data_prefix=dict(img='img/'), + test_mode=True, + pipeline=test_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md new file mode 100644 index 0000000000000000000000000000000000000000..1dcfd593133c95f744869ff23bd2ec12a54e187c --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md @@ -0,0 +1,67 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+DeepFashion2 (CVPR'2019) + +```bibtex +@article{DeepFashion2, + author = {Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo}, + title={A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images}, + journal={CVPR}, + year={2019} +} +``` + +
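+A minimal usage sketch, assuming mmpose >= 1.0 is installed and commands are run
+from the repository root; the config/checkpoint pair and `demo.jpg` below are
+placeholders taken from the table that follows.
+
+```python
+from mmpose.apis import init_model, inference_topdown
+
+# Any config/checkpoint pair from the table below works; the config path is
+# relative to the mmpose repository root.
+cfg = ('configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/'
+       'td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py')
+ckpt = ('https://download.openmmlab.com/mmpose/fashion/resnet/'
+        'res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth')
+
+model = init_model(cfg, ckpt, device='cpu')
+
+# Without explicit bboxes, the whole image is treated as a single top-down crop.
+results = inference_topdown(model, 'demo.jpg')
+print(results[0].pred_instances.keypoints.shape)  # expected: (1, 294, 2) - 294 DeepFashion2 keypoints
+```
+
+Training and evaluation presumably follow the standard entry points
+(`tools/train.py <config>` and `tools/test.py <config> <checkpoint>`); the exact
+launch commands are not listed on this page.
+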
+ +Results on DeepFashion2 val set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :-------------------- | :-------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------: | :-------------------------------------------------: | +| short_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py) | 256x192 | 0.988 | 0.703 | 10.2 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192_20221208.log.json) | +| long_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py) | 256x192 | 0.973 | 0.587 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192_20221208.log.json) | +| short_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py) | 256x192 | 0.966 | 0.408 | 24.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192_20221208.log.json) | +| long_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py) | 256x192 | 0.987 | 0.517 | 18.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192_20221208.log.json) | +| vest | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py) | 256x192 | 0.981 | 0.643 | 12.7 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192_20221208.log.json) | +| sling | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py) | 256x192 | 0.940 | 0.557 | 21.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192_20221208.log.json) | +| shorts | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py) | 256x192 | 0.975 | 0.682 | 12.4 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192_20221208.log.json) | +| trousers | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py) | 256x192 | 
0.973 | 0.625 | 14.8 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192_20221208.log.json) | +| skirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py) | 256x192 | 0.952 | 0.653 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192_20221208.log.json) | +| short_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py) | 256x192 | 0.980 | 0.603 | 15.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192_20221208.log.json) | +| long_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py) | 256x192 | 0.976 | 0.518 | 20.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192_20221208.log.json) | +| vest_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py) | 256x192 | 0.980 | 0.600 | 16.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192_20221208.log.json) | +| sling_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py) | 256x192 | 0.967 | 0.544 | 19.5 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192_20221208.log.json) | diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml new file mode 100644 index 0000000000000000000000000000000000000000..28825fa01100a9375f4640eb89575e829608ac37 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml @@ -0,0 +1,185 @@ +Models: +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: DeepFashion2 + Name: td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.703 + EPE: 10.2 + PCK@0.2: 0.988 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth +- Config: 
configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.587 + EPE: 16.5 + PCK@0.2: 0.973 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.408 + EPE: 24.0 + PCK@0.2: 0.966 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.517 + EPE: 18.1 + PCK@0.2: 0.987 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-vest-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.643 + EPE: 12.7 + PCK@0.2: 0.981 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-sling-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.557 + EPE: 21.6 + PCK@0.2: 0.94 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.682 + EPE: 12.4 + PCK@0.2: 0.975 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.625 + EPE: 14.8 + 
PCK@0.2: 0.973 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.653 + EPE: 16.6 + PCK@0.2: 0.952 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.603 + EPE: 15.6 + PCK@0.2: 0.98 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.518 + EPE: 20.1 + PCK@0.2: 0.976 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.6 + EPE: 16.0 + PCK@0.2: 0.98 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.544 + EPE: 19.5 + PCK@0.2: 0.967 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..09dfaaa390bb2020e4a511d6ba111d35d5fa4378 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = 
dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_long_sleeved_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f0e6f0c63218874f4e40bdd06eb0cbc57b9365a7 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy 
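+# 500-iteration linear warm-up, then the learning rate drops by 10x at epochs 170 and 200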
+param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_skirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_skirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..9bed7421991041145f028e2b91689b8c5125d205 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, 
+ by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_vest_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_vest_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..617e59ae74be40511256c2b9e358300ea2348f27 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, 
+ milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_trousers_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_trousers_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3b2774fcaedf9c7ace5a335775011e6c0a7d29 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR 
based on the actual training batch size +auto_scale_lr = dict(base_batch_size=192) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_shorts_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_shorts_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..0bfcabaa5478596cc026309e5f57e6ea5db83abc --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr 
= dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_short_sleeved_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f627eb182c90b57ae53a4a9141f00ed333d3e229 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + 
logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_sling_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_sling_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8b59607060c41a8ddbb4d38c5acc41e243cd2e96 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', 
rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_sling_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_sling_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4249d5a8971e80a4e068e51543b9191f36488542 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 
256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_vest_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_vest_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..4161952dcf31904e8df8c70ff25ca207c1cea2ae --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings 
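+# Note on the head below: out_channels=294 matches the full DeepFashion2 landmark
+# vocabulary (294 landmarks defined in total across the 13 clothing categories).
+# Landmarks that are not annotated for the current category are expected to carry
+# zero target weights, so with use_target_weight=True they should not contribute
+# to the KeypointMSELoss.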
+model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_shirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_short_sleeved_shirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..36e0318bf7a954fdbd35a8b59219a6cde2396df2 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_outwear_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/' + 'deepfashion2_long_sleeved_outwear_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..f82e3cb5fb04011130521a35080b00f01a70ac68 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_shirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_long_sleeved_shirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..30db99de9e96eaede42332daae3d55f578b941f2 --- /dev/null +++ b/mmpose/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_outwear_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/' + 'deepfashion2_short_sleeved_outwear_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/README.md b/mmpose/configs/hand_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6f7758290eb914b88662e685135748c1fb5f665d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/README.md @@ -0,0 +1,18 @@ +# 2D Hand Pose Estimation + +2D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. + +Normally, the input images are cropped hand images, where the hand locates at the center; +or the rough location (or the bounding box) of the hand is provided. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_hand_demo.md) to run demos. + +
+ +
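+As a quick sanity check (the demo documentation linked above remains the reference), hand keypoints can also be obtained through the high-level `MMPoseInferencer` API. The snippet below is a minimal sketch: the `'hand'` model alias and the image path are placeholders, not part of the configs added here.
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+# 'hand' is a model alias resolved by the inferencer to a default hand model;
+# replace the image path with a real file
+inferencer = MMPoseInferencer('hand')
+result_generator = inferencer('path/to/hand_image.jpg', show=False)
+result = next(result_generator)
+# each result is a dict; 'predictions' holds per-instance keypoints and scores
+print(result['predictions'])
+```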
diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/README.md b/mmpose/configs/hand_2d_keypoint/rtmpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9687b7e72c98376a00570389e7f1d003b4ace8f0 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/README.md @@ -0,0 +1,16 @@ +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. + +## Results and Models + +### COCO-WholeBody-Hand Dataset + +Results on COCO-WholeBody-Hand val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :------------------------------------------------------------------------------------: | +| RTMPose-m | 256x256 | 0.815 | 0.837 | 4.51 | [rtmpose_coco_wholebody_hand.md](./coco_wholebody_hand/rtmpose_coco_wholebody_hand.md) | diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..48c719339443eac75dfb4849553294751fc2f62d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + 
widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=21, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + 
data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..b2a5957e6ec3850423188c4fde9fd4aeae9853ee --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md @@ -0,0 +1,39 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [rtmpose_m](/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.815 | 0.837 | 4.51 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.json) | diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..b0f9d9ac3c575f094ca1ef3ea40ef9a5ccf63b07 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: COCO-WholeBody-Hand + Name: rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.815 + EPE: 4.51 + PCK@0.2: 0.837 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..f329f1cb1df65ad7cff2ed255d6d89e859e78ea2 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py @@ -0,0 +1,380 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# coco-hand onehand10k freihand2d rhd2d halpehand + +# runtime +max_epochs = 210 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + 
expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=21, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_onehand10k = dict( + type='OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='onehand10k/annotations/onehand10k_train.json', + data_prefix=dict(img='pose/OneHand10K/'), + pipeline=[], +) + +dataset_freihand = dict( + type='FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='freihand/annotations/freihand_train.json', + data_prefix=dict(img='pose/FreiHand/'), + pipeline=[], +) + +dataset_rhd = dict( + type='Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='rhd/annotations/rhd_train.json', + data_prefix=dict(img='pose/RHD/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=21, + mapping=[ + (0, 0), + (1, 4), + (2, 3), + (3, 2), + (4, 1), + (5, 8), + (6, 7), + (7, 6), + (8, 5), + (9, 12), + (10, 11), + (11, 10), + (12, 9), + (13, 16), + (14, 15), + (15, 14), + (16, 13), + (17, 20), + (18, 19), + (19, 18), + (20, 17), + ]) + ], +) + +dataset_halpehand = dict( + type='HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py'), + datasets=[ + dataset_coco, dataset_onehand10k, dataset_freihand, dataset_rhd, + dataset_halpehand + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# test datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_onehand10k = dict( + type='OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='onehand10k/annotations/onehand10k_test.json', + data_prefix=dict(img='pose/OneHand10K/'), + pipeline=[], +) + +val_freihand = dict( + type='FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='freihand/annotations/freihand_test.json', + data_prefix=dict(img='pose/FreiHand/'), + pipeline=[], +) + +val_rhd = dict( + type='Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='rhd/annotations/rhd_test.json', + data_prefix=dict(img='pose/RHD/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=21, + mapping=[ + (0, 0), + (1, 4), + (2, 3), + (3, 2), + (4, 1), + (5, 8), + (6, 7), + (7, 6), + (8, 5), + (9, 12), + (10, 11), + (11, 10), + (12, 9), + (13, 16), + (14, 15), + (15, 14), + (16, 13), + (17, 20), + (18, 19), + (19, 18), + (20, 17), + ]) + ], +) + +val_halpehand = dict( + type='HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py'), + datasets=[ + val_coco, val_onehand10k, val_freihand, val_rhd, val_halpehand + ], + pipeline=val_pipeline, + test_mode=True, + )) + +val_dataloader = test_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md 
b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md new file mode 100644 index 0000000000000000000000000000000000000000..361770dad2ec789daf9bfde0d08b3947a8a2cf38 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md @@ -0,0 +1,67 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- `Hand5` and `*` denote model trained on 5 public datasets: + - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) + - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + +| Config | Input Size | PCK@0.2
<br>(COCO-Wholebody-Hand) | PCK@0.2<br>(Hand5) | AUC<br>(Hand5) | EPE<br>(Hand5) | FLOPS(G) | Download | +| :---------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :-----------------: | :------: | :-----------------------------------------: | +| [RTMPose-m\*<br>
(alpha version)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 5.06 | 2.581 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | diff --git a/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml new file mode 100644 index 0000000000000000000000000000000000000000..c32aa4a61c3eb98884f016a2f42e66577ac58ecd --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml @@ -0,0 +1,28 @@ +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py + In Collection: RTMPose + Alias: hand + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO-Wholebody-Hand + - OneHand10K + - FreiHand2d + - RHD2d + - Halpe + Name: rtmpose-m_8xb256-210e_hand5-256x256 + Results: + - Dataset: Hand5 + Metrics: + PCK@0.2: 0.964 + AUC: 0.839 + EPE: 5.06 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7f63f1f8259dd1cf52bebd3612db8ceec3d23220 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,55 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
+ +
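+To make the heatmap-to-keypoint step concrete, the sketch below decodes heatmaps in the simplest possible way: take the argmax of each keypoint channel and rescale it to the input crop. It is an illustrative NumPy-only simplification, not the decoder MMPose actually uses -- in these configs decoding is handled by the configured codec (e.g. `MSRAHeatmap`) together with test-time options such as `flip_test`.
+
+```python
+import numpy as np
+
+
+def decode_heatmaps(heatmaps, input_size):
+    """Toy decoder: per-keypoint argmax, rescaled to the input crop.
+
+    heatmaps:   (K, H, W) float array, e.g. (21, 64, 64) for a 21-keypoint hand
+    input_size: (width, height) of the model input, e.g. (256, 256)
+    """
+    num_kpts, heat_h, heat_w = heatmaps.shape
+    in_w, in_h = input_size
+    flat = heatmaps.reshape(num_kpts, -1)
+    ys, xs = np.unravel_index(flat.argmax(axis=1), (heat_h, heat_w))
+    scores = flat.max(axis=1)
+    # map heatmap-grid coordinates back to input-crop pixel coordinates
+    keypoints = np.stack([xs * in_w / heat_w, ys * in_h / heat_h], axis=1)
+    return keypoints.astype(np.float32), scores
+
+
+# shapes only -- random data standing in for a real model output
+kpts, scores = decode_heatmaps(np.random.rand(21, 64, 64), input_size=(256, 256))
+print(kpts.shape, scores.shape)  # (21, 2) (21,)
+```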
+ +## Results and Models + +### COCO-WholeBody-Hand Dataset + +Results on COCO-WholeBody-Hand val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.814 | 0.840 | 4.37 | [hrnetv2_dark_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md) | +| HRNetv2-w18 | 256x256 | 0.813 | 0.840 | 4.39 | [hrnetv2_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md) | +| HourglassNet | 256x256 | 0.804 | 0.835 | 4.54 | [hourglass_coco_wholebody_hand.md](./coco_wholebody_hand/hourglass_coco_wholebody_hand.md) | +| SCNet-50 | 256x256 | 0.803 | 0.834 | 4.55 | [scnet_coco_wholebody_hand.md](./coco_wholebody_hand/scnet_coco_wholebody_hand.md) | +| ResNet-50 | 256x256 | 0.800 | 0.833 | 4.64 | [resnet_coco_wholebody_hand.md](./coco_wholebody_hand/resnet_coco_wholebody_hand.md) | +| LiteHRNet-18 | 256x256 | 0.795 | 0.830 | 4.77 | [litehrnet_coco_wholebody_hand.md](./coco_wholebody_hand/litehrnet_coco_wholebody_hand.md) | +| MobileNet-v2 | 256x256 | 0.795 | 0.829 | 4.77 | [mobilenetv2_coco_wholebody_hand.md](./coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md) | + +### FreiHand Dataset + +Results on FreiHand val & test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | +| ResNet-50 | 224x224 | 0.999 | 0.868 | 3.27 | [resnet_freihand2d.md](./freihand2d/resnet_freihand2d.md) | + +### OneHand10K Dataset + +Results on OneHand10K val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.990 | 0.572 | 23.96 | [hrnetv2_dark_onehand10k.md](./onehand10k/hrnetv2_dark_onehand10k.md) | +| HRNetv2-w18+UDP | 256x256 | 0.990 | 0.571 | 23.88 | [hrnetv2_udp_onehand10k.md](./onehand10k/hrnetv2_udp_onehand10k.md) | +| HRNetv2-w18 | 256x256 | 0.990 | 0.567 | 24.26 | [hrnetv2_onehand10k.md](./onehand10k/hrnetv2_onehand10k.md) | +| ResNet-50 | 256x256 | 0.989 | 0.555 | 25.16 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | +| MobileNet-v2 | 256x256 | 0.986 | 0.537 | 28.56 | [mobilenetv2_onehand10k.md](./onehand10k/mobilenetv2_onehand10k.md) | + +### RHD Dataset + +Results on RHD test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.992 | 0.903 | 2.18 | [hrnetv2_dark_rhd2d.md](./rhd2d/hrnetv2_dark_rhd2d.md) | +| HRNetv2-w18+UDP | 256x256 | 0.992 | 0.902 | 2.19 | [hrnetv2_udp_rhd2d.md](./rhd2d/hrnetv2_udp_rhd2d.md) | +| HRNetv2-w18 | 256x256 | 0.992 | 0.902 | 2.21 | [hrnetv2_rhd2d.md](./rhd2d/hrnetv2_rhd2d.md) | +| ResNet-50 | 256x256 | 0.991 | 0.898 | 2.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | +| MobileNet-v2 | 256x256 | 0.985 | 0.883 | 2.79 | [mobilenetv2_rhd2d.md](./rhd2d/mobilenetv2_rhd2d.md) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md new file mode 100644 index 
0000000000000000000000000000000000000000..4728baaba2f19ad8e6760958be21ab54dc964266 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md @@ -0,0 +1,39 @@ + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hourglass_52](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.804 | 0.835 | 4.54 | [ckpt](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..f6247504e2b6323010a800ea7b5a1c6d01f51a99 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: Hourglass + Metadata: + Architecture: + - Hourglass + Training Data: COCO-WholeBody-Hand + Name: td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.835 + EPE: 4.54 + PCK@0.2: 0.804 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..d944ff43a268aad9f781dbea9063ae4eb000a597 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md @@ -0,0 +1,39 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.813 | 0.840 | 4.39 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..f6c0046f663badee9d9b9ed0d42baaaaafe280ad --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COCO-WholeBody-Hand + Name: td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.84 + EPE: 4.39 + PCK@0.2: 0.813 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..73896361860d72f901b173206a29d1985a85bd65 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md @@ -0,0 +1,56 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
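In the configs added by this diff, the DARK variant shares the backbone, head and training schedule of the plain HRNetv2-w18 model above; the main difference is the heatmap codec, excerpted below from `td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py`, where `unbiased=True` enables DARK's distribution-aware coordinate encoding and decoding.

```python
# Codec of the *_dark_* config in this diff; identical to the plain HRNetv2 setting
# except for `unbiased=True`, which turns on DARK's distribution-aware representation.
codec = dict(
    type='MSRAHeatmap',
    input_size=(256, 256),
    heatmap_size=(64, 64),
    sigma=2,
    unbiased=True)
```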
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.814 | 0.840 | 4.37 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark_20210908.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..af1d607d10f4fc62e678a62676fddcf0f1752295 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: COCO-WholeBody-Hand + Name: td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.84 + EPE: 4.37 + PCK@0.2: 0.814 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..7c084b79e1a463794988134b4403393fba540e6a --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md @@ -0,0 +1,37 @@ + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
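The PCK@0.2, AUC and EPE columns in these tables follow the usual 2D keypoint definitions: EPE is the mean pixel error, PCK@0.2 is the fraction of keypoints falling within 0.2 of the normalisation size, and AUC aggregates accuracy over a sweep of thresholds rather than the single 0.2 value. The NumPy sketch below is illustrative only; the per-axis bounding-box normalisation and the omission of visibility masking are simplifying assumptions, not an excerpt of MMPose's `PCKAccuracy`/`EPE` metrics.

```python
import numpy as np

def pck_and_epe(pred, gt, bbox_wh, thr=0.2):
    """Illustrative PCK@thr and EPE for one (21, 2) hand instance.

    pred, gt: (21, 2) arrays of predicted / ground-truth keypoints in pixels.
    bbox_wh:  (w, h) of the hand box used to normalise distances for PCK.
    """
    pred = np.asarray(pred, dtype=float)
    gt = np.asarray(gt, dtype=float)
    epe = np.linalg.norm(pred - gt, axis=-1).mean()        # mean end-point error (px)
    norm_dist = np.linalg.norm((pred - gt) / np.asarray(bbox_wh, dtype=float), axis=-1)
    pck = float((norm_dist < thr).mean())                  # fraction within the threshold
    return pck, epe
```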
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [LiteHRNet-18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.830 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..eeecbfe7e244b8c629fd4d621aea73a6b5694704 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: + - LiteHRNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.83 + EPE: 4.77 + PCK@0.2: 0.795 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..cc76358a8fe124d1f2bc04523e841e551590fccb --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md @@ -0,0 +1,38 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_mobilenetv2](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.829 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..a9d0101ce77db1d1062cfb20c9d2d80f848f0ecf --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO-WholeBody-Hand + Name: td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.829 + EPE: 4.77 + PCK@0.2: 0.795 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..ae7f287e3d250de0f9a39a0190562eb9f6e69e7c --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md @@ -0,0 +1,55 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
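"SimpleBaseline2D" in this card refers to the model structure rather than a separate network: a ResNet-50 backbone followed by a deconvolutional heatmap head regressing the 21 hand keypoints. The fragment below is excerpted (and flattened for readability) from `td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py`, which appears later in this diff.

```python
# Excerpt from the ResNet-50 config in this diff (fields shown outside the
# enclosing model = dict(...) for readability).
backbone = dict(
    type='ResNet',
    depth=50,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'))
head = dict(
    type='HeatmapHead',
    in_channels=2048,
    out_channels=21,
    loss=dict(type='KeypointMSELoss', use_target_weight=True))
```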
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.800 | 0.833 | 4.64 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..78d16a6e459e6cbafeffb2fb14d863910a7b2144 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.833 + EPE: 4.64 + PCK@0.2: 0.8 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md new file mode 100644 index 0000000000000000000000000000000000000000..06c6fda74c6c36df1abac5a06c5d642f9eac580d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md @@ -0,0 +1,38 @@ + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_scnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.803 | 0.834 | 4.55 | [ckpt](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml new file mode 100644 index 0000000000000000000000000000000000000000..a8887b3c8eeb2be74423d845ece640af8333e1d3 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SCNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.834 + EPE: 4.55 + PCK@0.2: 0.803 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..e0bc1c8739c9d8ea1fc585882abe5b8189087e2a --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=21, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + 
shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=180.0, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b9f0f281b9bc72598b9e1ffacd99f58248175d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + 
stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..5d67f393f6612aab494a47c15bd9ce7b68fc8b4d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + 
heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..f3a6150e49e687bb3d510bd7139d66bd8ac8b37f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,136 @@ +_base_ = ['../../../_base_/default_runtime.py'] + 
+# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 
0000000000000000000000000000000000000000..dba8538a5fe7b4313b888cae5a21f0c55b58c340 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,120 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 
0000000000000000000000000000000000000000..c04950bfaabcfebc806ce541e8d5285d0bca75be --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,119 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..f596227c5c109fe51b0fd822c1f2b26b4abaae83 --- /dev/null +++ 
b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md new file mode 100644 index 0000000000000000000000000000000000000000..f1a6c80132255b1cd6d029acd05add2a71f72e98 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md @@ -0,0 +1,56 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+FreiHand (ICCV'2019) + +```bibtex +@inproceedings{zimmermann2019freihand, + title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, + author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={813--822}, + year={2019} +} +``` + +
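Because the files added here are standard MMEngine configs, the FreiHand model in the table below can be trained or evaluated directly through `mmengine`'s `Runner`. The sketch assumes a working MMPose 1.x checkout with FreiHand prepared under `data/freihand/` as the config expects; the work directory is hypothetical.

```python
# Sketch only: drive training/evaluation of the FreiHand config from Python.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/hand_2d_keypoint/topdown_heatmap/freihand2d/'
    'td-hm_res50_8xb64-100e_freihand2d-224x224.py')
cfg.work_dir = 'work_dirs/td-hm_res50_freihand2d'   # hypothetical output directory

runner = Runner.from_cfg(cfg)
runner.train()   # runner.test() evaluates PCK@0.2 / AUC / EPE on the test split
```

The `8xb64` in the config name encodes the reference setup of 8 GPUs with a per-GPU batch size of 64, matching the `base_batch_size=512` used by `auto_scale_lr` in this config.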
+ +Results on FreiHand val & test set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--- | :-------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | :------------------------------------------------------: | +| test | [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py) | 224x224 | 0.999 | 0.868 | 3.27 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224_20200914.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..9937b50be6756f1b8cd3de45347cabb159919d2f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: FreiHand + Name: td-hm_res50_8xb64-100e_freihand2d-224x224 + Results: + - Dataset: FreiHand + Metrics: + AUC: 0.868 + EPE: 3.27 + PCK@0.2: 0.999 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py new file mode 100644 index 0000000000000000000000000000000000000000..cd1750cdebc9d977ae917432b3e714ee1275f3d8 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py @@ -0,0 +1,138 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=100, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=100, + milestones=[50, 70], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(224, 224), heatmap_size=(56, 56), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'FreiHandDataset' +data_mode = 'topdown' +data_root = 'data/freihand/' + +# pipelines +train_pipeline 
= [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..59d70fc597094e1597440809c5b1de2d9e4a760f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md @@ -0,0 +1,60 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.572 | 23.96 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..7fc64b75c7a445e7cae9c0350e5847b90205f87f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.572 + EPE: 23.96 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..262bf3225390b69c8d965e02d9f78691a39b4760 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md @@ -0,0 +1,43 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.567 | 24.26 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..fd0c75587621aa94af22654a8a3ea80957eddc0a --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.567 + EPE: 24.26 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..ca1599c116e3df24438842da11a05c713fbf99b1 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md @@ -0,0 +1,60 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
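Like DARK, UDP modifies the data processing and coordinate encoding rather than the network itself. The UDP config referenced in the table below (`td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py`) is not reproduced in this excerpt; in MMPose 1.x the switch is typically made in the codec, roughly as in the hypothetical fragment below.

```python
# Hypothetical fragment (the UDP config itself is not shown in this excerpt):
# in MMPose 1.x, UDP is normally selected by using the UDPHeatmap codec.
codec = dict(
    type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
```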
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.571 | 23.88 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..32d5dd6db5e3f6873456dd5016bdb74a75b783cf --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py + In Collection: UDP + Metadata: + Architecture: + - HRNetv2 + - UDP + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.571 + EPE: 23.88 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..3f0bf9d1b76f27294712462584791657c910212e --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md @@ -0,0 +1,42 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.986 | 0.537 | 28.56 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..ade1f054f1b16754be9c2dddf174a85db838140d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: OneHand10K + Name: td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.537 + EPE: 28.56 + PCK@0.2: 0.986 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..c07817d68eae7caba106695c69d6ab1746e6c5b8 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md @@ -0,0 +1,59 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py) | 256x256 | 0.989 | 0.555 | 25.16 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..59dc7f523f3f9d75e2451587525c36cf82ef851f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: OneHand10K + Name: td-hm_res50_8xb32-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.555 + EPE: 25.16 + PCK@0.2: 0.989 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..99419065aa879884fddb6afa568257fb1b9fe340 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 
4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..610e9d149b658166a37d1fa1a028efab32d0637d --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py @@ -0,0 +1,162 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..54e2220d636601ac4a19a116aa6d0aabe138dbef --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + 
type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py 
b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..1f4e61c37c5692b62d407f642e23b38197e23d47 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py new file mode 100644 index 
0000000000000000000000000000000000000000..36589d899ddd930143749845b5fd5650917d23ec --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..334d97978c1dbb9b7ddca0df13f75372d753a067 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md @@ -0,0 +1,58 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.903 | 2.18 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..7400dc19e019f605efe6688de5be8170c1aa2c4b --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: RHD + Name: td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.903 + EPE: 2.18 + PCK@0.2: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..6fe91fe17b3ef52fdd65751dab103641202c5595 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md @@ -0,0 +1,41 @@ + + +
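The DarkPose entry above differs from the plain HRNetv2 RHD config only in its codec (`MSRAHeatmap` with `unbiased=True`), i.e. in how the predicted heatmap is decoded to sub-pixel coordinates. The NumPy sketch below illustrates the core DARK refinement step, a Taylor expansion of the log-heatmap around the integer argmax; it is a conceptual illustration under simplifying assumptions (single keypoint, no heatmap smoothing), not MMPose's actual decoder.

```python
import numpy as np


def dark_refine(heatmap: np.ndarray, eps: float = 1e-10) -> np.ndarray:
    """Refine the argmax of a single-keypoint heatmap to sub-pixel accuracy.

    Conceptual DARK refinement: offset = -H^{-1} g, where g and H are the
    gradient and Hessian of log(heatmap) at the integer peak.
    """
    h, w = heatmap.shape
    y, x = np.unravel_index(np.argmax(heatmap), heatmap.shape)
    coord = np.array([x, y], dtype=np.float32)
    if 1 <= x < w - 1 and 1 <= y < h - 1:
        logh = np.log(np.maximum(heatmap, eps))
        # First derivatives (central differences) of the log-heatmap at the peak.
        dx = 0.5 * (logh[y, x + 1] - logh[y, x - 1])
        dy = 0.5 * (logh[y + 1, x] - logh[y - 1, x])
        # Second derivatives forming the Hessian.
        dxx = logh[y, x + 1] - 2 * logh[y, x] + logh[y, x - 1]
        dyy = logh[y + 1, x] - 2 * logh[y, x] + logh[y - 1, x]
        dxy = 0.25 * (logh[y + 1, x + 1] - logh[y + 1, x - 1]
                      - logh[y - 1, x + 1] + logh[y - 1, x - 1])
        hessian = np.array([[dxx, dxy], [dxy, dyy]])
        if abs(np.linalg.det(hessian)) > eps:
            offset = -np.linalg.solve(hessian, np.array([dx, dy]))
            coord += offset.astype(np.float32)
    # (x, y) in heatmap (here 64x64) coordinates; the decoder would still map
    # this back to the 256x256 input crop via the inverse affine transform.
    return coord
```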
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..f5292da7706f8a51a7a52a9dc98371ec9705aff5 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml @@ -0,0 +1,16 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: RHD + Name: td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.902 + EPE: 2.21 + PCK@0.2: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..c494eb8fc6b79e310f3b9099cf0cfc6059ce4f8c --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md @@ -0,0 +1,58 @@ + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :------: | :---: | :--: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.19 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..db63b682e2fc50570d118a3755d9e64285fb6fe5 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py + In Collection: UDP + Metadata: + Architecture: + - HRNetv2 + - UDP + Training Data: RHD + Name: td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.902 + EPE: 2.19 + PCK@0.2: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..877247fe86ff2ec59a296f5cb45d88b392533135 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md @@ -0,0 +1,40 @@ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.985 | 0.883 | 2.79 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..202a636fbe70e59ff71b6f225273242162c400cc --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: RHD + Name: td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.883 + EPE: 2.79 + PCK@0.2: 0.985 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..f103a0df40e4ec57223a752e2c833f42b6909ae8 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md @@ -0,0 +1,57 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet50](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.991 | 0.898 | 2.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..d09f8ba2685b86fb1326c0460b7eb6fc83dd95fa --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: RHD + Name: td-hm_res50_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.898 + EPE: 2.32 + PCK@0.2: 0.991 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9bcc9b896ae499e034605209f1c7eb14ba7b39 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + 
init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..44b8dc0f5a1c55d10293c40e5b8314fca6aa9b9c --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,162 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + 
stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..d1c796234dd22760f6f52dcb97d05ad0410ceabb --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,158 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual 
training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..d7176bacd73cad44295c84ae8f4b9b1d1201bf35 --- /dev/null +++ 
b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,125 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..da5556802891a6d742527e7889d0f31161223eef --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,124 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = 
dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/README.md b/mmpose/configs/hand_2d_keypoint/topdown_regression/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0210a89c2de35fc5f0a480662aa216e61ac9c623 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/README.md @@ -0,0 +1,25 @@ +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. 
In the second stage, regression-based methods directly regress the keypoint coordinates from the features extracted within the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html).
+
+
+ +
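+As a rough illustration of this paradigm, the sketch below shows a DeepPose-style head in plain PyTorch: backbone features of the cropped box are global-average-pooled and a linear layer regresses normalized (x, y) coordinates for every keypoint. This is a hedged, self-contained sketch for intuition only, not the MMPose `RegressionHead`; the channel and keypoint counts simply mirror the ResNet-50 hand configs below.
+
+```python
+# Toy DeepPose-style regression head (illustrative only, not MMPose code).
+import torch
+import torch.nn as nn
+
+
+class ToyRegressionHead(nn.Module):
+    """Global-average-pool backbone features, then regress K keypoints."""
+
+    def __init__(self, in_channels: int = 2048, num_keypoints: int = 21):
+        super().__init__()
+        self.num_keypoints = num_keypoints
+        self.pool = nn.AdaptiveAvgPool2d(1)      # (N, C, H, W) -> (N, C, 1, 1)
+        self.fc = nn.Linear(in_channels, num_keypoints * 2)
+
+    def forward(self, feats: torch.Tensor) -> torch.Tensor:
+        x = self.pool(feats).flatten(1)          # (N, C)
+        coords = self.fc(x).sigmoid()            # normalized to [0, 1]
+        return coords.view(-1, self.num_keypoints, 2)
+
+
+# A fake ResNet-50 feature map of a 256x256 hand crop.
+feats = torch.randn(4, 2048, 8, 8)
+keypoints = ToyRegressionHead()(feats)           # (4, 21, 2)
+print(keypoints.shape)
+```
+
+During training, a smooth L1 loss between these normalized coordinates and the encoded ground truth plays the role of the `SmoothL1Loss` used by the configs listed below.
+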
+ +## Results and Models + +### OneHand10K Dataset + +Results on OneHand10K val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------: | +| ResNet-50 | 256x256 | 0.990 | 0.485 | 34.21 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | + +### RHD Dataset + +Results on RHD test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :----------------------------------------: | +| ResNet-50 | 256x256 | 0.988 | 0.865 | 3.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md new file mode 100644 index 0000000000000000000000000000000000000000..40c0c184959466df639518450dcf17aa0b60ba30 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md @@ -0,0 +1,59 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
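+The tables below report PCK@0.2 (fraction of keypoints whose error falls below 0.2 of a per-sample normalization length), AUC (area under the PCK curve as the threshold is swept) and EPE (mean end-point error in pixels). The snippet is a simplified reference for reading these numbers, not the MMPose implementation; the reported values come from the `PCKAccuracy`, `AUC` and `EPE` evaluators in the configs, and using the bounding-box size as the normalization length here is an assumption made for illustration.
+
+```python
+# Simplified PCK / EPE reference (illustrative only, not the MMPose evaluators).
+import numpy as np
+
+
+def epe(pred: np.ndarray, gt: np.ndarray) -> float:
+    """Mean end-point error in pixels for (N, K, 2) keypoint arrays."""
+    return float(np.linalg.norm(pred - gt, axis=-1).mean())
+
+
+def pck(pred: np.ndarray, gt: np.ndarray, norm_size: np.ndarray, thr: float = 0.2) -> float:
+    """Fraction of keypoints within thr * norm_size of the ground truth."""
+    dist = np.linalg.norm(pred - gt, axis=-1)            # (N, K)
+    return float((dist <= thr * norm_size[:, None]).mean())
+
+
+# Toy example: 2 hands, 21 keypoints, bounding-box size as the norm factor.
+gt = np.random.rand(2, 21, 2) * 256
+pred = gt + np.random.randn(2, 21, 2) * 5
+norm = np.array([256.0, 256.0])
+print(f'EPE = {epe(pred, gt):.2f} px, PCK@0.2 = {pck(pred, gt, norm):.3f}')
+```
+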
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.485 | 34.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml new file mode 100644 index 0000000000000000000000000000000000000000..d5e9d8122ecb0c2082b2d0b38413dee2dc2220aa --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py + In Collection: DeepPose + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: OneHand10K + Name: td-reg_res50_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.485 + EPE: 34.21 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1556d45e18e3253f421948d1affd4eabfe673f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=21, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 
'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md new file mode 100644 index 0000000000000000000000000000000000000000..6cca5580ba7fcb0fa0f089159b0c96007c5ce90f --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md @@ -0,0 +1,57 @@ + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.988 | 0.865 | 3.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256_20210330.log.json) | diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml new file mode 100644 index 0000000000000000000000000000000000000000..3d0a920c5daa2eb1eaab3bea7c5c77529d72f377 --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml @@ -0,0 +1,17 @@ +Models: +- Config: configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py + In Collection: DeepPose + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: RHD + Name: td-reg_res50_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.865 + EPE: 3.32 + PCK@0.2: 0.988 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth diff --git a/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..a350c24bfe2d3d7246ed63c78f737414ee5f247e --- /dev/null +++ b/mmpose/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=21, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + 
dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_3d_keypoint/README.md b/mmpose/configs/hand_3d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..330319f42b186afec3a7c73afd627505e91c7dc8 --- /dev/null +++ b/mmpose/configs/hand_3d_keypoint/README.md @@ -0,0 +1,7 @@ +# 3D Hand Pose Estimation + +3D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_hand_keypoint.md) to prepare data. 
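+
+Once the data is in place, the expected directory layout can be double-checked with a small script like the one below. The paths are copied from the InterNet InterHand2.6M config added later in this PR (`internet_res50_4xb16-20e_interhand3d-256x256.py`); the check itself is only an illustrative convenience, not part of MMPose.
+
+```python
+# Illustrative sanity check of the InterHand2.6M layout (paths from the config).
+import os
+
+data_root = 'data/interhand2.6m/'
+expected = [
+    'annotations/all/InterHand2.6M_train_data.json',
+    'annotations/all/InterHand2.6M_train_camera.json',
+    'annotations/all/InterHand2.6M_train_joint_3d.json',
+    'annotations/machine_annot/InterHand2.6M_val_data.json',
+    'annotations/machine_annot/InterHand2.6M_val_camera.json',
+    'annotations/machine_annot/InterHand2.6M_val_joint_3d.json',
+    'images/train/',
+    'images/val/',
+    'images/test/',
+]
+for rel in expected:
+    path = os.path.join(data_root, rel)
+    status = 'ok' if os.path.exists(path) else 'MISSING'
+    print(f'[{status}] {path}')
+```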
diff --git a/mmpose/configs/hand_3d_keypoint/internet/README.md b/mmpose/configs/hand_3d_keypoint/internet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8d913e07674c8c27b6b1b269af3bc50b1f9093b1 --- /dev/null +++ b/mmpose/configs/hand_3d_keypoint/internet/README.md @@ -0,0 +1,10 @@ +# InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image + +## Results and Models + +### InterHand2.6m 3D Dataset + +| Arch | Set | MPJPE-single | MPJPE-interacting | MPJPE-all | MRRPE | APh | ckpt | log | Details and Download | +| :------------------------------- | :-------: | :----------: | :---------------: | :-------: | :---: | :--: | :------------------------------: | :-----------------------------: | :-----------------------------------------------: | +| [InterNet_resnet_50](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | test(H+M) | 9.47 | 13.40 | 11.59 | 29.28 | 0.99 | [ckpt](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth) | [log](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256_20210702.log.json) | [internet_interhand3d.md](./interhand3d/internet_interhand3d.md) | +| [InterNet_resnet_50](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | val(M) | 11.22 | 15.23 | 13.16 | 31.73 | 0.98 | [ckpt](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth) | [log](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256_20210702.log.json) | [internet_interhand3d.md](./interhand3d/internet_interhand3d.md) | diff --git a/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.md b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.md new file mode 100644 index 0000000000000000000000000000000000000000..eb775d743913aa13b0c5c5f75ab5a090191f280c --- /dev/null +++ b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.md @@ -0,0 +1,59 @@ + + +
+InterNet (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+InterHand2.6M (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
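+For reference, MPJPE below is the mean per-joint position error: the average Euclidean distance (in mm) between predicted and ground-truth 3D joints, conventionally computed root-relative; MRRPE roughly measures the error of the predicted relative position between the two hands' root joints, and APh reflects hand-type (handedness) classification performance, matching the `InterHandMetric` modes used by the config. The snippet is a minimal MPJPE sketch for intuition only, not the MMPose `InterHandMetric`, and it aligns all joints to a single root for simplicity.
+
+```python
+# Minimal root-relative MPJPE sketch (illustrative; not the InterHandMetric).
+import numpy as np
+
+
+def mpjpe(pred: np.ndarray, gt: np.ndarray, root_idx: int = 0) -> float:
+    """Mean per-joint position error for (N, J, 3) arrays, in the input unit."""
+    pred_rel = pred - pred[:, root_idx:root_idx + 1]     # root-align predictions
+    gt_rel = gt - gt[:, root_idx:root_idx + 1]           # root-align ground truth
+    return float(np.linalg.norm(pred_rel - gt_rel, axis=-1).mean())
+
+
+# Toy example: 4 samples, 42 joints (both hands), millimetres.
+gt = np.random.rand(4, 42, 3) * 100
+pred = gt + np.random.randn(4, 42, 3) * 3
+print(f'MPJPE = {mpjpe(pred, gt):.2f} mm')
+```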
+ +Results on InterHand2.6M val & test set + +| Train Set | Set | Arch | Input Size | MPJPE-single | MPJPE-interacting | MPJPE-all | MRRPE | APh | ckpt | log | +| :-------- | :-------- | :----------------------------------------: | :--------: | :----------: | :---------------: | :-------: | :---: | :--: | :----------------------------------------: | :---------------------------------------: | +| All | test(H+M) | [InterNet_resnet_50](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | 256x256 | 9.69 | 13.72 | 11.86 | 29.27 | 0.99 | [ckpt](https://download.openmmlab.com/mmpose/v1/hand_3d_keypoint/internet/interhand3d/internet_res50_interhand3d-d6ff20d6_20230913.pth) | [log](https://download.openmmlab.com/mmpose/v1/hand_3d_keypoint/internet/interhand3d/internet_res50_interhand3d-d6ff20d6_20230913.json) | +| All | val(M) | [InterNet_resnet_50](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | 256x256 | 11.30 | 15.57 | 13.36 | 32.15 | 0.98 | [ckpt](https://download.openmmlab.com/mmpose/v1/hand_3d_keypoint/internet/interhand3d/internet_res50_interhand3d-d6ff20d6_20230913.pth) | [log](https://download.openmmlab.com/mmpose/v1/hand_3d_keypoint/internet/interhand3d/internet_res50_interhand3d-d6ff20d6_20230913.json) | +| All | test(H+M) | [InterNet_resnet_50\*](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | 256x256 | 9.47 | 13.40 | 11.59 | 29.28 | 0.99 | [ckpt](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth) | [log](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256_20210702.log.json) | +| All | val(M) | [InterNet_resnet_50\*](/configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/res50_interhand3d_all_256x256.py) | 256x256 | 11.22 | 15.23 | 13.16 | 31.73 | 0.98 | [ckpt](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth) | [log](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256_20210702.log.json) | + +*Models with * are trained in [MMPose 0.x](https://github.com/open-mmlab/mmpose/tree/0.x). 
The checkpoints and logs are only for validation.* diff --git a/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.yml b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.yml new file mode 100644 index 0000000000000000000000000000000000000000..778e436272b71f90edd1655e67a0106bd7dee2da --- /dev/null +++ b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_interhand3d.yml @@ -0,0 +1,35 @@ +Collections: +- Name: InterNet + Paper: + Title: 'InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation + from a Single RGB Image' + URL: https://link.springer.com/content/pdf/10.1007/978-3-030-58565-5_33.pdf + README: https://github.com/open-mmlab/mmpose/blob/master/docs/en/papers/algorithms/internet.md +Models: +- Config: configs/hand_3d_keypoint/internet/interhand3d/internet_res50_4xb16-20e_interhand3d-256x256.py + In Collection: InterNet + Alias: hand3d + Metadata: + Architecture: &id001 + - InterNet + - ResNet + Training Data: InterHand2.6M + Name: internet_res50_4xb16-20e_interhand3d-256x256 + Results: + - Dataset: InterHand2.6M (H+M) + Metrics: + APh: 0.99 + MPJPE-all: 11.86 + MPJPE-interacting: 13.72 + MPJPE-single: 9.69 + MRRPE: 29.27 + Task: Hand 3D Keypoint + - Dataset: InterHand2.6M (M) + Metrics: + APh: 0.98 + MPJPE-all: 13.36 + MPJPE-interacting: 15.57 + MPJPE-single: 11.30 + MRRPE: 32.15 + Task: Hand 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth diff --git a/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_res50_4xb16-20e_interhand3d-256x256.py b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_res50_4xb16-20e_interhand3d-256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..ffe9f0f051cce39c54cecf22a1b0f38983d84ce6 --- /dev/null +++ b/mmpose/configs/hand_3d_keypoint/internet/interhand3d/internet_res50_4xb16-20e_interhand3d-256x256.py @@ -0,0 +1,182 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# visualization +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0002)) + +# learning policy +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[15, 17], + gamma=0.1, + by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=1, + save_best='MPJPE_all', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='Hand3DHeatmap', + image_size=[256, 256], + root_heatmap_size=64, + heatmap_size=[64, 64, 64], + sigma=2.5, + max_bound=255, + depth_size=64) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='InternetHead', + keypoint_head_cfg=dict( + in_channels=2048, + out_channels=21 * 64, + depth_size=codec['depth_size'], + deconv_out_channels=(256, 256, 256), + deconv_kernel_sizes=(4, 4, 4), + ), + root_head_cfg=dict( + in_channels=2048, + 
heatmap_size=codec['root_heatmap_size'], + hidden_dims=(512, ), + ), + hand_type_head_cfg=dict( + in_channels=2048, + num_labels=2, + hidden_dims=(512, ), + ), + decoder=codec), + test_cfg=dict(flip_test=False)) + +# base dataset settings +dataset_type = 'InterHand3DDataset' +data_mode = 'topdown' +data_root = 'data/interhand2.6m/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='HandRandomFlip', prob=0.5), + dict(type='RandomBBoxTransform', rotate_factor=90.0), + dict(type='TopdownAffine', input_size=codec['image_size']), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'rotation', 'img_shape', + 'focal', 'principal_pt', 'input_size', 'input_center', + 'input_scale', 'hand_type', 'hand_type_valid', 'flip', + 'flip_indices', 'abs_depth')) +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['image_size']), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'rotation', 'img_shape', + 'focal', 'principal_pt', 'input_size', 'input_center', + 'input_scale', 'hand_type', 'hand_type_valid', 'flip', + 'flip_indices', 'abs_depth')) +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotations/all/InterHand2.6M_train_data.json', + camera_param_file='annotations/all/InterHand2.6M_train_camera.json', + joint_file='annotations/all/InterHand2.6M_train_joint_3d.json', + use_gt_root_depth=True, + rootnet_result_file=None, + data_mode=data_mode, + data_root=data_root, + data_prefix=dict(img='images/train/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotations/machine_annot/InterHand2.6M_val_data.json', + camera_param_file='annotations/machine_annot/' + 'InterHand2.6M_val_camera.json', + joint_file='annotations/machine_annot/InterHand2.6M_val_joint_3d.json', + use_gt_root_depth=True, + rootnet_result_file=None, + data_mode=data_mode, + data_root=data_root, + data_prefix=dict(img='images/val/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = dict( + batch_size=16, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotations/all/' + 'InterHand2.6M_test_data.json', + camera_param_file='annotations/all/' + 'InterHand2.6M_test_camera.json', + joint_file='annotations/all/' + 'InterHand2.6M_test_joint_3d.json', + use_gt_root_depth=True, + rootnet_result_file=None, + data_mode=data_mode, + data_root=data_root, + data_prefix=dict(img='images/test/'), + pipeline=val_pipeline, + test_mode=True, + )) + +# evaluators +val_evaluator = [ + dict(type='InterHandMetric', modes=['MPJPE', 'MRRPE', 'HandednessAcc']) +] +test_evaluator = val_evaluator diff --git a/mmpose/configs/hand_gesture/README.md b/mmpose/configs/hand_gesture/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7cc5bb323b05823b8eeedfb905756ce84c87c8ac --- /dev/null +++ b/mmpose/configs/hand_gesture/README.md @@ -0,0 +1,13 @@ +# Gesture Recognition + +Gesture recognition 
aims to recognize hand gestures from video, such as a thumbs-up.
+
+## Data preparation
+
+Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_gesture.md) to prepare data.
+
+## Demo
+
+Please follow [Demo](/demo/docs/en/gesture_recognition_demo.md) to run the demo.
+
+
diff --git a/mmpose/configs/mmdet/CO-DETR/README.md b/mmpose/configs/mmdet/CO-DETR/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..787592ade508337d341342058a25d471603d93fe
--- /dev/null
+++ b/mmpose/configs/mmdet/CO-DETR/README.md
@@ -0,0 +1,32 @@
+# CO-DETR
+
+> [DETRs with Collaborative Hybrid Assignments Training](https://arxiv.org/abs/2211.12860)
+
+
+
+## Abstract
+
+In this paper, we provide the observation that too few queries assigned as positive samples in DETR with one-to-one set matching lead to sparse supervision on the encoder's output, which considerably hurts the discriminative feature learning of the encoder, and vice versa for attention learning in the decoder. To alleviate this, we present a novel collaborative hybrid assignments training scheme, namely Co-DETR, to learn more efficient and effective DETR-based detectors from versatile label assignment manners. This new training scheme can easily enhance the encoder's learning ability in end-to-end detectors by training multiple parallel auxiliary heads supervised by one-to-many label assignments such as ATSS and Faster R-CNN. In addition, we conduct extra customized positive queries by extracting the positive coordinates from these auxiliary heads to improve the training efficiency of positive samples in the decoder. At inference, these auxiliary heads are discarded, so our method introduces no additional parameters or computational cost to the original detector while requiring no hand-crafted non-maximum suppression (NMS). We conduct extensive experiments to evaluate the effectiveness of the proposed approach on DETR variants, including DAB-DETR, Deformable-DETR, and DINO-Deformable-DETR. The state-of-the-art DINO-Deformable-DETR with Swin-L can be improved from 58.5% to 59.5% AP on COCO val. Surprisingly, incorporated with a ViT-L backbone, we achieve 66.0% AP on COCO test-dev and 67.9% AP on LVIS val, outperforming previous methods by clear margins with much smaller model sizes.
+
+ +
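+The collaborative training scheme summarized above can be pictured with a toy sketch: a shared backbone/encoder feeds one primary DETR-style head trained with one-to-one matching plus several auxiliary heads trained with one-to-many assignments; all losses are summed during training, and the auxiliary heads are simply dropped at inference. This is a schematic illustration only, not the actual mmdet implementation (the real heads are `CoDINOHead`, `CoATSSHead` and `CoStandardRoIHead`, added below).
+
+```python
+# Schematic of collaborative hybrid assignment training with toy modules
+# standing in for the real DETR decoder / ATSS / Faster R-CNN heads.
+import torch
+import torch.nn as nn
+
+
+class ToyCoDetector(nn.Module):
+
+    def __init__(self, dim: int = 256, num_aux_heads: int = 2):
+        super().__init__()
+        self.encoder = nn.Conv2d(3, dim, kernel_size=8, stride=8)  # backbone+encoder stand-in
+        self.primary_head = nn.Conv2d(dim, 4 + 1, kernel_size=1)   # one-to-one supervised
+        self.aux_heads = nn.ModuleList(
+            nn.Conv2d(dim, 4 + 1, kernel_size=1) for _ in range(num_aux_heads))
+
+    def forward(self, images: torch.Tensor, training: bool = True):
+        feats = self.encoder(images)
+        primary = self.primary_head(feats)
+        if not training:
+            return primary                  # aux heads add no inference cost
+        return primary, [head(feats) for head in self.aux_heads]
+
+
+model = ToyCoDetector()
+primary, aux = model(torch.randn(2, 3, 256, 256))
+# Training objective: the one-to-one matching loss of the primary head plus the
+# one-to-many losses of every auxiliary head, all flowing back into the shared
+# encoder (replaced here by dummy means to keep the sketch runnable).
+toy_loss = primary.mean() + sum(a.mean() for a in aux)
+toy_loss.backward()
+```
+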
+ +## Results and Models + +| Model | Backbone | Epochs | Aug | Dataset | box AP | Config | Download | +| :-------: | :------: | :----: | :--: | :---------------------------: | :----: | :--------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Co-DINO | R50 | 12 | LSJ | COCO | 52.0 | [config](configs/codino/co_dino_5scale_r50_lsj_8xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_r50_lsj_8xb2_1x_coco/co_dino_5scale_r50_lsj_8xb2_1x_coco-69a72d67.pth)\\ [log](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_r50_lsj_8xb2_1x_coco/co_dino_5scale_r50_lsj_8xb2_1x_coco_20230818_150457.json) | +| Co-DINO\* | R50 | 12 | DETR | COCO | 52.1 | [config](configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_r50_1x_coco-7481f903.pth) | +| Co-DINO\* | R50 | 36 | LSJ | COCO | 54.8 | [config](configs/codino/co_dino_5scale_r50_lsj_8xb2_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_lsj_r50_3x_coco-fe5a6829.pth) | +| Co-DINO\* | Swin-L | 12 | DETR | COCO | 58.9 | [config](configs/codino/co_dino_5scale_swin_l_16xb1_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_1x_coco-27c13da4.pth) | +| Co-DINO\* | Swin-L | 12 | LSJ | COCO | 59.3 | [config](configs/codino/co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_lsj_swin_large_1x_coco-3af73af2.pth) | +| Co-DINO\* | Swin-L | 36 | DETR | COCO | 60.0 | [config](configs/codino/co_dino_5scale_swin_l_16xb1_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_3x_coco-d7a6d8af.pth) | +| Co-DINO\* | Swin-L | 36 | LSJ | COCO | 60.7 | [config](configs/codino/co_dino_5scale_swin_l_lsj_16xb1_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_lsj_swin_large_1x_coco-3af73af2.pth) | +| Co-DINO\* | Swin-L | 16 | DETR | Objects365 pre-trained + COCO | 64.1 | [config](configs/codino/co_dino_5scale_swin_l_16xb1_16e_o365tococo.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_16e_o365tococo-614254c9.pth) | + +Note + +- Models labeled * are not trained by us, but from [CO-DETR](https://github.com/Sense-X/Co-DETR) official website. +- We find that the performance is unstable and may fluctuate by about 0.3 mAP. +- If you want to save GPU memory by enabling checkpointing, please use the `pip install fairscale` command. diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/__init__.py b/mmpose/configs/mmdet/CO-DETR/codetr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca4c02d9f7b71643b3b63ef4df254b87d4f9661 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .co_atss_head import CoATSSHead +from .co_dino_head import CoDINOHead +from .co_roi_head import CoStandardRoIHead +from .codetr import CoDETR +from .transformer import (CoDinoTransformer, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DinoTransformerDecoder) + +__all__ = [ + 'CoDETR', 'CoDinoTransformer', 'DinoTransformerDecoder', 'CoDINOHead', + 'CoATSSHead', 'CoStandardRoIHead', 'DetrTransformerEncoder', + 'DetrTransformerDecoderLayer' +] diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/co_atss_head.py b/mmpose/configs/mmdet/CO-DETR/codetr/co_atss_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ae0180da7be292b67a5bb83c1ad34b848ff17a --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/co_atss_head.py @@ -0,0 +1,153 @@ +from typing import List + +import torch +from torch import Tensor + +from mmdet.models.dense_heads import ATSSHead +from mmdet.models.utils import images_to_levels, multi_apply +from mmdet.registry import MODELS +from mmdet.utils import InstanceList, OptInstanceList, reduce_mean + + +@MODELS.register_module() +class CoATSSHead(ATSSHead): + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + centernesses: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + centernesses (list[Tensor]): Centerness for each scale + level with shape (N, num_anchors * 1, H, W) + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, batch_img_metas, device=device) + + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + batch_gt_instances, + batch_img_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore) + + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, avg_factor, ori_anchors, ori_labels, + ori_bbox_targets) = cls_reg_targets + + avg_factor = reduce_mean( + torch.tensor(avg_factor, dtype=torch.float, device=device)).item() + + losses_cls, losses_bbox, loss_centerness, \ + bbox_avg_factor = multi_apply( + self.loss_by_feat_single, + anchor_list, + cls_scores, + bbox_preds, + centernesses, + labels_list, + label_weights_list, + bbox_targets_list, + avg_factor=avg_factor) + + bbox_avg_factor = sum(bbox_avg_factor) + bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() + losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) + + # diff + pos_coords = (ori_anchors, ori_labels, ori_bbox_targets, 'atss') + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_centerness=loss_centerness, + pos_coords=pos_coords) + + def get_targets(self, + anchor_list: List[List[Tensor]], + valid_flag_list: List[List[Tensor]], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None, + unmap_outputs: bool = True) -> tuple: + """Get targets for ATSS head. + + This method is almost the same as `AnchorHead.get_targets()`. Besides + returning the targets as the parent method does, it also returns the + anchors as the first element of the returned tuple. + """ + num_imgs = len(batch_img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + num_level_anchors_list = [num_level_anchors] * num_imgs + + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list[i] = torch.cat(anchor_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if batch_gt_instances_ignore is None: + batch_gt_instances_ignore = [None] * num_imgs + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list, + sampling_results_list) = multi_apply( + self._get_targets_single, + anchor_list, + valid_flag_list, + num_level_anchors_list, + batch_gt_instances, + batch_img_metas, + batch_gt_instances_ignore, + unmap_outputs=unmap_outputs) + # Get `avg_factor` of all images, which calculate in `SamplingResult`. + # When using sampling method, avg_factor is usually the sum of + # positive and negative priors. When using `PseudoSampler`, + # `avg_factor` is usually equal to the number of positive priors. + avg_factor = sum( + [results.avg_factor for results in sampling_results_list]) + # split targets to a list w.r.t. 
multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors) + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + + # diff + ori_anchors = all_anchors + ori_labels = all_labels + ori_bbox_targets = all_bbox_targets + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, bbox_weights_list, avg_factor, ori_anchors, + ori_labels, ori_bbox_targets) diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/co_dino_head.py b/mmpose/configs/mmdet/CO-DETR/codetr/co_dino_head.py new file mode 100644 index 0000000000000000000000000000000000000000..192acf97d86c5d24b623608a46d564a8753b5b7b --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/co_dino_head.py @@ -0,0 +1,677 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Linear +from mmcv.ops import batched_nms +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet.models import DINOHead +from mmdet.models.layers import CdnQueryGenerator +from mmdet.models.layers.transformer import inverse_sigmoid +from mmdet.models.utils import multi_apply +from mmdet.registry import MODELS +from mmdet.structures import SampleList +from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps, + bbox_xyxy_to_cxcywh) +from mmdet.utils import InstanceList, reduce_mean + + +@MODELS.register_module() +class CoDINOHead(DINOHead): + + def __init__(self, + *args, + num_query=900, + transformer=None, + in_channels=2048, + max_pos_coords=300, + dn_cfg=None, + use_zero_padding=False, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + **kwargs): + self.with_box_refine = True + self.mixed_selection = True + self.in_channels = in_channels + self.max_pos_coords = max_pos_coords + self.positional_encoding = positional_encoding + self.num_query = num_query + self.use_zero_padding = use_zero_padding + + if 'two_stage_num_proposals' in transformer: + assert transformer['two_stage_num_proposals'] == num_query, \ + 'two_stage_num_proposals must be equal to num_query for DINO' + else: + transformer['two_stage_num_proposals'] = num_query + transformer['as_two_stage'] = True + if self.mixed_selection: + transformer['mixed_selection'] = self.mixed_selection + self.transformer = transformer + self.act_cfg = transformer.get('act_cfg', + dict(type='ReLU', inplace=True)) + + super().__init__(*args, **kwargs) + + self.activate = MODELS.build(self.act_cfg) + self.positional_encoding = MODELS.build(self.positional_encoding) + self.init_denoising(dn_cfg) + + def _init_layers(self): + self.transformer = MODELS.build(self.transformer) + self.embed_dims = self.transformer.embed_dims + assert hasattr(self.positional_encoding, 'num_feats') + num_feats = self.positional_encoding.num_feats + assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ + f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ + f' and {num_feats}.' 
+ """Initialize classification branch and regression branch of head.""" + fc_cls = Linear(self.embed_dims, self.cls_out_channels) + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, 4)) + reg_branch = nn.Sequential(*reg_branch) + + def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + # last reg_branch is used to generate proposal from + # encode feature map when as_two_stage is True. + num_pred = (self.transformer.decoder.num_layers + 1) if \ + self.as_two_stage else self.transformer.decoder.num_layers + + self.cls_branches = _get_clones(fc_cls, num_pred) + self.reg_branches = _get_clones(reg_branch, num_pred) + + self.downsample = nn.Sequential( + nn.Conv2d( + self.embed_dims, + self.embed_dims, + kernel_size=3, + stride=2, + padding=1), nn.GroupNorm(32, self.embed_dims)) + + def init_denoising(self, dn_cfg): + if dn_cfg is not None: + dn_cfg['num_classes'] = self.num_classes + dn_cfg['num_matching_queries'] = self.num_query + dn_cfg['embed_dims'] = self.embed_dims + self.dn_generator = CdnQueryGenerator(**dn_cfg) + + def forward(self, + mlvl_feats, + img_metas, + dn_label_query=None, + dn_bbox_query=None, + attn_mask=None): + batch_size = mlvl_feats[0].size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + img_masks = mlvl_feats[0].new_ones( + (batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w = img_metas[img_id]['img_shape'] + img_masks[img_id, :img_h, :img_w] = 0 + + mlvl_masks = [] + mlvl_positional_encodings = [] + for feat in mlvl_feats: + mlvl_masks.append( + F.interpolate(img_masks[None], + size=feat.shape[-2:]).to(torch.bool).squeeze(0)) + mlvl_positional_encodings.append( + self.positional_encoding(mlvl_masks[-1])) + + query_embeds = None + hs, inter_references, topk_score, topk_anchor, enc_outputs = \ + self.transformer( + mlvl_feats, + mlvl_masks, + query_embeds, + mlvl_positional_encodings, + dn_label_query, + dn_bbox_query, + attn_mask, + reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 + cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 + ) + outs = [] + num_level = len(mlvl_feats) + start = 0 + for lvl in range(num_level): + bs, c, h, w = mlvl_feats[lvl].shape + end = start + h * w + feat = enc_outputs[start:end].permute(1, 2, 0).contiguous() + start = end + outs.append(feat.reshape(bs, c, h, w)) + outs.append(self.downsample(outs[-1])) + + hs = hs.permute(0, 2, 1, 3) + + if dn_label_query is not None and dn_label_query.size(1) == 0: + # NOTE: If there is no target in the image, the parameters of + # label_embedding won't be used in producing loss, which raises + # RuntimeError when using distributed mode. 
+ hs[0] += self.dn_generator.label_embedding.weight[0, 0] * 0.0 + + outputs_classes = [] + outputs_coords = [] + + for lvl in range(hs.shape[0]): + reference = inter_references[lvl] + reference = inverse_sigmoid(reference, eps=1e-3) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + if reference.shape[-1] == 4: + tmp += reference + else: + assert reference.shape[-1] == 2 + tmp[..., :2] += reference + outputs_coord = tmp.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + + return outputs_classes, outputs_coords, topk_score, topk_anchor, outs + + def predict(self, + feats: List[Tensor], + batch_data_samples: SampleList, + rescale: bool = True) -> InstanceList: + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + outs = self.forward(feats, batch_img_metas) + + predictions = self.predict_by_feat( + *outs, batch_img_metas=batch_img_metas, rescale=rescale) + + return predictions + + def predict_by_feat(self, + all_cls_scores, + all_bbox_preds, + enc_cls_scores, + enc_bbox_preds, + enc_outputs, + batch_img_metas, + rescale=True): + + cls_scores = all_cls_scores[-1] + bbox_preds = all_bbox_preds[-1] + + result_list = [] + for img_id in range(len(batch_img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + img_meta = batch_img_metas[img_id] + results = self._predict_by_feat_single(cls_score, bbox_pred, + img_meta, rescale) + result_list.append(results) + return result_list + + def _predict_by_feat_single(self, + cls_score: Tensor, + bbox_pred: Tensor, + img_meta: dict, + rescale: bool = True) -> InstanceData: + """Transform outputs from the last decoder layer into bbox predictions + for each image. + + Args: + cls_score (Tensor): Box score logits from the last decoder layer + for each image. Shape [num_queries, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from the last decoder layer + for each image, with coordinate format (cx, cy, w, h) and + shape [num_queries, 4]. + img_meta (dict): Image meta info. + rescale (bool): If True, return boxes in original image + space. Default True. + + Returns: + :obj:`InstanceData`: Detection results of each image + after the post process. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). 
+ """ + assert len(cls_score) == len(bbox_pred) # num_queries + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + score_thr = self.test_cfg.get('score_thr', 0) + with_nms = self.test_cfg.get('nms', None) + + img_shape = img_meta['img_shape'] + # exclude background + if self.loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + scores, indexes = cls_score.view(-1).topk(max_per_img) + det_labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_pred = bbox_pred[bbox_index] + else: + scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img) + bbox_pred = bbox_pred[bbox_index] + det_labels = det_labels[bbox_index] + + if score_thr > 0: + valid_mask = scores > score_thr + scores = scores[valid_mask] + bbox_pred = bbox_pred[valid_mask] + det_labels = det_labels[valid_mask] + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) + det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] + det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] + det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) + det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) + if rescale: + assert img_meta.get('scale_factor') is not None + det_bboxes /= det_bboxes.new_tensor( + img_meta['scale_factor']).repeat((1, 2)) + + results = InstanceData() + results.bboxes = det_bboxes + results.scores = scores + results.labels = det_labels + + if with_nms and results.bboxes.numel() > 0: + det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores, + results.labels, + self.test_cfg.nms) + results = results[keep_idxs] + results.scores = det_bboxes[:, -1] + results = results[:max_per_img] + + return results + + def loss(self, x, batch_data_samples): + assert self.dn_generator is not None, '"dn_cfg" must be set' + + batch_gt_instances = [] + batch_img_metas = [] + for data_sample in batch_data_samples: + batch_img_metas.append(data_sample.metainfo) + batch_gt_instances.append(data_sample.gt_instances) + + dn_label_query, dn_bbox_query, attn_mask, dn_meta = \ + self.dn_generator(batch_data_samples) + + outs = self(x, batch_img_metas, dn_label_query, dn_bbox_query, + attn_mask) + + loss_inputs = outs[:-1] + (batch_gt_instances, batch_img_metas, + dn_meta) + losses = self.loss_by_feat(*loss_inputs) + enc_outputs = outs[-1] + return losses, enc_outputs + + def forward_aux(self, mlvl_feats, img_metas, aux_targets, head_idx): + """Forward function. + + Args: + mlvl_feats (tuple[Tensor]): Features from the upstream + network, each is a 4D-tensor with shape + (N, C, H, W). + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, h). \ + Shape [nb_dec, bs, num_query, 4]. + enc_outputs_class (Tensor): The score of each point on encode \ + feature map, has shape (N, h*w, num_class). Only when \ + as_two_stage is True it would be returned, otherwise \ + `None` would be returned. + enc_outputs_coord (Tensor): The proposal generate from the \ + encode feature map, has shape (N, h*w, 4). Only when \ + as_two_stage is True it would be returned, otherwise \ + `None` would be returned. 
+ """ + aux_coords, aux_labels, aux_targets, aux_label_weights, \ + aux_bbox_weights, aux_feats, attn_masks = aux_targets + batch_size = mlvl_feats[0].size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + img_masks = mlvl_feats[0].new_ones( + (batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w = img_metas[img_id]['img_shape'] + img_masks[img_id, :img_h, :img_w] = 0 + + mlvl_masks = [] + mlvl_positional_encodings = [] + for feat in mlvl_feats: + mlvl_masks.append( + F.interpolate(img_masks[None], + size=feat.shape[-2:]).to(torch.bool).squeeze(0)) + mlvl_positional_encodings.append( + self.positional_encoding(mlvl_masks[-1])) + + query_embeds = None + hs, inter_references = self.transformer.forward_aux( + mlvl_feats, + mlvl_masks, + query_embeds, + mlvl_positional_encodings, + aux_coords, + pos_feats=aux_feats, + reg_branches=self.reg_branches if self.with_box_refine else None, + cls_branches=self.cls_branches if self.as_two_stage else None, + return_encoder_output=True, + attn_masks=attn_masks, + head_idx=head_idx) + + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + + for lvl in range(hs.shape[0]): + reference = inter_references[lvl] + reference = inverse_sigmoid(reference, eps=1e-3) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + if reference.shape[-1] == 4: + tmp += reference + else: + assert reference.shape[-1] == 2 + tmp[..., :2] += reference + outputs_coord = tmp.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + + return outputs_classes, outputs_coords, None, None + + def loss_aux(self, + x, + pos_coords=None, + head_idx=0, + batch_data_samples=None): + batch_gt_instances = [] + batch_img_metas = [] + for data_sample in batch_data_samples: + batch_img_metas.append(data_sample.metainfo) + batch_gt_instances.append(data_sample.gt_instances) + + gt_bboxes = [b.bboxes for b in batch_gt_instances] + gt_labels = [b.labels for b in batch_gt_instances] + + aux_targets = self.get_aux_targets(pos_coords, batch_img_metas, x, + head_idx) + outs = self.forward_aux(x[:-1], batch_img_metas, aux_targets, head_idx) + outs = outs + aux_targets + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, batch_img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, batch_img_metas) + losses = self.loss_aux_by_feat(*loss_inputs) + return losses + + def get_aux_targets(self, pos_coords, img_metas, mlvl_feats, head_idx): + coords, labels, targets = pos_coords[:3] + head_name = pos_coords[-1] + bs, c = len(coords), mlvl_feats[0].shape[1] + max_num_coords = 0 + all_feats = [] + for i in range(bs): + label = labels[i] + feats = [ + feat[i].reshape(c, -1).transpose(1, 0) for feat in mlvl_feats + ] + feats = torch.cat(feats, dim=0) + bg_class_ind = self.num_classes + pos_inds = ((label >= 0) + & (label < bg_class_ind)).nonzero().squeeze(1) + max_num_coords = max(max_num_coords, len(pos_inds)) + all_feats.append(feats) + max_num_coords = min(self.max_pos_coords, max_num_coords) + max_num_coords = max(9, max_num_coords) + + if self.use_zero_padding: + attn_masks = [] + label_weights = coords[0].new_zeros([bs, max_num_coords]) + else: + attn_masks = None + label_weights = coords[0].new_ones([bs, max_num_coords]) + bbox_weights = coords[0].new_zeros([bs, max_num_coords, 4]) + + aux_coords, aux_labels, aux_targets, aux_feats = [], [], [], [] + + 
for i in range(bs): + coord, label, target = coords[i], labels[i], targets[i] + feats = all_feats[i] + if 'rcnn' in head_name: + feats = pos_coords[-2][i] + num_coords_per_point = 1 + else: + num_coords_per_point = coord.shape[0] // feats.shape[0] + feats = feats.unsqueeze(1).repeat(1, num_coords_per_point, 1) + feats = feats.reshape(feats.shape[0] * num_coords_per_point, + feats.shape[-1]) + img_meta = img_metas[i] + img_h, img_w = img_meta['img_shape'] + factor = coord.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + bg_class_ind = self.num_classes + pos_inds = ((label >= 0) + & (label < bg_class_ind)).nonzero().squeeze(1) + neg_inds = (label == bg_class_ind).nonzero().squeeze(1) + if pos_inds.shape[0] > max_num_coords: + indices = torch.randperm( + pos_inds.shape[0])[:max_num_coords].cuda() + pos_inds = pos_inds[indices] + + coord = bbox_xyxy_to_cxcywh(coord[pos_inds] / factor) + label = label[pos_inds] + target = bbox_xyxy_to_cxcywh(target[pos_inds] / factor) + feat = feats[pos_inds] + + if self.use_zero_padding: + label_weights[i][:len(label)] = 1 + bbox_weights[i][:len(label)] = 1 + attn_mask = torch.zeros([ + max_num_coords, + max_num_coords, + ]).bool().to(coord.device) + else: + bbox_weights[i][:len(label)] = 1 + + if coord.shape[0] < max_num_coords: + padding_shape = max_num_coords - coord.shape[0] + if self.use_zero_padding: + padding_coord = coord.new_zeros([padding_shape, 4]) + padding_label = label.new_ones([padding_shape + ]) * self.num_classes + padding_target = target.new_zeros([padding_shape, 4]) + padding_feat = feat.new_zeros([padding_shape, c]) + attn_mask[coord.shape[0]:, 0:coord.shape[0], ] = True + attn_mask[:, coord.shape[0]:, ] = True + else: + indices = torch.randperm( + neg_inds.shape[0])[:padding_shape].cuda() + neg_inds = neg_inds[indices] + padding_coord = bbox_xyxy_to_cxcywh(coords[i][neg_inds] / + factor) + padding_label = labels[i][neg_inds] + padding_target = bbox_xyxy_to_cxcywh(targets[i][neg_inds] / + factor) + padding_feat = feats[neg_inds] + coord = torch.cat((coord, padding_coord), dim=0) + label = torch.cat((label, padding_label), dim=0) + target = torch.cat((target, padding_target), dim=0) + feat = torch.cat((feat, padding_feat), dim=0) + if self.use_zero_padding: + attn_masks.append(attn_mask.unsqueeze(0)) + aux_coords.append(coord.unsqueeze(0)) + aux_labels.append(label.unsqueeze(0)) + aux_targets.append(target.unsqueeze(0)) + aux_feats.append(feat.unsqueeze(0)) + + if self.use_zero_padding: + attn_masks = torch.cat( + attn_masks, dim=0).unsqueeze(1).repeat(1, 8, 1, 1) + attn_masks = attn_masks.reshape(bs * 8, max_num_coords, + max_num_coords) + else: + attn_masks = None + + aux_coords = torch.cat(aux_coords, dim=0) + aux_labels = torch.cat(aux_labels, dim=0) + aux_targets = torch.cat(aux_targets, dim=0) + aux_feats = torch.cat(aux_feats, dim=0) + aux_label_weights = label_weights + aux_bbox_weights = bbox_weights + return (aux_coords, aux_labels, aux_targets, aux_label_weights, + aux_bbox_weights, aux_feats, attn_masks) + + def loss_aux_by_feat(self, + all_cls_scores, + all_bbox_preds, + enc_cls_scores, + enc_bbox_preds, + aux_coords, + aux_labels, + aux_targets, + aux_label_weights, + aux_bbox_weights, + aux_feats, + attn_masks, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore=None): + num_dec_layers = len(all_cls_scores) + all_labels = [aux_labels for _ in range(num_dec_layers)] + all_label_weights = [aux_label_weights for _ in range(num_dec_layers)] + all_bbox_targets = [aux_targets for _ in range(num_dec_layers)] 
+ all_bbox_weights = [aux_bbox_weights for _ in range(num_dec_layers)] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + losses_cls, losses_bbox, losses_iou = multi_apply( + self._loss_aux_by_feat_single, all_cls_scores, all_bbox_preds, + all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + img_metas_list, all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss of proposal generated from encode feature map. + + # loss from the last decoder layer + loss_dict['loss_cls_aux'] = losses_cls[-1] + loss_dict['loss_bbox_aux'] = losses_bbox[-1] + loss_dict['loss_iou_aux'] = losses_iou[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], + losses_bbox[:-1], + losses_iou[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls_aux'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox_aux'] = loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_iou_aux'] = loss_iou_i + num_dec_layer += 1 + return loss_dict + + def _loss_aux_by_feat_single(self, + cls_scores, + bbox_preds, + labels, + label_weights, + bbox_targets, + bbox_weights, + img_metas, + gt_bboxes_ignore_list=None): + num_imgs = cls_scores.size(0) + num_q = cls_scores.size(1) + + try: + labels = labels.reshape(num_imgs * num_q) + label_weights = label_weights.reshape(num_imgs * num_q) + bbox_targets = bbox_targets.reshape(num_imgs * num_q, 4) + bbox_weights = bbox_weights.reshape(num_imgs * num_q, 4) + except Exception: + return cls_scores.mean() * 0, cls_scores.mean( + ) * 0, cls_scores.mean() * 0 + + bg_class_ind = self.num_classes + num_total_pos = len( + ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)) + num_total_neg = num_imgs * num_q - num_total_pos + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + cls_avg_factor = max(cls_avg_factor, 1) + + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + scores = label_weights.new_zeros(labels.shape) + pos_bbox_targets = bbox_targets[pos_inds] + pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) + pos_bbox_pred = bbox_preds.reshape(-1, 4)[pos_inds] + pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) + scores[pos_inds] = bbox_overlaps( + pos_decode_bbox_pred.detach(), + pos_decode_bbox_targets, + is_aligned=True) + loss_cls = self.loss_cls( + cls_scores, (labels, scores), + weight=label_weights, + avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes across all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # construct factors used for rescale bboxes + factors = [] + for img_meta, bbox_pred in zip(img_metas, bbox_preds): + img_h, img_w = img_meta['img_shape'] + factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0).repeat( + bbox_pred.size(0), 1) + factors.append(factor) + factors = torch.cat(factors, 0) + + # DETR regress the relative position of boxes (cxcywh) in the image, + # thus the learning target is normalized by the image size. 
So here + # we need to re-scale them for calculating IoU loss + bbox_preds = bbox_preds.reshape(-1, 4) + bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors + bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors + + # regression IoU loss, defaultly GIoU loss + loss_iou = self.loss_iou( + bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) + + # regression L1 loss + loss_bbox = self.loss_bbox( + bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) + return loss_cls, loss_bbox, loss_iou diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/co_roi_head.py b/mmpose/configs/mmdet/CO-DETR/codetr/co_roi_head.py new file mode 100644 index 0000000000000000000000000000000000000000..9aafb53beddf07428e59d83e9de832ff5102821a --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/co_roi_head.py @@ -0,0 +1,108 @@ +from typing import List, Tuple + +import torch +from torch import Tensor + +from mmdet.models.roi_heads import StandardRoIHead +from mmdet.models.task_modules.samplers import SamplingResult +from mmdet.models.utils import unpack_gt_instances +from mmdet.registry import MODELS +from mmdet.structures import DetDataSample +from mmdet.structures.bbox import bbox2roi +from mmdet.utils import InstanceList + + +@MODELS.register_module() +class CoStandardRoIHead(StandardRoIHead): + + def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, + batch_data_samples: List[DetDataSample]) -> dict: + max_proposal = 2000 + + assert len(rpn_results_list) == len(batch_data_samples) + outputs = unpack_gt_instances(batch_data_samples) + batch_gt_instances, batch_gt_instances_ignore, _ = outputs + + # assign gts and sample proposals + num_imgs = len(batch_data_samples) + sampling_results = [] + for i in range(num_imgs): + # rename rpn_results.bboxes to rpn_results.priors + rpn_results = rpn_results_list[i] + rpn_results.priors = rpn_results.pop('bboxes') + + assign_result = self.bbox_assigner.assign( + rpn_results, batch_gt_instances[i], + batch_gt_instances_ignore[i]) + sampling_result = self.bbox_sampler.sample( + assign_result, + rpn_results, + batch_gt_instances[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + losses = dict() + # bbox head forward and loss + if self.with_bbox: + bbox_results = self.bbox_loss(x, sampling_results) + losses.update(bbox_results['loss_bbox']) + + bbox_targets = bbox_results['bbox_targets'] + for res in sampling_results: + max_proposal = min(max_proposal, res.bboxes.shape[0]) + ori_coords = bbox2roi([res.bboxes for res in sampling_results]) + ori_proposals, ori_labels, \ + ori_bbox_targets, ori_bbox_feats = [], [], [], [] + for i in range(num_imgs): + idx = (ori_coords[:, 0] == i).nonzero().squeeze(1) + idx = idx[:max_proposal] + ori_proposal = ori_coords[idx][:, 1:].unsqueeze(0) + ori_label = bbox_targets[0][idx].unsqueeze(0) + ori_bbox_target = bbox_targets[2][idx].unsqueeze(0) + ori_bbox_feat = bbox_results['bbox_feats'].mean(-1).mean(-1) + ori_bbox_feat = ori_bbox_feat[idx].unsqueeze(0) + ori_proposals.append(ori_proposal) + ori_labels.append(ori_label) + ori_bbox_targets.append(ori_bbox_target) + ori_bbox_feats.append(ori_bbox_feat) + ori_coords = torch.cat(ori_proposals, dim=0) + ori_labels = torch.cat(ori_labels, dim=0) + ori_bbox_targets = torch.cat(ori_bbox_targets, dim=0) + ori_bbox_feats = torch.cat(ori_bbox_feats, dim=0) + pos_coords = (ori_coords, ori_labels, ori_bbox_targets, + ori_bbox_feats, 'rcnn') + losses.update(pos_coords=pos_coords) + + return losses + + def bbox_loss(self, x: 
Tuple[Tensor], + sampling_results: List[SamplingResult]) -> dict: + """Perform forward propagation and loss calculation of the bbox head on + the features of the upstream network. + + Args: + x (tuple[Tensor]): List of multi-level img features. + sampling_results (list["obj:`SamplingResult`]): Sampling results. + + Returns: + dict[str, Tensor]: Usually returns a dictionary with keys: + + - `cls_score` (Tensor): Classification scores. + - `bbox_pred` (Tensor): Box energies / deltas. + - `bbox_feats` (Tensor): Extract bbox RoI features. + - `loss_bbox` (dict): A dictionary of bbox loss components. + """ + rois = bbox2roi([res.priors for res in sampling_results]) + bbox_results = self._bbox_forward(x, rois) + + bbox_loss_and_target = self.bbox_head.loss_and_target( + cls_score=bbox_results['cls_score'], + bbox_pred=bbox_results['bbox_pred'], + rois=rois, + sampling_results=sampling_results, + rcnn_train_cfg=self.train_cfg) + + bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) + # diff + bbox_results.update(bbox_targets=bbox_loss_and_target['bbox_targets']) + return bbox_results diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/codetr.py b/mmpose/configs/mmdet/CO-DETR/codetr/codetr.py new file mode 100644 index 0000000000000000000000000000000000000000..82826f641075c0af7eebd322b6b36b53390cc648 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/codetr.py @@ -0,0 +1,320 @@ +import copy +from typing import Tuple, Union + +import torch +import torch.nn as nn +from torch import Tensor + +from mmdet.models.detectors.base import BaseDetector +from mmdet.registry import MODELS +from mmdet.structures import OptSampleList, SampleList +from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig + + +@MODELS.register_module() +class CoDETR(BaseDetector): + + def __init__( + self, + backbone, + neck=None, + query_head=None, # detr head + rpn_head=None, # two-stage rpn + roi_head=[None], # two-stage + bbox_head=[None], # one-stage + train_cfg=[None, None], + test_cfg=[None, None], + # Control whether to consider positive samples + # from the auxiliary head as additional positive queries. + with_pos_coord=True, + use_lsj=True, + eval_module='detr', + # Evaluate the Nth head. 
+ eval_index=0, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super(CoDETR, self).__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.with_pos_coord = with_pos_coord + self.use_lsj = use_lsj + + assert eval_module in ['detr', 'one-stage', 'two-stage'] + self.eval_module = eval_module + + self.backbone = MODELS.build(backbone) + if neck is not None: + self.neck = MODELS.build(neck) + # Module index for evaluation + self.eval_index = eval_index + head_idx = 0 + if query_head is not None: + query_head.update(train_cfg=train_cfg[head_idx] if ( + train_cfg is not None and train_cfg[head_idx] is not None + ) else None) + query_head.update(test_cfg=test_cfg[head_idx]) + self.query_head = MODELS.build(query_head) + self.query_head.init_weights() + head_idx += 1 + + if rpn_head is not None: + rpn_train_cfg = train_cfg[head_idx].rpn if ( + train_cfg is not None + and train_cfg[head_idx] is not None) else None + rpn_head_ = rpn_head.copy() + rpn_head_.update( + train_cfg=rpn_train_cfg, test_cfg=test_cfg[head_idx].rpn) + self.rpn_head = MODELS.build(rpn_head_) + self.rpn_head.init_weights() + + self.roi_head = nn.ModuleList() + for i in range(len(roi_head)): + if roi_head[i]: + rcnn_train_cfg = train_cfg[i + head_idx].rcnn if ( + train_cfg + and train_cfg[i + head_idx] is not None) else None + roi_head[i].update(train_cfg=rcnn_train_cfg) + roi_head[i].update(test_cfg=test_cfg[i + head_idx].rcnn) + self.roi_head.append(MODELS.build(roi_head[i])) + self.roi_head[-1].init_weights() + + self.bbox_head = nn.ModuleList() + for i in range(len(bbox_head)): + if bbox_head[i]: + bbox_head[i].update( + train_cfg=train_cfg[i + head_idx + len(self.roi_head)] if ( + train_cfg and train_cfg[i + head_idx + + len(self.roi_head)] is not None + ) else None) + bbox_head[i].update(test_cfg=test_cfg[i + head_idx + + len(self.roi_head)]) + self.bbox_head.append(MODELS.build(bbox_head[i])) + self.bbox_head[-1].init_weights() + + self.head_idx = head_idx + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + @property + def with_rpn(self): + """bool: whether the detector has RPN""" + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + @property + def with_query_head(self): + """bool: whether the detector has a RoI head""" + return hasattr(self, 'query_head') and self.query_head is not None + + @property + def with_roi_head(self): + """bool: whether the detector has a RoI head""" + return hasattr(self, 'roi_head') and self.roi_head is not None and len( + self.roi_head) > 0 + + @property + def with_shared_head(self): + """bool: whether the detector has a shared head in the RoI Head""" + return hasattr(self, 'roi_head') and self.roi_head[0].with_shared_head + + @property + def with_bbox(self): + """bool: whether the detector has a bbox head""" + return ((hasattr(self, 'roi_head') and self.roi_head is not None + and len(self.roi_head) > 0) + or (hasattr(self, 'bbox_head') and self.bbox_head is not None + and len(self.bbox_head) > 0)) + + def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + batch_inputs (Tensor): Image tensor, has shape (bs, dim, H, W). + + Returns: + tuple[Tensor]: Tuple of feature maps from neck. Each feature map + has shape (bs, dim, H, W). 
+ """ + x = self.backbone(batch_inputs) + if self.with_neck: + x = self.neck(x) + return x + + def _forward(self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None): + pass + + def loss(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Union[dict, list]: + batch_input_shape = batch_data_samples[0].batch_input_shape + if self.use_lsj: + for data_samples in batch_data_samples: + img_metas = data_samples.metainfo + input_img_h, input_img_w = batch_input_shape + img_metas['img_shape'] = [input_img_h, input_img_w] + + x = self.extract_feat(batch_inputs) + + losses = dict() + + def upd_loss(losses, idx, weight=1): + new_losses = dict() + for k, v in losses.items(): + new_k = '{}{}'.format(k, idx) + if isinstance(v, list) or isinstance(v, tuple): + new_losses[new_k] = [i * weight for i in v] + else: + new_losses[new_k] = v * weight + return new_losses + + # DETR encoder and decoder forward + if self.with_query_head: + bbox_losses, x = self.query_head.loss(x, batch_data_samples) + losses.update(bbox_losses) + + # RPN forward and loss + if self.with_rpn: + proposal_cfg = self.train_cfg[self.head_idx].get( + 'rpn_proposal', self.test_cfg[self.head_idx].rpn) + + rpn_data_samples = copy.deepcopy(batch_data_samples) + # set cat_id of gt_labels to 0 in RPN + for data_sample in rpn_data_samples: + data_sample.gt_instances.labels = \ + torch.zeros_like(data_sample.gt_instances.labels) + + rpn_losses, proposal_list = self.rpn_head.loss_and_predict( + x, rpn_data_samples, proposal_cfg=proposal_cfg) + + # avoid get same name with roi_head loss + keys = rpn_losses.keys() + for key in list(keys): + if 'loss' in key and 'rpn' not in key: + rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key) + + losses.update(rpn_losses) + else: + assert batch_data_samples[0].get('proposals', None) is not None + # use pre-defined proposals in InstanceData for the second stage + # to extract ROI features. + proposal_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + positive_coords = [] + for i in range(len(self.roi_head)): + roi_losses = self.roi_head[i].loss(x, proposal_list, + batch_data_samples) + if self.with_pos_coord: + positive_coords.append(roi_losses.pop('pos_coords')) + else: + if 'pos_coords' in roi_losses.keys(): + roi_losses.pop('pos_coords') + roi_losses = upd_loss(roi_losses, idx=i) + losses.update(roi_losses) + + for i in range(len(self.bbox_head)): + bbox_losses = self.bbox_head[i].loss(x, batch_data_samples) + if self.with_pos_coord: + pos_coords = bbox_losses.pop('pos_coords') + positive_coords.append(pos_coords) + else: + if 'pos_coords' in bbox_losses.keys(): + bbox_losses.pop('pos_coords') + bbox_losses = upd_loss(bbox_losses, idx=i + len(self.roi_head)) + losses.update(bbox_losses) + + if self.with_pos_coord and len(positive_coords) > 0: + for i in range(len(positive_coords)): + bbox_losses = self.query_head.loss_aux(x, positive_coords[i], + i, batch_data_samples) + bbox_losses = upd_loss(bbox_losses, idx=i) + losses.update(bbox_losses) + + return losses + + def predict(self, + batch_inputs: Tensor, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W). + batch_data_samples (List[:obj:`DetDataSample`]): The batch + data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + rescale (bool): Whether to rescale the results. 
+ Defaults to True. + + Returns: + list[:obj:`DetDataSample`]: Detection results of the input images. + Each DetDataSample usually contain 'pred_instances'. And the + `pred_instances` usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + """ + assert self.eval_module in ['detr', 'one-stage', 'two-stage'] + + if self.use_lsj: + for data_samples in batch_data_samples: + img_metas = data_samples.metainfo + input_img_h, input_img_w = img_metas['batch_input_shape'] + img_metas['img_shape'] = [input_img_h, input_img_w] + + img_feats = self.extract_feat(batch_inputs) + if self.with_bbox and self.eval_module == 'one-stage': + results_list = self.predict_bbox_head( + img_feats, batch_data_samples, rescale=rescale) + elif self.with_roi_head and self.eval_module == 'two-stage': + results_list = self.predict_roi_head( + img_feats, batch_data_samples, rescale=rescale) + else: + results_list = self.predict_query_head( + img_feats, batch_data_samples, rescale=rescale) + + batch_data_samples = self.add_pred_to_datasample( + batch_data_samples, results_list) + return batch_data_samples + + def predict_query_head(self, + mlvl_feats: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = True) -> InstanceList: + return self.query_head.predict( + mlvl_feats, batch_data_samples=batch_data_samples, rescale=rescale) + + def predict_roi_head(self, + mlvl_feats: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = True) -> InstanceList: + assert self.with_bbox, 'Bbox head must be implemented.' + if self.with_query_head: + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + results = self.query_head.forward(mlvl_feats, batch_img_metas) + mlvl_feats = results[-1] + rpn_results_list = self.rpn_head.predict( + mlvl_feats, batch_data_samples, rescale=False) + return self.roi_head[self.eval_index].predict( + mlvl_feats, rpn_results_list, batch_data_samples, rescale=rescale) + + def predict_bbox_head(self, + mlvl_feats: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = True) -> InstanceList: + assert self.with_bbox, 'Bbox head must be implemented.' 
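+        # Illustrative config snippet (assumed, not part of this file):
+        #   model = dict(eval_module='one-stage', eval_index=0)
+        # makes predict() route evaluation here, so only self.bbox_head[0]
+        # produces the final detections. A query head, if present, is still
+        # run first below, because the last element of its forward output
+        # replaces mlvl_feats before the auxiliary head predicts.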
+ if self.with_query_head: + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + results = self.query_head.forward(mlvl_feats, batch_img_metas) + mlvl_feats = results[-1] + return self.bbox_head[self.eval_index].predict( + mlvl_feats, batch_data_samples, rescale=rescale) diff --git a/mmpose/configs/mmdet/CO-DETR/codetr/transformer.py b/mmpose/configs/mmdet/CO-DETR/codetr/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..009f94a8bcc88c584b336bab272a48b4960202de --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/codetr/transformer.py @@ -0,0 +1,1376 @@ +import math +import warnings + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, + TransformerLayerSequence, + build_transformer_layer_sequence) +from mmcv.ops import MultiScaleDeformableAttention +from mmengine.model import BaseModule +from mmengine.model.weight_init import xavier_init +from torch.nn.init import normal_ + +from mmdet.models.layers.transformer import inverse_sigmoid +from mmdet.registry import MODELS + +try: + from fairscale.nn.checkpoint import checkpoint_wrapper +except Exception: + checkpoint_wrapper = None + +# In order to save the cost and effort of reproduction, +# I did not refactor it into the style of mmdet 3.x DETR. + + +class Transformer(BaseModule): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None): + super(Transformer, self).__init__(init_cfg=init_cfg) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.encoder.embed_dims + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed): + """Forward function for `Transformer`. + + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. 
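+
+            For example, with bs=2, embed_dims=256, h=w=25 and num_query=100,
+            `out_dec` would have shape [num_dec_layers, 2, 100, 256] (or
+            [1, 2, 100, 256] without intermediate outputs) and `memory`
+            [2, 256, 25, 25]; the numbers are purely illustrative.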
+ """ + bs, c, h, w = x.shape + # use `view` instead of `flatten` for dynamically exporting to ONNX + x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] + pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat( + 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] + memory = self.encoder( + query=x, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=mask) + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask) + out_dec = out_dec.transpose(1, 2) + memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) + return out_dec, memory + + +@MODELS.register_module(force=True) +class DeformableDetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + + super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
+ """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@MODELS.register_module(force=True) +class DeformableDetrTransformer(Transformer): + """Implements the DeformableDETR transformer. + + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + as_two_stage=False, + num_feature_levels=4, + two_stage_num_proposals=300, + **kwargs): + super(DeformableDetrTransformer, self).__init__(**kwargs) + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.two_stage_num_proposals = two_stage_num_proposals + self.embed_dims = self.encoder.embed_dims + self.init_layers() + + def init_layers(self): + """Initialize layers of the DeformableDetrTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + if self.as_two_stage: + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.pos_trans = nn.Linear(self.embed_dims * 2, + self.embed_dims * 2) + self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) + else: + self.reference_points = nn.Linear(self.embed_dims, 2) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + m.init_weights() + if not self.as_two_stage: + xavier_init(self.reference_points, distribution='uniform', bias=0.) + normal_(self.level_embeds) + + def gen_encoder_output_proposals(self, memory, memory_padding_mask, + spatial_shapes): + """Generate proposals from encoded memory. + + Args: + memory (Tensor) : The output of encoder, + has shape (bs, num_key, embed_dim). num_key is + equal the number of points on feature map from + all level. + memory_padding_mask (Tensor): Padding mask for memory. + has shape (bs, num_key). + spatial_shapes (Tensor): The shape of all feature maps. + has shape (num_level, 2). 
+ + Returns: + tuple: A tuple of feature map and bbox prediction. + + - output_memory (Tensor): The input of decoder, \ + has shape (bs, num_key, embed_dim). num_key is \ + equal the number of points on feature map from \ + all levels. + - output_proposals (Tensor): The normalized proposal \ + after a inverse sigmoid, has shape \ + (bs, num_keys, 4). + """ + + N, S, C = memory.shape + proposals = [] + _cur = 0 + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( + N, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace( + 0, H - 1, H, dtype=torch.float32, device=memory.device), + torch.linspace( + 0, W - 1, W, dtype=torch.float32, device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W.unsqueeze(-1), + valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + proposal = torch.cat((grid, wh), -1).view(N, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all( + -1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) + output_proposals = output_proposals.masked_fill( + memory_padding_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_padding_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + return output_memory, output_proposals + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """Get the reference points used in decoder. + + Args: + spatial_shapes (Tensor): The shape of all + feature maps, has shape (num_level, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + device (obj:`device`): The device where + reference_points should be. + + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
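+
+            For example, for a single 2x2 feature level with all valid
+            ratios equal to 1, the reference points are the normalized cell
+            centres (0.25, 0.25), (0.75, 0.25), (0.25, 0.75) and (0.75, 0.75)
+            for every sample in the batch (purely illustrative numbers).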
+ """ + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def get_valid_ratio(self, mask): + """Get the valid radios of feature maps of all level.""" + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + assert self.as_two_stage or query_embed is not None + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + # We only use the first channel in enc_outputs_class as foreground, + # the other (num_classes - 1) channels are actually not used. + # Its targets are set to be 0s, which indicates the first + # class (foreground) because we use [0, num_classes - 1] to + # indicate class labels, background class is indicated by + # num_classes (similar convention in RPN). + # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa + # This follows the official implementation of Deformable DETR. 
+ topk_proposals = torch.topk( + enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + if self.as_two_stage: + return inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact + return inter_states, init_reference_out, \ + inter_references_out, None, None + + +@MODELS.register_module() +class CoDeformableDetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, + *args, + return_intermediate=False, + look_forward_twice=False, + **kwargs): + + super(CoDeformableDetrTransformerDecoder, + self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + self.look_forward_twice = look_forward_twice + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
+ """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append( + new_reference_points if self. + look_forward_twice else reference_points) + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@MODELS.register_module() +class CoDeformableDetrTransformer(DeformableDetrTransformer): + + def __init__(self, + mixed_selection=True, + with_pos_coord=True, + with_coord_feat=True, + num_co_heads=1, + **kwargs): + self.mixed_selection = mixed_selection + self.with_pos_coord = with_pos_coord + self.with_coord_feat = with_coord_feat + self.num_co_heads = num_co_heads + super(CoDeformableDetrTransformer, self).__init__(**kwargs) + self._init_layers() + + def _init_layers(self): + """Initialize layers of the CoDeformableDetrTransformer.""" + if self.with_pos_coord: + if self.num_co_heads > 0: + # bug: this code should be 'self.head_pos_embed = + # nn.Embedding(self.num_co_heads, self.embed_dims)', + # we keep this bug for reproducing our results with ResNet-50. + # You can fix this bug when reproducing results with + # swin transformer. 
+ self.head_pos_embed = nn.Embedding(self.num_co_heads, 1, 1, + self.embed_dims) + self.aux_pos_trans = nn.ModuleList() + self.aux_pos_trans_norm = nn.ModuleList() + self.pos_feats_trans = nn.ModuleList() + self.pos_feats_norm = nn.ModuleList() + for i in range(self.num_co_heads): + self.aux_pos_trans.append( + nn.Linear(self.embed_dims * 2, self.embed_dims * 2)) + self.aux_pos_trans_norm.append( + nn.LayerNorm(self.embed_dims * 2)) + if self.with_coord_feat: + self.pos_feats_trans.append( + nn.Linear(self.embed_dims, self.embed_dims)) + self.pos_feats_norm.append( + nn.LayerNorm(self.embed_dims)) + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + num_pos_feats = self.embed_dims // 2 + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + return_encoder_output=False, + attn_masks=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + assert self.as_two_stage or query_embed is not None + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + topk = query_embed.shape[0] + topk_proposals = torch.topk( + enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + + if not self.mixed_selection: + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + # query_embed here is the content embed for deformable DETR + query = query_embed.unsqueeze(0).expand(bs, -1, -1) + query_pos, _ = torch.split(pos_trans_out, c, dim=2) + else: + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + attn_masks=attn_masks, + **kwargs) + + 
inter_references_out = inter_references + if self.as_two_stage: + if return_encoder_output: + return inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact, memory + return inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact + if return_encoder_output: + return inter_states, init_reference_out, \ + inter_references_out, None, None, memory + return inter_states, init_reference_out, \ + inter_references_out, None, None + + def forward_aux(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + pos_anchors, + pos_feats=None, + reg_branches=None, + cls_branches=None, + return_encoder_output=False, + attn_masks=None, + head_idx=0, + **kwargs): + feat_flatten = [] + mask_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + + memory = feat_flatten + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + + topk_coords_unact = inverse_sigmoid(pos_anchors) + reference_points = pos_anchors + init_reference_out = reference_points + if self.num_co_heads > 0: + pos_trans_out = self.aux_pos_trans_norm[head_idx]( + self.aux_pos_trans[head_idx]( + self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + if self.with_coord_feat: + query = query + self.pos_feats_norm[head_idx]( + self.pos_feats_trans[head_idx](pos_feats)) + query_pos = query_pos + self.head_pos_embed.weight[head_idx] + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + attn_masks=attn_masks, + **kwargs) + + inter_references_out = inter_references + return inter_states, init_reference_out, \ + inter_references_out + + +def build_MLP(input_dim, hidden_dim, output_dim, num_layers): + assert num_layers > 1, \ + f'num_layers should be greater than 1 but got {num_layers}' + h = [hidden_dim] * (num_layers - 1) + layers = list() + for n, k in zip([input_dim] + h[:-1], h): + layers.extend((nn.Linear(n, k), nn.ReLU())) + # Note that the relu func of MLP in original DETR repo is set + # 'inplace=False', however the ReLU cfg of FFN in mmdet is set + # 'inplace=True' by default. 
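+    # For example (illustrative call): build_MLP(512, 256, 256, 2) returns
+    # Sequential(Linear(512, 256), ReLU(), Linear(256, 256));
+    # DinoTransformerDecoder below builds its ref_point_head this way with
+    # input_dim = 2 * embed_dims.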
+ layers.append(nn.Linear(hidden_dim, output_dim)) + return nn.Sequential(*layers) + + +@MODELS.register_module() +class DinoTransformerDecoder(DeformableDetrTransformerDecoder): + + def __init__(self, *args, **kwargs): + super(DinoTransformerDecoder, self).__init__(*args, **kwargs) + self._init_layers() + + def _init_layers(self): + self.ref_point_head = build_MLP(self.embed_dims * 2, self.embed_dims, + self.embed_dims, 2) + self.norm = nn.LayerNorm(self.embed_dims) + + @staticmethod + def gen_sineembed_for_position(pos_tensor, pos_feat): + # n_query, bs, _ = pos_tensor.size() + # sineembed_tensor = torch.zeros(n_query, bs, 256) + scale = 2 * math.pi + dim_t = torch.arange( + pos_feat, dtype=torch.float32, device=pos_tensor.device) + dim_t = 10000**(2 * (dim_t // 2) / pos_feat) + x_embed = pos_tensor[:, :, 0] * scale + y_embed = pos_tensor[:, :, 1] * scale + pos_x = x_embed[:, :, None] / dim_t + pos_y = y_embed[:, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), + dim=3).flatten(2) + pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), + dim=3).flatten(2) + if pos_tensor.size(-1) == 2: + pos = torch.cat((pos_y, pos_x), dim=2) + elif pos_tensor.size(-1) == 4: + w_embed = pos_tensor[:, :, 2] * scale + pos_w = w_embed[:, :, None] / dim_t + pos_w = torch.stack( + (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), + dim=3).flatten(2) + + h_embed = pos_tensor[:, :, 3] * scale + pos_h = h_embed[:, :, None] / dim_t + pos_h = torch.stack( + (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), + dim=3).flatten(2) + + pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) + else: + raise ValueError('Unknown pos_tensor shape(-1):{}'.format( + pos_tensor.size(-1))) + return pos + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + output = query + intermediate = [] + intermediate_reference_points = [reference_points] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * torch.cat( + [valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * valid_ratios[:, None] + + query_sine_embed = self.gen_sineembed_for_position( + reference_points_input[:, :, 0, :], self.embed_dims // 2) + query_pos = self.ref_point_head(query_sine_embed) + + query_pos = query_pos.permute(1, 0, 2) + output = layer( + output, + *args, + query_pos=query_pos, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + assert reference_points.shape[-1] == 4 + new_reference_points = tmp + inverse_sigmoid( + reference_points, eps=1e-3) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(self.norm(output)) + intermediate_reference_points.append(new_reference_points) + # NOTE this is for the "Look Forward Twice" module, + # in the DeformDETR, reference_points was appended. 
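+                # In other words, what is stored here is the un-detached
+                # refinement produced by this layer's reg branch, so gradients
+                # from losses computed on later predictions that reuse these
+                # references can still reach this layer's offsets, while the
+                # detached copy is what the next decoder layer consumes.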
+ + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@MODELS.register_module() +class CoDinoTransformer(CoDeformableDetrTransformer): + + def __init__(self, *args, **kwargs): + super(CoDinoTransformer, self).__init__(*args, **kwargs) + + def init_layers(self): + """Initialize layers of the DinoTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.query_embed = nn.Embedding(self.two_stage_num_proposals, + self.embed_dims) + + def _init_layers(self): + if self.with_pos_coord: + if self.num_co_heads > 0: + self.aux_pos_trans = nn.ModuleList() + self.aux_pos_trans_norm = nn.ModuleList() + self.pos_feats_trans = nn.ModuleList() + self.pos_feats_norm = nn.ModuleList() + for i in range(self.num_co_heads): + self.aux_pos_trans.append( + nn.Linear(self.embed_dims * 2, self.embed_dims)) + self.aux_pos_trans_norm.append( + nn.LayerNorm(self.embed_dims)) + if self.with_coord_feat: + self.pos_feats_trans.append( + nn.Linear(self.embed_dims, self.embed_dims)) + self.pos_feats_norm.append( + nn.LayerNorm(self.embed_dims)) + + def init_weights(self): + super().init_weights() + nn.init.normal_(self.query_embed.weight.data) + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + dn_label_query, + dn_bbox_query, + attn_mask, + reg_branches=None, + cls_branches=None, + **kwargs): + assert self.as_two_stage and query_embed is None, \ + 'as_two_stage must be True for DINO' + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = self.get_reference_points( + spatial_shapes, valid_ratios, device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + + output_memory, output_proposals = self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = 
reg_branches[self.decoder.num_layers]( + output_memory) + output_proposals + cls_out_features = cls_branches[self.decoder.num_layers].out_features + topk = self.two_stage_num_proposals + # NOTE In DeformDETR, enc_outputs_class[..., 0] is used for topk + topk_indices = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1] + + topk_score = torch.gather( + enc_outputs_class, 1, + topk_indices.unsqueeze(-1).repeat(1, 1, cls_out_features)) + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_indices.unsqueeze(-1).repeat(1, 1, 4)) + topk_anchor = topk_coords_unact.sigmoid() + topk_coords_unact = topk_coords_unact.detach() + + query = self.query_embed.weight[:, None, :].repeat(1, bs, + 1).transpose(0, 1) + # NOTE the query_embed here is not spatial query as in DETR. + # It is actually content query, which is named tgt in other + # DETR-like models + if dn_label_query is not None: + query = torch.cat([dn_label_query, query], dim=1) + if dn_bbox_query is not None: + reference_points = torch.cat([dn_bbox_query, topk_coords_unact], + dim=1) + else: + reference_points = topk_coords_unact + reference_points = reference_points.sigmoid() + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + attn_masks=attn_mask, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + + return inter_states, inter_references_out, \ + topk_score, topk_anchor, memory + + def forward_aux(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + pos_anchors, + pos_feats=None, + reg_branches=None, + cls_branches=None, + return_encoder_output=False, + attn_masks=None, + head_idx=0, + **kwargs): + feat_flatten = [] + mask_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + + memory = feat_flatten + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + + topk_coords_unact = inverse_sigmoid(pos_anchors) + reference_points = pos_anchors + if self.num_co_heads > 0: + pos_trans_out = self.aux_pos_trans_norm[head_idx]( + self.aux_pos_trans[head_idx]( + self.get_proposal_pos_embed(topk_coords_unact))) + query = pos_trans_out + if self.with_coord_feat: + query = query + self.pos_feats_norm[head_idx]( + self.pos_feats_trans[head_idx](pos_feats)) + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + attn_masks=None, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + 
level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + + return inter_states, inter_references_out + + +@MODELS.register_module() +class DetrTransformerEncoder(TransformerLayerSequence): + """TransformerEncoder of DETR. + + Args: + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. Only used when `self.pre_norm` is `True` + """ + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + with_cp=-1, + **kwargs): + super(DetrTransformerEncoder, self).__init__(*args, **kwargs) + if post_norm_cfg is not None: + self.post_norm = build_norm_layer( + post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None + else: + assert not self.pre_norm, f'Use prenorm in ' \ + f'{self.__class__.__name__},' \ + f'Please specify post_norm_cfg' + self.post_norm = None + self.with_cp = with_cp + if self.with_cp > 0: + if checkpoint_wrapper is None: + warnings.warn('If you want to reduce GPU memory usage, \ + please install fairscale by executing the \ + following command: pip install fairscale.') + return + for i in range(self.with_cp): + self.layers[i] = checkpoint_wrapper(self.layers[i]) + + +@MODELS.register_module() +class DetrTransformerDecoderLayer(BaseTransformerLayer): + """Implements decoder layer in DETR transformer. + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. + """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(DetrTransformerDecoderLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a4130437666428213eb3250f8eee9d2a4d1442b --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py @@ -0,0 +1,68 @@ +_base_ = './co_dino_5scale_r50_lsj_8xb2_1x_coco.py' + +model = dict( + use_lsj=False, data_preprocessor=dict(pad_mask=False, batch_augments=None)) + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. 
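+# The pipeline below follows the DETR-style augmentation: a RandomChoice
+# between (a) plain multi-scale resizing and (b) resize -> absolute-range
+# RandomCrop -> resize. As a purely illustrative (non-original) simplification,
+# training at a single fixed scale would amount to replacing the whole
+# RandomChoice block with:
+# dict(type='Resize', scale=(1333, 800), keep_ratio=True)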
+train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type=_base_.dataset_type, + data_root=_base_.data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + backend_args=_base_.backend_args)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_1x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..876b90f89c8795186d830689c9bdb420b0cfbb18 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_1x_coco.py @@ -0,0 +1,359 @@ +_base_ = 'mmdet::common/ssj_scp_270k_coco-instance.py' + +custom_imports = dict( + imports=['projects.CO-DETR.codetr'], allow_failed_imports=False) + +# model settings +num_dec_layer = 6 +loss_lambda = 2.0 +num_classes = 80 + +image_size = (1024, 1024) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +model = dict( + type='CoDETR', + # If using the lsj augmentation, + # it is recommended to set it to True. 
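+ # (LSJ = large-scale jittering: RandomResize with ratio_range (0.1, 2.0)
+ # onto the fixed `image_size` canvas of 1024x1024; see `batch_augments`
+ # above and `load_pipeline` below in this config.)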
+ use_lsj=True, + # detr: 52.1 + # one-stage: 49.4 + # two-stage: 47.9 + eval_module='detr', # in ['detr', 'one-stage', 'two-stage'] + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + batch_augments=batch_augments), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[256, 512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=5), + query_head=dict( + type='CoDINOHead', + num_query=900, + num_classes=num_classes, + in_channels=2048, + as_two_stage=True, + dn_cfg=dict( + label_noise_scale=0.5, + box_noise_scale=1.0, + group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), + transformer=dict( + type='CoDinoTransformer', + with_coord_feat=False, + num_co_heads=2, # ATSS Aux Head + Faster RCNN Aux Head + num_feature_levels=5, + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + # number of layers that use checkpoint. + # The maximum value for the setting is num_layers. + # FairScale must be installed for it to work. + with_cp=4, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_levels=5, + dropout=0.0), + feedforward_channels=2048, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DinoTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + dropout=0.0), + dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_levels=5, + dropout=0.0), + ], + feedforward_channels=2048, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + temperature=20, + normalize=True), + loss_cls=dict( # Different from the DINO + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0 * num_dec_layer * loss_lambda), + loss_bbox=dict( + type='L1Loss', loss_weight=1.0 * num_dec_layer * loss_lambda)), + roi_head=[ + dict( + type='CoStandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32, 64], + finest_scale=56), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=num_classes, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 
0.1, 0.2, 0.2]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0 * num_dec_layer * loss_lambda), + loss_bbox=dict( + type='GIoULoss', + loss_weight=10.0 * num_dec_layer * loss_lambda))) + ], + bbox_head=[ + dict( + type='CoATSSHead', + num_classes=num_classes, + in_channels=256, + stacked_convs=1, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[4, 8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0 * num_dec_layer * loss_lambda), + loss_bbox=dict( + type='GIoULoss', + loss_weight=2.0 * num_dec_layer * loss_lambda), + loss_centerness=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0 * num_dec_layer * loss_lambda)), + ], + # model training and testing settings + train_cfg=[ + dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=4000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False) + ], + test_cfg=[ + # Deferent from the DINO, we use the NMS. + dict( + max_per_img=300, + # NMS can improve the mAP by 0.2. 
+ nms=dict(type='soft_nms', iou_threshold=0.8)), + dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.0, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)), + dict( + # atss bbox head: + nms_pre=1000, + min_bbox_size=0, + score_thr=0.0, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100), + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + ]) + +# LSJ + CopyPaste +load_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), +] + +train_pipeline = [ + dict(type='CopyPaste', max_num_pasted=100), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + pipeline=train_pipeline, + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=load_pipeline))) + +# follow ViTDet +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=image_size, keep_ratio=True), # diff + dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=2e-4, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) + +val_evaluator = dict(metric='bbox') +test_evaluator = val_evaluator + +max_epochs = 12 +train_cfg = dict( + _delete_=True, + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=1) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +default_hooks = dict( + checkpoint=dict(by_epoch=True, interval=1, max_keep_ckpts=3)) +log_processor = dict(by_epoch=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
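+# (When auto LR scaling is enabled at launch, the runner rescales the
+# optimizer lr linearly by actual_total_batch_size / base_batch_size,
+# e.g. 4 GPUs x 2 samples per GPU gives lr = 2e-4 * 8 / 16 = 1e-4.)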
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_3x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9fc34f680a3de3f96a548817f3d4e37983fee7 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_r50_lsj_8xb2_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = ['co_dino_5scale_r50_lsj_8xb2_1x_coco.py'] + +param_scheduler = [dict(milestones=[30])] +train_cfg = dict(max_epochs=36) diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_16e_o365tococo.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_16e_o365tococo.py new file mode 100644 index 0000000000000000000000000000000000000000..77821c380f3407c2288377dc78232fd12205fc76 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_16e_o365tococo.py @@ -0,0 +1,115 @@ +_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py'] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_16e_o365tococo-614254c9.pth' # noqa + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=True, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768, 1536]), + query_head=dict( + dn_cfg=dict(box_noise_scale=0.4, group_cfg=dict(num_dn_queries=500)), + transformer=dict(encoder=dict(with_cp=6)))) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048), + (608, 2048), (640, 2048), (672, 2048), (704, 2048), + (736, 2048), (768, 2048), (800, 2048), (832, 2048), + (864, 2048), (896, 2048), (928, 2048), (960, 2048), + (992, 2048), (1024, 2048), (1056, 2048), + (1088, 2048), (1120, 2048), (1152, 2048), + (1184, 2048), (1216, 2048), (1248, 2048), + (1280, 2048), (1312, 2048), (1344, 2048), + (1376, 2048), (1408, 2048), (1440, 2048), + (1472, 2048), (1504, 2048), (1536, 2048)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048), + (608, 2048), (640, 2048), (672, 2048), (704, 2048), + (736, 2048), (768, 2048), (800, 2048), (832, 2048), + (864, 2048), (896, 2048), (928, 2048), (960, 2048), + (992, 2048), (1024, 2048), (1056, 2048), + (1088, 2048), (1120, 2048), (1152, 2048), + (1184, 2048), (1216, 2048), 
(1248, 2048), + (1280, 2048), (1312, 2048), (1344, 2048), + (1376, 2048), (1408, 2048), (1440, 2048), + (1472, 2048), (1504, 2048), (1536, 2048)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1280), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict(optimizer=dict(lr=1e-4)) + +max_epochs = 16 +train_cfg = dict(max_epochs=max_epochs) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_1x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d4a873464d422334a42d72543bbccc3b344aa97e --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_1x_coco.py @@ -0,0 +1,31 @@ +_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py'] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768, 1536]), + query_head=dict(transformer=dict(encoder=dict(with_cp=6)))) + +train_dataloader = dict(batch_size=1, num_workers=1) diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_3x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c2fce29b98b5ffe7e51396b8b88b289fc4c8ffbc --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_16xb1_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = ['co_dino_5scale_swin_l_16xb1_1x_coco.py'] +# model settings +model = dict(backbone=dict(drop_path_rate=0.6)) + +param_scheduler = [dict(milestones=[30])] +train_cfg = dict(max_epochs=36) diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9b3688b8ebf6525f4d96526dd543576ae6253b --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py @@ -0,0 +1,72 @@ +_base_ = ['co_dino_5scale_r50_lsj_8xb2_1x_coco.py'] + +image_size = (1280, 1280) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +pretrained = 
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=batch_augments), + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768, 1536]), + query_head=dict(transformer=dict(encoder=dict(with_cp=6)))) + +load_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), +] + +train_dataloader = dict( + batch_size=1, + num_workers=1, + dataset=dict(dataset=dict(pipeline=load_pipeline))) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=image_size, keep_ratio=True), + dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_3x_coco.py b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9cd4f439287d7174f9b773b7177ade179cd536 --- /dev/null +++ b/mmpose/configs/mmdet/CO-DETR/configs/codino/co_dino_5scale_swin_l_lsj_16xb1_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py'] + +model = dict(backbone=dict(drop_path_rate=0.5)) + +param_scheduler = [dict(type='MultiStepLR', milestones=[30])] + +train_cfg = dict(max_epochs=36) diff --git a/mmpose/configs/mmdet/_base_/datasets/ade20k_instance.py b/mmpose/configs/mmdet/_base_/datasets/ade20k_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..57f657aa67f34830515f410425eccc96cb065af4 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/ade20k_instance.py @@ -0,0 +1,53 @@ +# dataset settings +dataset_type = 'ADE20KInstanceDataset' +data_root = 'data/ADEChallengeData2016/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + 
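+# Note that this base config only defines the evaluation side (test_pipeline,
+# val/test dataloaders and a CocoMetric evaluator); configs inheriting from it
+# have to supply their own training pipeline and dataloader.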
+test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(2560, 640), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='ade20k_instance_val.json', + data_prefix=dict(img='images/validation'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'ade20k_instance_val.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/ade20k_panoptic.py b/mmpose/configs/mmdet/_base_/datasets/ade20k_panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..7be5ddd7f0732193f4f92bc49e52493602928162 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/ade20k_panoptic.py @@ -0,0 +1,38 @@ +# dataset settings +dataset_type = 'ADE20KPanopticDataset' +data_root = 'data/ADEChallengeData2016/' + +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(2560, 640), keep_ratio=True), + dict(type='LoadPanopticAnnotations', backend_args=backend_args), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=0, + persistent_workers=False, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='ade20k_panoptic_val.json', + data_prefix=dict(img='images/validation/', seg='ade20k_panoptic_val/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoPanopticMetric', + ann_file=data_root + 'ade20k_panoptic_val.json', + seg_prefix=data_root + 'ade20k_panoptic_val/', + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/ade20k_semantic.py b/mmpose/configs/mmdet/_base_/datasets/ade20k_semantic.py new file mode 100644 index 0000000000000000000000000000000000000000..522a775704182ededaa36f318cd1eb185784918f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/ade20k_semantic.py @@ -0,0 +1,48 @@ +dataset_type = 'ADE20KSegDataset' +data_root = 'data/ADEChallengeData2016/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + dict( + type='LoadAnnotations', + 
with_bbox=False, + with_mask=False, + with_seg=True, + reduce_zero_label=True), + dict( + type='PackDetInputs', meta_keys=('img_path', 'ori_shape', 'img_shape')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/cityscapes_detection.py b/mmpose/configs/mmdet/_base_/datasets/cityscapes_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..caeba6bfcd26d8954fc9d499446e93323e372959 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/cityscapes_detection.py @@ -0,0 +1,84 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/segmentation/', +# 'data/': 's3://openmmlab/datasets/segmentation/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=[(2048, 800), (2048, 1024)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instancesonly_filtered_gtFine_train.json', + data_prefix=dict(img='leftImg8bit/train/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instancesonly_filtered_gtFine_val.json', + data_prefix=dict(img='leftImg8bit/val/'), + test_mode=True, + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=test_pipeline, + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', + metric='bbox', + backend_args=backend_args) + +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/cityscapes_instance.py 
b/mmpose/configs/mmdet/_base_/datasets/cityscapes_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..136403136c67a6726662832b66f56701ff5aba8a --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/cityscapes_instance.py @@ -0,0 +1,113 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/' + +# Method 2: Use backend_args, file_client_args in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/segmentation/', +# 'data/': 's3://openmmlab/datasets/segmentation/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=[(2048, 800), (2048, 1024)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instancesonly_filtered_gtFine_train.json', + data_prefix=dict(img='leftImg8bit/train/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instancesonly_filtered_gtFine_val.json', + data_prefix=dict(img='leftImg8bit/val/'), + test_mode=True, + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=test_pipeline, + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = [ + dict( + type='CocoMetric', + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_val.json', + metric=['bbox', 'segm'], + backend_args=backend_args), + dict( + type='CityScapesMetric', + seg_prefix=data_root + 'gtFine/val', + outfile_prefix='./work_dirs/cityscapes_metric/instance', + backend_args=backend_args) +] + +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. 
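+# (Uncommenting the block below switches to the unannotated Cityscapes test
+# split; CityScapesMetric with format_only=True only dumps predictions under
+# `outfile_prefix` for test-server submission instead of computing metrics.)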
+# test_dataloader = dict( +# batch_size=1, +# num_workers=2, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file='annotations/instancesonly_filtered_gtFine_test.json', +# data_prefix=dict(img='leftImg8bit/test/'), +# test_mode=True, +# filter_cfg=dict(filter_empty_gt=True, min_size=32), +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CityScapesMetric', +# format_only=True, +# outfile_prefix='./work_dirs/cityscapes_metric/test') diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_caption.py b/mmpose/configs/mmdet/_base_/datasets/coco_caption.py new file mode 100644 index 0000000000000000000000000000000000000000..a1bd898313927e4fca336dfa10f05e78b9fb7162 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_caption.py @@ -0,0 +1,60 @@ +# data settings + +dataset_type = 'CocoCaptionDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +test_pipeline = [ + dict( + type='LoadImageFromFile', + imdecode_backend='pillow', + backend_args=backend_args), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +# ann_file download from +# train dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json # noqa +# val dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json # noqa +# test dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json # noqa +# val evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json # noqa +# test evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json # noqa +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + )) + +val_evaluator = dict( + type='COCOCaptionMetric', + ann_file=data_root + 'annotations/coco_karpathy_val_gt.json', +) + +# # If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_detection.py b/mmpose/configs/mmdet/_base_/datasets/coco_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf8dfad9476b1d7b7a4e8c3e2832f115a1ea7f2 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_detection.py @@ -0,0 +1,95 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 
's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. 
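+# (The commented block below runs inference on test-dev2017, which has no
+# public annotations; with format_only=True the CocoMetric only writes
+# COCO-style json results under `outfile_prefix` for upload to the
+# evaluation server.)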
+# test_dataloader = dict( +# batch_size=1, +# num_workers=2, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# data_prefix=dict(img='test2017/'), +# test_mode=True, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CocoMetric', +# metric='bbox', +# format_only=True, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# outfile_prefix='./work_dirs/coco_detection/test') diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_human_instance.py b/mmpose/configs/mmdet/_base_/datasets/coco_human_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0d886d407c94daa5c61543e3149f22a8986f36 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_human_instance.py @@ -0,0 +1,95 @@ +# dataset settings +dataset_type = 'CocoHumanDataset' +data_root = "/datagrid/personal/purkrmir/data/COCO/original/" + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. 
+# test_dataloader = dict( +# batch_size=1, +# num_workers=2, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# data_prefix=dict(img='test2017/'), +# test_mode=True, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CocoMetric', +# metric=['bbox', 'segm'], +# format_only=True, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# outfile_prefix='./work_dirs/coco_instance/test') diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_instance.py b/mmpose/configs/mmdet/_base_/datasets/coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..e91cb354038db4df3b990b307a5da9d77f341a88 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_instance.py @@ -0,0 +1,95 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. 
+# test_dataloader = dict( +# batch_size=1, +# num_workers=2, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# data_prefix=dict(img='test2017/'), +# test_mode=True, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CocoMetric', +# metric=['bbox', 'segm'], +# format_only=True, +# ann_file=data_root + 'annotations/image_info_test-dev2017.json', +# outfile_prefix='./work_dirs/coco_instance/test') diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_instance_semantic.py b/mmpose/configs/mmdet/_base_/datasets/coco_instance_semantic.py new file mode 100644 index 0000000000000000000000000000000000000000..cc961863306690c056e564b542d518c0ebfbb7e2 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_instance_semantic.py @@ -0,0 +1,78 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_panoptic.py b/mmpose/configs/mmdet/_base_/datasets/coco_panoptic.py new file mode 100644 index 
0000000000000000000000000000000000000000..0b95b619e68ed531d361bbd11a2382852c13446e --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_panoptic.py @@ -0,0 +1,94 @@ +# dataset settings +dataset_type = 'CocoPanopticDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadPanopticAnnotations', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadPanopticAnnotations', backend_args=backend_args), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/panoptic_train2017.json', + data_prefix=dict( + img='train2017/', seg='annotations/panoptic_train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/panoptic_val2017.json', + data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoPanopticMetric', + ann_file=data_root + 'annotations/panoptic_val2017.json', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + backend_args=backend_args) +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. 
+# test_dataloader = dict( +# batch_size=1, +# num_workers=1, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file='annotations/panoptic_image_info_test-dev2017.json', +# data_prefix=dict(img='test2017/'), +# test_mode=True, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CocoPanopticMetric', +# format_only=True, +# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json', +# outfile_prefix='./work_dirs/coco_panoptic/test') diff --git a/mmpose/configs/mmdet/_base_/datasets/coco_semantic.py b/mmpose/configs/mmdet/_base_/datasets/coco_semantic.py new file mode 100644 index 0000000000000000000000000000000000000000..944bbbaeaeb6f10f0946bd1fc828bb01ea6c1fc3 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/coco_semantic.py @@ -0,0 +1,78 @@ +# dataset settings +dataset_type = 'CocoSegDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations', + with_bbox=False, + with_label=False, + with_seg=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='LoadAnnotations', + with_bbox=False, + with_label=False, + with_seg=True), + dict( + type='PackDetInputs', + meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor')) +] + +# For stuffthingmaps_semseg, please refer to +# `docs/en/user_guides/dataset_prepare.md` +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='train2017/', + seg_map_path='stuffthingmaps_semseg/train2017/'), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='val2017/', + seg_map_path='stuffthingmaps_semseg/val2017/'), + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/deepfashion.py b/mmpose/configs/mmdet/_base_/datasets/deepfashion.py new file mode 100644 index 0000000000000000000000000000000000000000..a93dc7152f7a2e28ab726c79f9398a1034b7b4a1 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/deepfashion.py @@ -0,0 +1,95 @@ +# dataset settings +dataset_type = 'DeepFashionDataset' +data_root = 'data/DeepFashion/In-shop/' + +# Example to use different file client +# Method 
1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(750, 1101), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(750, 1101), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='Anno/segmentation/DeepFashion_segmentation_train.json', + data_prefix=dict(img='Img/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='Anno/segmentation/DeepFashion_segmentation_query.json', + data_prefix=dict(img='Img/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json', + data_prefix=dict(img='Img/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + + 'Anno/segmentation/DeepFashion_segmentation_query.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + + 'Anno/segmentation/DeepFashion_segmentation_gallery.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) diff --git a/mmpose/configs/mmdet/_base_/datasets/dsdl.py b/mmpose/configs/mmdet/_base_/datasets/dsdl.py new file mode 100644 index 0000000000000000000000000000000000000000..1f19e5e498b18a404f3c4e6419316b5f9981e811 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/dsdl.py @@ -0,0 +1,62 @@ +dataset_type = 'DSDLDetDataset' +data_root = 'path to dataset folder' +train_ann = 'path to train yaml file' +val_ann = 'path to val yaml file' + +backend_args = None +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': "s3://open_data/", +# 'data/': "s3://open_data/" +# })) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), 
keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann, + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=val_ann, + test_mode=True, + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +val_evaluator = dict(type='CocoMetric', metric='bbox') +# val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/isaid_instance.py b/mmpose/configs/mmdet/_base_/datasets/isaid_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..09ddcab02bdd52374d5093d446abb0e34751f7a3 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/isaid_instance.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'iSAIDDataset' +data_root = 'data/iSAID/' +backend_args = None + +# Please see `projects/iSAID/README.md` for data preparation +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(800, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(800, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='train/instancesonly_filtered_train.json', + data_prefix=dict(img='train/images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='val/instancesonly_filtered_val.json', + data_prefix=dict(img='val/images/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'val/instancesonly_filtered_val.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/mmdet/_base_/datasets/lvis_v0.5_instance.py b/mmpose/configs/mmdet/_base_/datasets/lvis_v0.5_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ca44efb6d31aae5f6426a1c8b89d2e9be2104f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/lvis_v0.5_instance.py @@ -0,0 +1,79 @@ +# dataset settings +dataset_type = 'LVISV05Dataset' +data_root = 'data/lvis_v0.5/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/lvis_v0.5/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v0.5_train.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v0.5_val.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='LVISMetric', + ann_file=data_root + 'annotations/lvis_v0.5_val.json', + metric=['bbox', 'segm'], + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/lvis_v1_instance.py b/mmpose/configs/mmdet/_base_/datasets/lvis_v1_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..0413f370a2b635362a60c20881769064bac9a603 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/lvis_v1_instance.py @@ -0,0 +1,22 @@ +# dataset settings +_base_ = 'lvis_v0.5_instance.py' +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' + +train_dataloader = dict( + dataset=dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_train.json', + data_prefix=dict(img='')))) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + 
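Editor's note: lvis_v1_instance.py above is itself a compact example of how these `_base_` dataset files are meant to be layered. It inherits lvis_v0.5_instance.py and only re-points the dataset type, data root and annotation files; the pipelines, samplers and the ClassBalancedDataset wrapper all come from the parent. Downstream model configs consume the dataset files the same way through `_base_` lists. A minimal sketch of such a child config (the file name is hypothetical, and the model/schedule base files referenced are assumed to exist as in upstream MMDetection; they are not part of this diff excerpt):

# hypothetical configs/mmdet/lvis/mask-rcnn_r50_fpn_1x_lvis-v1.py
_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',   # assumed model base config
    '../_base_/datasets/lvis_v1_instance.py',  # dataset base added in this diff
    '../_base_/schedules/schedule_1x.py',      # assumed schedule base config
    '../_base_/default_runtime.py',
]

# LVIS v1 has 1203 categories, so the classification heads are resized here;
# nested dicts merge key-by-key with the values inherited from the base files.
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1203),
        mask_head=dict(num_classes=1203)))

# dataloader fields merge the same way, so only the overrides are listed
train_dataloader = dict(batch_size=4, num_workers=4)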
+val_evaluator = dict(ann_file=data_root + 'annotations/lvis_v1_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/mot_challenge.py b/mmpose/configs/mmdet/_base_/datasets/mot_challenge.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2828ef70a34c123792d252bf992f423049d065 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/mot_challenge.py @@ -0,0 +1,90 @@ +# dataset settings +dataset_type = 'MOTChallengeDataset' +data_root = 'data/MOT17/' +img_scale = (1088, 1088) + +backend_args = None +# data pipeline +train_pipeline = [ + dict( + type='UniformRefFrameSample', + num_ref_imgs=1, + frame_range=10, + filter_key_img=True), + dict( + type='TransformBroadcaster', + share_random_params=True, + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadTrackAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.8, 1.2), + keep_ratio=True, + clip_object_border=False), + dict(type='PhotoMetricDistortion') + ]), + dict( + type='TransformBroadcaster', + # different cropped positions for different frames + share_random_params=False, + transforms=[ + dict( + type='RandomCrop', crop_size=img_scale, bbox_clip_border=False) + ]), + dict( + type='TransformBroadcaster', + share_random_params=True, + transforms=[ + dict(type='RandomFlip', prob=0.5), + ]), + dict(type='PackTrackInputs') +] + +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='LoadTrackAnnotations') + ]), + dict(type='PackTrackInputs') +] + +# dataloader +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='TrackImgSampler'), # image-based sampling + dataset=dict( + type=dataset_type, + data_root=data_root, + visibility_thr=-1, + ann_file='annotations/half-train_cocoformat.json', + data_prefix=dict(img_path='train'), + metainfo=dict(classes=('pedestrian', )), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + # Now we support two ways to test, image_based and video_based + # if you want to use video_based sampling, you can use as follows + # sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + sampler=dict(type='TrackImgSampler'), # image-based sampling + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/half-val_cocoformat.json', + data_prefix=dict(img_path='train'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# evaluator +val_evaluator = dict( + type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/mot_challenge_det.py b/mmpose/configs/mmdet/_base_/datasets/mot_challenge_det.py new file mode 100644 index 0000000000000000000000000000000000000000..a988572c3837eb2a8a6bf7b9eca06f3d82abdfda --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/mot_challenge_det.py @@ -0,0 +1,66 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/MOT17/' + +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args, to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(1088, 1088), + ratio_range=(0.8, 1.2), + keep_ratio=True, + clip_object_border=False), + 
dict(type='PhotoMetricDistortion'), + dict(type='RandomCrop', crop_size=(1088, 1088), bbox_clip_border=False), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1088, 1088), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/half-train_cocoformat.json', + data_prefix=dict(img='train/'), + metainfo=dict(classes=('pedestrian', )), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/half-val_cocoformat.json', + data_prefix=dict(img='train/'), + metainfo=dict(classes=('pedestrian', )), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/half-val_cocoformat.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/mot_challenge_reid.py b/mmpose/configs/mmdet/_base_/datasets/mot_challenge_reid.py new file mode 100644 index 0000000000000000000000000000000000000000..57a95b531f3591e60daaabc5eea6f11c7424215b --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/mot_challenge_reid.py @@ -0,0 +1,61 @@ +# dataset settings +dataset_type = 'ReIDDataset' +data_root = 'data/MOT17/' + +backend_args = None +# data pipeline +train_pipeline = [ + dict( + type='TransformBroadcaster', + share_random_params=False, + transforms=[ + dict( + type='LoadImageFromFile', + backend_args=backend_args, + to_float32=True), + dict( + type='Resize', + scale=(128, 256), + keep_ratio=False, + clip_object_border=False), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + ]), + dict(type='PackReIDInputs', meta_keys=('flip', 'flip_direction')) +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args, to_float32=True), + dict(type='Resize', scale=(128, 256), keep_ratio=False), + dict(type='PackReIDInputs') +] + +# dataloader +train_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + triplet_sampler=dict(num_ids=8, ins_per_id=4), + data_prefix=dict(img_path='reid/imgs'), + ann_file='reid/meta/train_80.txt', + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + triplet_sampler=None, + data_prefix=dict(img_path='reid/imgs'), + ann_file='reid/meta/val_20.txt', + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# evaluator +val_evaluator = dict(type='ReIDMetrics', metric=['mAP', 'CMC']) +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/mmdet/_base_/datasets/objects365v1_detection.py b/mmpose/configs/mmdet/_base_/datasets/objects365v1_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..ee398698608543e13188452a816283e9a2563390 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/objects365v1_detection.py @@ -0,0 +1,74 @@ +# dataset settings +dataset_type = 'Objects365V1Dataset' +data_root = 'data/Objects365/Obj365_v1/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/objects365_val.json', + data_prefix=dict(img='val/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/objects365_val.json', + metric='bbox', + sort_categories=True, + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/objects365v2_detection.py b/mmpose/configs/mmdet/_base_/datasets/objects365v2_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..b25a7ba901befa8d61e3cdae8a7c68fb8a9c5aef --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/objects365v2_detection.py @@ -0,0 +1,73 @@ +# dataset settings +dataset_type = 'Objects365V2Dataset' +data_root = 'data/Objects365/Obj365_v2/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 
'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/zhiyuan_objv2_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/zhiyuan_objv2_val.json', + data_prefix=dict(img='val/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/zhiyuan_objv2_val.json', + metric='bbox', + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/openimages_detection.py b/mmpose/configs/mmdet/_base_/datasets/openimages_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..129661b405c70d3e2d0d2c4741e3a59333dd960c --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/openimages_detection.py @@ -0,0 +1,81 @@ +# dataset settings +dataset_type = 'OpenImagesDataset' +data_root = 'data/OpenImages/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1024, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1024, 800), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + # TODO: find a better way to collect image_level_labels + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances', 'image_level_labels')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=0, # workers_per_gpu > 0 may occur out of memory + persistent_workers=False, + 
sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/oidv6-train-annotations-bbox.csv', + data_prefix=dict(img='OpenImages/train/'), + label_file='annotations/class-descriptions-boxable.csv', + hierarchy_file='annotations/bbox_labels_600_hierarchy.json', + meta_file='annotations/train-image-metas.pkl', + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=0, + persistent_workers=False, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/validation-annotations-bbox.csv', + data_prefix=dict(img='OpenImages/validation/'), + label_file='annotations/class-descriptions-boxable.csv', + hierarchy_file='annotations/bbox_labels_600_hierarchy.json', + meta_file='annotations/validation-image-metas.pkl', + image_level_ann_file='annotations/validation-' + 'annotations-human-imagelabels-boxable.csv', + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='OpenImagesMetric', + iou_thrs=0.5, + ioa_thrs=0.5, + use_group_of=True, + get_supercategory=True) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/refcoco+.py b/mmpose/configs/mmdet/_base_/datasets/refcoco+.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0278ddf6c30fda6e4fb42aed1cb1b9a55109ec --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/refcoco+.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'RefCocoDataset' +data_root = 'data/coco/' + +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='LoadAnnotations', + with_mask=True, + with_bbox=False, + with_seg=False, + with_label=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'gt_masks', 'text')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco+/instances.json', + split_file='refcoco+/refs(unc).p', + split='val', + text_mode='select_first', + pipeline=test_pipeline)) + +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco+/instances.json', + split_file='refcoco+/refs(unc).p', + split='testA', # or 'testB' + text_mode='select_first', + pipeline=test_pipeline)) + +val_evaluator = dict(type='RefSegMetric', metric=['cIoU', 'mIoU']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/refcoco.py b/mmpose/configs/mmdet/_base_/datasets/refcoco.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6caefa9a4bbfabdb49689588821f99d882a80f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/refcoco.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'RefCocoDataset' +data_root = 'data/coco/' + +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', 
backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='LoadAnnotations', + with_mask=True, + with_bbox=False, + with_seg=False, + with_label=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'gt_masks', 'text')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco/instances.json', + split_file='refcoco/refs(unc).p', + split='val', + text_mode='select_first', + pipeline=test_pipeline)) + +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco/instances.json', + split_file='refcoco/refs(unc).p', + split='testA', # or 'testB' + text_mode='select_first', + pipeline=test_pipeline)) + +val_evaluator = dict(type='RefSegMetric', metric=['cIoU', 'mIoU']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/refcocog.py b/mmpose/configs/mmdet/_base_/datasets/refcocog.py new file mode 100644 index 0000000000000000000000000000000000000000..19dbeef1cde79fcb2aa80bb9936a60cc30089963 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/refcocog.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'RefCocoDataset' +data_root = 'data/coco/' + +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='LoadAnnotations', + with_mask=True, + with_bbox=False, + with_seg=False, + with_label=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'gt_masks', 'text')) +] + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcocog/instances.json', + split_file='refcocog/refs(umd).p', + split='val', + text_mode='select_first', + pipeline=test_pipeline)) + +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='train2014/'), + ann_file='refcocog/instances.json', + split_file='refcocog/refs(umd).p', + split='test', + text_mode='select_first', + pipeline=test_pipeline)) + +val_evaluator = dict(type='RefSegMetric', metric=['cIoU', 'mIoU']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/semi_coco_detection.py b/mmpose/configs/mmdet/_base_/datasets/semi_coco_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..694f25f841e06dbb59a699dfe13c18e34dbdce9f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/semi_coco_detection.py @@ -0,0 +1,178 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB 
and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +color_space = [ + [dict(type='ColorTransform')], + [dict(type='AutoContrast')], + [dict(type='Equalize')], + [dict(type='Sharpness')], + [dict(type='Posterize')], + [dict(type='Solarize')], + [dict(type='Color')], + [dict(type='Contrast')], + [dict(type='Brightness')], +] + +geometric = [ + [dict(type='Rotate')], + [dict(type='ShearX')], + [dict(type='ShearY')], + [dict(type='TranslateX')], + [dict(type='TranslateY')], +] + +scale = [(1333, 400), (1333, 1200)] + +branch_field = ['sup', 'unsup_teacher', 'unsup_student'] +# pipeline used to augment labeled data, +# which will be sent to student model for supervised training. +sup_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomResize', scale=scale, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='RandAugment', aug_space=color_space, aug_num=1), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='MultiBranch', + branch_field=branch_field, + sup=dict(type='PackDetInputs')) +] + +# pipeline used to augment unlabeled data weakly, +# which will be sent to teacher model for predicting pseudo instances. +weak_pipeline = [ + dict(type='RandomResize', scale=scale, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', + 'homography_matrix')), +] + +# pipeline used to augment unlabeled data strongly, +# which will be sent to student model for unsupervised training. +strong_pipeline = [ + dict(type='RandomResize', scale=scale, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomOrder', + transforms=[ + dict(type='RandAugment', aug_space=color_space, aug_num=1), + dict(type='RandAugment', aug_space=geometric, aug_num=1), + ]), + dict(type='RandomErasing', n_patches=(1, 5), ratio=(0, 0.2)), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', + 'homography_matrix')), +] + +# pipeline used to augment unlabeled data into different views +unsup_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadEmptyAnnotations'), + dict( + type='MultiBranch', + branch_field=branch_field, + unsup_teacher=weak_pipeline, + unsup_student=strong_pipeline, + ) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +batch_size = 5 +num_workers = 5 +# There are two common semi-supervised learning settings on the coco dataset: +# (1) Divide the train2017 into labeled and unlabeled datasets +# by a fixed percentage, such as 1%, 2%, 5% and 10%. 
+# The format of labeled_ann_file and unlabeled_ann_file are +# instances_train2017.{fold}@{percent}.json, and +# instances_train2017.{fold}@{percent}-unlabeled.json +# `fold` is used for cross-validation, and `percent` represents +# the proportion of labeled data in the train2017. +# (2) Choose the train2017 as the labeled dataset +# and unlabeled2017 as the unlabeled dataset. +# The labeled_ann_file and unlabeled_ann_file are +# instances_train2017.json and image_info_unlabeled2017.json +# We use this configuration by default. +labeled_dataset = dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=sup_pipeline, + backend_args=backend_args) + +unlabeled_dataset = dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_unlabeled2017.json', + data_prefix=dict(img='unlabeled2017/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=unsup_pipeline, + backend_args=backend_args) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=num_workers, + persistent_workers=True, + sampler=dict( + type='GroupMultiSourceSampler', + batch_size=batch_size, + source_ratio=[1, 4]), + dataset=dict( + type='ConcatDataset', datasets=[labeled_dataset, unlabeled_dataset])) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/v3det.py b/mmpose/configs/mmdet/_base_/datasets/v3det.py new file mode 100644 index 0000000000000000000000000000000000000000..38ccbf864b6248192dfbf4abaf4858b5f93d45e8 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/v3det.py @@ -0,0 +1,69 @@ +# dataset settings +dataset_type = 'V3DetDataset' +data_root = 'data/V3Det/' + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/v3det_2023_v1_train.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=4), + 
pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/v3det_2023_v1_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/v3det_2023_v1_val.json', + metric='bbox', + format_only=False, + backend_args=backend_args, + use_mp_eval=True, + proposal_nums=[300]) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/voc0712.py b/mmpose/configs/mmdet/_base_/datasets/voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..47f5e6563b7f47dd6cfec02248d4c8decd32afe4 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/voc0712.py @@ -0,0 +1,92 @@ +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically Infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/segmentation/VOCdevkit/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/segmentation/', +# 'data/': 's3://openmmlab/datasets/segmentation/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type='ConcatDataset', + # VOCDataset will add different `dataset_type` in dataset.metainfo, + # which will get error if using ConcatDataset. Adding + # `ignore_keys` can avoid this error. 
+ ignore_keys=['dataset_type'], + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline, + backend_args=backend_args), + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline, + backend_args=backend_args) + ]))) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/test.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +# Pascal VOC2007 uses `11points` as default evaluate mode, while PASCAL +# VOC2012 defaults to use 'area'. +val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/wider_face.py b/mmpose/configs/mmdet/_base_/datasets/wider_face.py new file mode 100644 index 0000000000000000000000000000000000000000..7042bc46e877ed899969730325143307e15adf64 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/wider_face.py @@ -0,0 +1,73 @@ +# dataset settings +dataset_type = 'WIDERFaceDataset' +data_root = 'data/WIDERFace/' +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/cityscapes/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +img_scale = (640, 640) # VGA resolution + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='train.txt', + data_prefix=dict(img='WIDER_train'), + filter_cfg=dict(filter_empty_gt=True, bbox_min_size=17, min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='val.txt', + data_prefix=dict(img='WIDER_val'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = 
val_dataloader + +val_evaluator = dict( + # TODO: support WiderFace-Evaluation for easy, medium, hard cases + type='VOCMetric', + metric='mAP', + eval_mode='11points') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/_base_/datasets/youtube_vis.py b/mmpose/configs/mmdet/_base_/datasets/youtube_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..ece07cc3879e512082e302c2e3f76108c57a0234 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/datasets/youtube_vis.py @@ -0,0 +1,66 @@ +dataset_type = 'YouTubeVISDataset' +data_root = 'data/youtube_vis_2019/' +dataset_version = data_root[-5:-1] # 2019 or 2021 + +backend_args = None + +# dataset settings +train_pipeline = [ + dict( + type='UniformRefFrameSample', + num_ref_imgs=1, + frame_range=100, + filter_key_img=True), + dict( + type='TransformBroadcaster', + share_random_params=True, + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadTrackAnnotations', with_mask=True), + dict(type='Resize', scale=(640, 360), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + ]), + dict(type='PackTrackInputs') +] + +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(640, 360), keep_ratio=True), + dict(type='LoadTrackAnnotations', with_mask=True), + ]), + dict(type='PackTrackInputs') +] + +# dataloader +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + # sampler=dict(type='TrackImgSampler'), # image-based sampling + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='TrackAspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2019_train.json', + data_prefix=dict(img_path='train/JPEGImages'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2019_valid.json', + data_prefix=dict(img_path='valid/JPEGImages'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/_base_/default_runtime.py b/mmpose/configs/mmdet/_base_/default_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..870e5614c86d7e1bbdad13d77a0db03a46ce717a --- /dev/null +++ b/mmpose/configs/mmdet/_base_/default_runtime.py @@ -0,0 +1,24 @@ +default_scope = 'mmdet' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/mmpose/configs/mmdet/_base_/models/cascade-mask-rcnn_r50_fpn.py 
b/mmpose/configs/mmdet/_base_/models/cascade-mask-rcnn_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..c5167f7a02e66c80bd8ec8cc7572acb22eaadba5 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/cascade-mask-rcnn_r50_fpn.py @@ -0,0 +1,203 @@ +# model settings +model = dict( + type='CascadeRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + 
pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/mmpose/configs/mmdet/_base_/models/cascade-rcnn_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/cascade-rcnn_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..50c57f01ca3a6ea827f71801b0c233af268914f9 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/cascade-rcnn_r50_fpn.py @@ -0,0 +1,185 @@ +# model settings +model = dict( + type='CascadeRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + 
loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/mmpose/configs/mmdet/_base_/models/fast-rcnn_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/fast-rcnn_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd45e9266b01df302b78e50258fa1572144cb21 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/fast-rcnn_r50_fpn.py @@ -0,0 +1,68 @@ +# model settings +model = dict( + type='FastRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + 
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-c4.py b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-c4.py new file mode 100644 index 0000000000000000000000000000000000000000..15d2db72e48790505c2a1e4e7d184c1803f7ab31 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-c4.py @@ -0,0 +1,123 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + 
allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-dc5.py b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-dc5.py new file mode 100644 index 0000000000000000000000000000000000000000..189915e3d9ce7239493da6465931f91e2d9d664f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50-caffe-dc5.py @@ -0,0 +1,111 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + strides=(1, 2, 2, 1), + dilations=(1, 1, 1, 2), + out_indices=(3, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=2048, + feat_channels=2048, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=2048, + featmap_strides=[16]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=2048, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + 
nms=dict(type='nms', iou_threshold=0.7), + nms_pre=6000, + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..31aa1461799a988a11adb901306a063fd3f0b951 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/faster-rcnn_r50_fpn.py @@ -0,0 +1,114 @@ +# model settings +model = dict( + type='FasterRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) diff --git a/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50-caffe-c4.py b/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50-caffe-c4.py new file mode 100644 index 
0000000000000000000000000000000000000000..de1131b24893ae24bd99923895fd844837c9b46d --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50-caffe-c4.py @@ -0,0 +1,132 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='MaskRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + nms=dict(type='nms', iou_threshold=0.7), + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ff7a49d0a2f0abd4823ef89ad957d9708085e7 --- /dev/null +++ 
b/mmpose/configs/mmdet/_base_/models/mask-rcnn_r50_fpn.py @@ -0,0 +1,127 @@ +# model settings +model = dict( + type='MaskRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/mmpose/configs/mmdet/_base_/models/retinanet_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/retinanet_r50_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..53662c9f1390af22b15c5591e122b0aa0b2d6c92 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/retinanet_r50_fpn.py @@ -0,0 +1,68 @@ +# model 
settings +model = dict( + type='RetinaNet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + sampler=dict( + type='PseudoSampler'), # Focal loss should use PseudoSampler + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) diff --git a/mmpose/configs/mmdet/_base_/models/rpn_r50-caffe-c4.py b/mmpose/configs/mmdet/_base_/models/rpn_r50-caffe-c4.py new file mode 100644 index 0000000000000000000000000000000000000000..ed1dbe746d432d96d70e7dc9048c9e1b1727c938 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/rpn_r50-caffe-c4.py @@ -0,0 +1,64 @@ +# model settings +model = dict( + type='RPN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=None, + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/_base_/models/rpn_r50_fpn.py b/mmpose/configs/mmdet/_base_/models/rpn_r50_fpn.py new file mode 
100644 index 0000000000000000000000000000000000000000..6bc4790434a368d0728d74dcd7ba79e665aae276 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/rpn_r50_fpn.py @@ -0,0 +1,64 @@ +# model settings +model = dict( + type='RPN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/_base_/models/ssd300.py b/mmpose/configs/mmdet/_base_/models/ssd300.py new file mode 100644 index 0000000000000000000000000000000000000000..fd113c7cbc41494eabb6a56061f8a90343ac9efd --- /dev/null +++ b/mmpose/configs/mmdet/_base_/models/ssd300.py @@ -0,0 +1,63 @@ +# model settings +input_size = 300 +model = dict( + type='SingleStageDetector', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[1, 1, 1], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='SSDVGG', + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), + neck=dict( + type='SSDNeck', + in_channels=(512, 1024), + out_channels=(512, 1024, 512, 256, 256, 256), + level_strides=(2, 2, 1, 1), + level_paddings=(1, 1, 0, 0), + l2_norm_scale=20), + bbox_head=dict( + type='SSDHead', + in_channels=(512, 1024, 512, 256, 256, 256), + num_classes=80, + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.15, 0.9), + strides=[8, 16, 32, 64, 100, 300], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) 
+cudnn_benchmark = True diff --git a/mmpose/configs/mmdet/_base_/schedules/schedule_1x.py b/mmpose/configs/mmdet/_base_/schedules/schedule_1x.py new file mode 100644 index 0000000000000000000000000000000000000000..95f30be74ff37080ba0d227d55bbd587feeaa892 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/schedules/schedule_1x.py @@ -0,0 +1,28 @@ +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/_base_/schedules/schedule_20e.py b/mmpose/configs/mmdet/_base_/schedules/schedule_20e.py new file mode 100644 index 0000000000000000000000000000000000000000..75f958b0ed11d77ae3aebff6b7a5d8cb80797d9f --- /dev/null +++ b/mmpose/configs/mmdet/_base_/schedules/schedule_20e.py @@ -0,0 +1,28 @@ +# training schedule for 20e +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=20, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/_base_/schedules/schedule_2x.py b/mmpose/configs/mmdet/_base_/schedules/schedule_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..5b7b241de6f3285e0f127f3c0581c8c84de463e4 --- /dev/null +++ b/mmpose/configs/mmdet/_base_/schedules/schedule_2x.py @@ -0,0 +1,28 @@ +# training schedule for 2x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/albu_example/README.md b/mmpose/configs/mmdet/albu_example/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fa362f95fb91ba4beed5c9d6814e087324bd74d5 --- /dev/null +++ b/mmpose/configs/mmdet/albu_example/README.md @@ -0,0 +1,31 @@ +# Albu Example + +> [Albumentations: fast and flexible image augmentations](https://arxiv.org/abs/1809.06839) + + + +## Abstract + +Data augmentation is a commonly used technique for increasing both the size and the diversity of labeled training sets by leveraging input transformations that preserve output labels. In computer vision domain, image augmentations have become a common implicit regularization technique to combat overfitting in deep convolutional neural networks and are ubiquitously used to improve performance. While most deep learning frameworks implement basic image transformations, the list is typically limited to some variations and combinations of flipping, rotating, scaling, and cropping. Moreover, the image processing speed varies in existing tools for image augmentation. We present Albumentations, a fast and flexible library for image augmentations with many various image transform operations available, that is also an easy-to-use wrapper around other augmentation libraries. We provide examples of image augmentations for different computer vision tasks and show that Albumentations is faster than other commonly used image augmentation tools on the most of commonly used image transformations. + +
+ +
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 | [config](./mask-rcnn_r50_fpn_albu-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) | + +## Citation + +```latex +@article{2018arXiv180906839B, + author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin}, + title = "{Albumentations: fast and flexible image augmentations}", + journal = {ArXiv e-prints}, + eprint = {1809.06839}, + year = 2018 +} +``` diff --git a/mmpose/configs/mmdet/albu_example/mask-rcnn_r50_fpn_albu-1x_coco.py b/mmpose/configs/mmdet/albu_example/mask-rcnn_r50_fpn_albu-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b8a2780e99b88c78adbe74c024fcd2d693817030 --- /dev/null +++ b/mmpose/configs/mmdet/albu_example/mask-rcnn_r50_fpn_albu-1x_coco.py @@ -0,0 +1,66 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' + +albu_train_transforms = [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict( + type='OneOf', + transforms=[ + dict( + type='RGBShift', + r_shift_limit=10, + g_shift_limit=10, + b_shift_limit=10, + p=1.0), + dict( + type='HueSaturationValue', + hue_shift_limit=20, + sat_shift_limit=30, + val_shift_limit=20, + p=1.0) + ], + p=0.1), + dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), +] +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags'], + min_visibility=0.0, + filter_lost_elements=True), + keymap={ + 'img': 'image', + 'gt_masks': 'masks', + 'gt_bboxes': 'bboxes' + }, + skip_img_without_anno=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/albu_example/metafile.yml b/mmpose/configs/mmdet/albu_example/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..3b54bdf15688281e5896faac3f841433497c7eaf --- /dev/null +++ b/mmpose/configs/mmdet/albu_example/metafile.yml @@ -0,0 +1,17 @@ +Models: + - Name: mask-rcnn_r50_fpn_albu-1x_coco + In Collection: Mask 
R-CNN + Config: mask-rcnn_r50_fpn_albu-1x_coco.py + Metadata: + Training Memory (GB): 4.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth diff --git a/mmpose/configs/mmdet/atss/README.md b/mmpose/configs/mmdet/atss/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1411672e205683914c24bec47ac02517d44f684b --- /dev/null +++ b/mmpose/configs/mmdet/atss/README.md @@ -0,0 +1,31 @@ +# ATSS + +> [Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection](https://arxiv.org/abs/1912.02424) + + + +## Abstract + +Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. + +
+ +
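For readers who want the selection rule from the abstract in concrete form, the snippet below is a minimal NumPy sketch: per pyramid level it keeps the `topk` anchors whose centers are closest to the ground-truth center (the configs in this directory use `topk=9`), uses the mean plus standard deviation of those candidates' IoUs as an adaptive threshold, and keeps the candidates above it whose centers fall inside the box. It only illustrates the idea; it is not the `ATSSAssigner` used by the configs below, and the function names, array layout and box format are assumptions.

```python
# Illustrative sketch of ATSS positive-sample selection (not the mmdet ATSSAssigner).
import numpy as np


def iou_xyxy(anchors, gt):
    # anchors: (N, 4), gt: (4,), both in x1, y1, x2, y2 format
    x1 = np.maximum(anchors[:, 0], gt[0])
    y1 = np.maximum(anchors[:, 1], gt[1])
    x2 = np.minimum(anchors[:, 2], gt[2])
    y2 = np.minimum(anchors[:, 3], gt[3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
    area_g = (gt[2] - gt[0]) * (gt[3] - gt[1])
    return inter / (area_a + area_g - inter + 1e-6)


def atss_select(anchors_per_level, gt_box, topk=9):
    """Return indices into the concatenated anchor list chosen as positives."""
    all_anchors = np.concatenate(anchors_per_level, axis=0)
    gt_center = np.array([(gt_box[0] + gt_box[2]) / 2, (gt_box[1] + gt_box[3]) / 2])

    # 1. per pyramid level, take the top-k anchors closest to the GT center
    candidates, offset = [], 0
    for anchors in anchors_per_level:
        centers = np.stack([(anchors[:, 0] + anchors[:, 2]) / 2,
                            (anchors[:, 1] + anchors[:, 3]) / 2], axis=1)
        dist = np.linalg.norm(centers - gt_center, axis=1)
        k = min(topk, len(anchors))
        candidates.append(np.argsort(dist)[:k] + offset)
        offset += len(anchors)
    candidates = np.concatenate(candidates)

    # 2. adaptive threshold = mean + std of the candidates' IoU with the GT box
    ious = iou_xyxy(all_anchors[candidates], gt_box)
    thr = ious.mean() + ious.std()

    # 3. keep candidates above the threshold whose centers lie inside the GT box
    cx = (all_anchors[candidates, 0] + all_anchors[candidates, 2]) / 2
    cy = (all_anchors[candidates, 1] + all_anchors[candidates, 3]) / 2
    inside = ((cx > gt_box[0]) & (cx < gt_box[2]) &
              (cy > gt_box[1]) & (cy < gt_box[3]))
    return candidates[(ious >= thr) & inside]
```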
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 3.7 | 19.7 | 39.4 | [config](./atss_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) | +| R-101 | pytorch | 1x | 5.6 | 12.3 | 41.5 | [config](./atss_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json) | + +## Citation + +```latex +@article{zhang2019bridging, + title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, + author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, + journal = {arXiv preprint arXiv:1912.02424}, + year = {2019} +} +``` diff --git a/mmpose/configs/mmdet/atss/atss_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/atss/atss_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5225d2ab672738d4d427eba252e92bd554252476 --- /dev/null +++ b/mmpose/configs/mmdet/atss/atss_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './atss_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/atss/atss_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/atss/atss_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..69999ce45aee9c76dcc4af974e6e9baabbd5b44b --- /dev/null +++ b/mmpose/configs/mmdet/atss/atss_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/atss/atss_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/atss/atss_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..12d9f13263619333391befd6692c83622091ef4e --- /dev/null +++ b/mmpose/configs/mmdet/atss/atss_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/atss/atss_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/atss/atss_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..306435d7d2fc645f1c2deae784c1875cc4ceaf98 --- /dev/null +++ b/mmpose/configs/mmdet/atss/atss_r50_fpn_1x_coco.py @@ -0,0 +1,71 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = 
dict( + type='ATSS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/atss/atss_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/atss/atss_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b3c46f4b926b82fbab438d6d50eb6c079dabc3 --- /dev/null +++ b/mmpose/configs/mmdet/atss/atss_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,81 @@ +_base_ = '../common/lsj-200e_coco-detection.py' + +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict( + type='ATSS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + 
test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_dataloader = dict(batch_size=8, num_workers=4) + +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/atss/metafile.yml b/mmpose/configs/mmdet/atss/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f4c567ef29ba9ea4fddd7bc00d63a4bca41b1cfa --- /dev/null +++ b/mmpose/configs/mmdet/atss/metafile.yml @@ -0,0 +1,60 @@ +Collections: + - Name: ATSS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ATSS + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1912.02424 + Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection' + README: configs/atss/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6 + Version: v2.0.0 + +Models: + - Name: atss_r50_fpn_1x_coco + In Collection: ATSS + Config: configs/atss/atss_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + inference time (ms/im): + - value: 50.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth + + - Name: atss_r101_fpn_1x_coco + In Collection: ATSS + Config: configs/atss/atss_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 81.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth diff --git a/mmpose/configs/mmdet/autoassign/README.md b/mmpose/configs/mmdet/autoassign/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6b05738ccc222223863974697cee7f2770d8f25 --- /dev/null +++ b/mmpose/configs/mmdet/autoassign/README.md @@ -0,0 +1,35 @@ +# AutoAssign + +> [AutoAssign: Differentiable Label Assignment for Dense Object Detection](https://arxiv.org/abs/2007.03496) + + + +## Abstract + +Determining positive/negative samples for object detection is known as label assignment. Here we present an anchor-free detector named AutoAssign. It requires little human knowledge and achieves appearance-aware through a fully differentiable weighting mechanism. During training, to both satisfy the prior distribution of data and adapt to category characteristics, we present Center Weighting to adjust the category-specific prior distributions. To adapt to object appearances, Confidence Weighting is proposed to adjust the specific assign strategy of each instance. The two weighting modules are then combined to generate positive and negative weights to adjust each location's confidence. 
Extensive experiments on the MS COCO show that our method steadily surpasses other best sampling strategies by large margins with various backbones. Moreover, our best model achieves 52.1% AP, outperforming all existing one-stage detectors. Besides, experiments on other datasets, e.g., PASCAL VOC, Objects365, and WiderFace, demonstrate the broad applicability of AutoAssign. + +
+ +
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :---: | :-----: | :------: | :----: | :---------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | 1x | 4.08 | 40.4 | [config](./autoassign_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.log.json) | + +**Note**: + +1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.3 mAP. mAP 40.3 ~ 40.6 is acceptable. Such fluctuation can also be found in the original implementation. +2. You can get a more stable results ~ mAP 40.6 with a schedule total 13 epoch, and learning rate is divided by 10 at 10th and 13th epoch. + +## Citation + +```latex +@article{zhu2020autoassign, + title={AutoAssign: Differentiable Label Assignment for Dense Object Detection}, + author={Zhu, Benjin and Wang, Jianfeng and Jiang, Zhengkai and Zong, Fuhang and Liu, Songtao and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2007.03496}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/autoassign/autoassign_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/autoassign/autoassign_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..76a361952d95b655451186ef1cb39df2f24ae305 --- /dev/null +++ b/mmpose/configs/mmdet/autoassign/autoassign_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,69 @@ +# We follow the original implementation which +# adopts the Caffe pre-trained backbone. 
+_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='AutoAssign', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[102.9801, 115.9465, 122.7717], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + relu_before_extra_convs=True, + init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')), + bbox_head=dict( + type='AutoAssignHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_bbox=dict(type='GIoULoss', loss_weight=5.0)), + train_cfg=None, + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), paramwise_cfg=dict(norm_decay_mult=0.)) diff --git a/mmpose/configs/mmdet/autoassign/metafile.yml b/mmpose/configs/mmdet/autoassign/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..ab7a4af3371d4be5325498db97af0e7dd8fdc28c --- /dev/null +++ b/mmpose/configs/mmdet/autoassign/metafile.yml @@ -0,0 +1,33 @@ +Collections: + - Name: AutoAssign + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - AutoAssign + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2007.03496 + Title: 'AutoAssign: Differentiable Label Assignment for Dense Object Detection' + README: configs/autoassign/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/autoassign.py#L6 + Version: v2.12.0 + +Models: + - Name: autoassign_r50-caffe_fpn_1x_coco + In Collection: AutoAssign + Config: configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.08 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth diff --git a/mmpose/configs/mmdet/boxinst/README.md b/mmpose/configs/mmdet/boxinst/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6f01c5d27b13b1758a5c9a60251383852e0f48e --- /dev/null +++ b/mmpose/configs/mmdet/boxinst/README.md @@ -0,0 +1,32 @@ +# BoxInst + +> [BoxInst: High-Performance Instance Segmentation with Box Annotations](https://arxiv.org/pdf/2012.02310.pdf) + + + +## Abstract + +We present a high-performance method that can achieve mask-level instance segmentation with only bounding-box annotations for training. 
While this setting has been studied in the literature, here we show significantly stronger performance with a simple design (e.g., dramatically improving previous best reported mask AP of 21.1% to 31.6% on the COCO dataset). Our core idea is to redesign the loss +of learning masks in instance segmentation, with no modification to the segmentation network itself. The new loss functions can supervise the mask training without relying on mask annotations. This is made possible with two loss terms, namely, 1) a surrogate term that minimizes the discrepancy between the projections of the ground-truth box and the predicted mask; 2) a pairwise loss that can exploit the prior that proximal pixels with similar colors are very likely to have the same category label. Experiments demonstrate that the redesigned mask loss can yield surprisingly high-quality instance masks with only box annotations. For example, without using any mask annotations, with a ResNet-101 backbone and 3× training schedule, we achieve 33.2% mask AP on COCO test-dev split (vs. 39.1% of the fully supervised counterpart). Our excellent experiment results on COCO and Pascal VOC indicate that our method dramatically narrows the performance gap between weakly and fully supervised instance segmentation. + +
+ +
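The first of the two loss terms above (the box-projection term) is simple enough to sketch. The toy function below, written for a single instance, rasterises the ground-truth box, projects both the predicted mask probabilities and the box mask onto each image axis with a max, and compares the two projections with a dice loss. It is an illustration of the idea under assumed tensor shapes and coordinate handling, not the `BoxInstMaskHead` loss used by the config below.

```python
# Sketch of the BoxInst box-projection loss term for one instance (illustrative only).
import torch


def dice_loss(pred, target, eps=1e-5):
    inter = (pred * target).sum()
    return 1.0 - (2.0 * inter + eps) / (pred.pow(2).sum() + target.pow(2).sum() + eps)


def projection_loss(mask_prob, gt_box, mask_stride=1):
    """mask_prob: (H, W) predicted mask probabilities in [0, 1].
    gt_box: (x1, y1, x2, y2) in image coordinates; mask_stride maps it to the mask grid."""
    h, w = mask_prob.shape
    # Rasterise the box into a binary mask on the same grid as the prediction.
    x1, y1, x2, y2 = [int(round(v / mask_stride)) for v in gt_box]
    box_mask = torch.zeros(h, w)
    box_mask[y1:y2, x1:x2] = 1.0

    # Project mask and box onto each axis with a max, then compare the projections.
    loss_x = dice_loss(mask_prob.max(dim=0).values, box_mask.max(dim=0).values)
    loss_y = dice_loss(mask_prob.max(dim=1).values, box_mask.max(dim=1).values)
    return loss_x + loss_y
```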
+ +## Results and Models + +| Backbone | Style | MS train | Lr schd | bbox AP | mask AP | Config | Download | +| :------: | :-----: | :------: | :-----: | :-----: | :-----: | :-----------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Y | 1x | 39.6 | 31.1 | [config](./boxinst_r50_fpn_ms-90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r50_fpn_ms-90k_coco/boxinst_r50_fpn_ms-90k_coco_20221228_163052-6add751a.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r50_fpn_ms-90k_coco/boxinst_r50_fpn_ms-90k_coco_20221228_163052.log.json) | +| R-101 | pytorch | Y | 1x | 41.8 | 32.7 | [config](./boxinst_r101_fpn_ms-90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r101_fpn_ms-90k_coco/boxinst_r101_fpn_ms-90k_coco_20221229_145106-facf375b.pth) \|[log](https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r101_fpn_ms-90k_coco/boxinst_r101_fpn_ms-90k_coco_20221229_145106.log.json) | + +## Citation + +```latex +@inproceedings{tian2020boxinst, + title = {{BoxInst}: High-Performance Instance Segmentation with Box Annotations}, + author = {Tian, Zhi and Shen, Chunhua and Wang, Xinlong and Chen, Hao}, + booktitle = {Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR)}, + year = {2021} +} +``` diff --git a/mmpose/configs/mmdet/boxinst/boxinst_r101_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/boxinst/boxinst_r101_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ab2b11628a79aee7f6f6403cecf8f7b1d0526d69 --- /dev/null +++ b/mmpose/configs/mmdet/boxinst/boxinst_r101_fpn_ms-90k_coco.py @@ -0,0 +1,8 @@ +_base_ = './boxinst_r50_fpn_ms-90k_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/boxinst/boxinst_r50_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/boxinst/boxinst_r50_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..371f252a153855e19f3a3bb25cd42c83a4bb77fd --- /dev/null +++ b/mmpose/configs/mmdet/boxinst/boxinst_r50_fpn_ms-90k_coco.py @@ -0,0 +1,93 @@ +_base_ = '../common/ms-90k_coco.py' + +# model settings +model = dict( + type='BoxInst', + data_preprocessor=dict( + type='BoxInstDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + mask_stride=4, + pairwise_size=3, + pairwise_dilation=2, + pairwise_color_thresh=0.3, + bottom_pixels_removed=10), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='BoxInstBboxHead', + num_params=593, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_on_bbox=True, + centerness_on_reg=True, + 
dcn_on_last_conv=False, + center_sampling=True, + conv_bias=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + mask_head=dict( + type='BoxInstMaskHead', + num_layers=3, + feat_channels=16, + size_of_interest=8, + mask_out_stride=4, + topk_masks_per_img=64, + mask_feature_head=dict( + in_channels=256, + feat_channels=128, + start_level=0, + end_level=2, + out_channels=16, + mask_stride=8, + num_stacked_convs=4, + norm_cfg=dict(type='BN', requires_grad=True)), + loss_mask=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + eps=5e-6, + loss_weight=1.0)), + # model training and testing settings + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr=0.5)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) + +# evaluator +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/boxinst/metafile.yml b/mmpose/configs/mmdet/boxinst/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c97fcdcd636cd4d8d1a1437679f20b96d90fc74f --- /dev/null +++ b/mmpose/configs/mmdet/boxinst/metafile.yml @@ -0,0 +1,52 @@ +Collections: + - Name: BoxInst + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - FPN + - CondInst + Paper: + URL: https://arxiv.org/abs/2012.02310 + Title: 'BoxInst: High-Performance Instance Segmentation with Box Annotations' + README: configs/boxinst/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v3.0.0rc6/mmdet/models/detectors/boxinst.py#L8 + Version: v3.0.0rc6 + +Models: + - Name: boxinst_r50_fpn_ms-90k_coco + In Collection: BoxInst + Config: configs/boxinst/boxinst_r50_fpn_ms-90k_coco.py + Metadata: + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 30.8 + Weights: https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r50_fpn_ms-90k_coco/boxinst_r50_fpn_ms-90k_coco_20221228_163052-6add751a.pth + + - Name: boxinst_r101_fpn_ms-90k_coco + In Collection: BoxInst + Config: configs/boxinst/boxinst_r101_fpn_ms-90k_coco.py + Metadata: + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/boxinst/boxinst_r101_fpn_ms-90k_coco/boxinst_r101_fpn_ms-90k_coco_20221229_145106-facf375b.pth diff --git a/mmpose/configs/mmdet/bytetrack/README.md b/mmpose/configs/mmdet/bytetrack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..30b96f07cece7666c488972e315dbdcededdb21f --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/README.md @@ -0,0 +1,132 @@ +# ByteTrack: Multi-Object Tracking by Associating Every Detection Box + +## Abstract + + + +Multi-object tracking (MOT) aims at estimating bounding boxes and identities of objects in videos. Most methods obtain identities by associating detection boxes whose scores are higher than a threshold. The objects with low detection scores, e.g. 
occluded objects, are simply thrown away, which brings non-negligible true object missing and fragmented trajectories. To solve this problem, we present a simple, effective and generic association method, tracking by associating every detection box instead of only the high score ones. For the low score detection boxes, we utilize their similarities with tracklets to recover true objects and filter out the background detections. When applied to 9 different state-of-the-art trackers, our method achieves consistent improvement on IDF1 score ranging from 1 to 10 points. To put forwards the state-of-the-art performance of MOT, we design a simple and strong tracker, named ByteTrack. For the first time, we achieve 80.3 MOTA, 77.3 IDF1 and 63.1 HOTA on the test set of MOT17 with 30 FPS running speed on a single V100 GPU. + + + +
+ +
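As a rough illustration of the association strategy described above (and of the `obj_score_thrs` and `match_iou_thrs` fields in the configs below), here is a simplified, hypothetical sketch of the two-stage BYTE matching. It is not the `ByteTracker` code in this diff: the real tracker additionally uses a Kalman filter, tentative tracks and score-weighted IoU, and `iou_fn` is assumed to be supplied by the caller.

```python
# Simplified, hypothetical sketch of BYTE association -- not the ByteTracker code.
def byte_associate(tracks, detections, iou_fn,
                   high_thr=0.6, low_thr=0.1,
                   match_iou_high=0.1, match_iou_low=0.5):
    """tracks: list of dicts with a 'box' entry; detections: list of (box, score)."""
    high = [d for d in detections if d[1] >= high_thr]
    low = [d for d in detections if low_thr <= d[1] < high_thr]

    def greedy_match(remaining_tracks, dets, iou_thr):
        matches, unmatched = [], []
        used = set()
        for trk in remaining_tracks:
            best_j, best_iou = None, iou_thr
            for j, (box, _score) in enumerate(dets):
                if j in used:
                    continue
                overlap = iou_fn(trk['box'], box)
                if overlap > best_iou:
                    best_j, best_iou = j, overlap
            if best_j is None:
                unmatched.append(trk)
            else:
                used.add(best_j)
                matches.append((trk, dets[best_j]))
        return matches, unmatched

    # 1st association: high-score boxes against all existing tracks.
    first, leftover = greedy_match(tracks, high, match_iou_high)
    # 2nd association: low-score boxes (e.g. occluded objects) are used to
    # recover the remaining tracks instead of being thrown away.
    second, lost = greedy_match(leftover, low, match_iou_low)
    return first + second, lost
```

The key design choice is the second pass: every detection box participates in association, so partially occluded objects with low scores extend existing tracklets rather than causing fragmented trajectories.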
+ +## Citation + + + +```latex +@inproceedings{zhang2021bytetrack, + title={ByteTrack: Multi-Object Tracking by Associating Every Detection Box}, + author={Zhang, Yifu and Sun, Peize and Jiang, Yi and Yu, Dongdong and Yuan, Zehuan and Luo, Ping and Liu, Wenyu and Wang, Xinggang}, + journal={arXiv preprint arXiv:2110.06864}, + year={2021} +} +``` + +## Results and models on MOT17 + +Please note that the performance on `MOT17-half-val` is comparable with the performance reported in the manuscript, while the performance on `MOT17-test` is lower than the performance reported in the manuscript. + +The reason is that ByteTrack tunes customized hyper-parameters (e.g., image resolution and the high threshold of detection score) for each video in `MOT17-test` set, while we use unified parameters. + +| Method | Detector | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download | +| :-------: | :------: | :---------------------------: | :------------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :-------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ByteTrack | YOLOX-X | CrowdHuman + MOT17-half-train | MOT17-half-val | N | - | 67.5 | 78.6 | 78.5 | 12852 | 21060 | 672 | [config](bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py) | [model](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500-1985c9f0.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500.log.json) | +| ByteTrack | YOLOX-X | CrowdHuman + MOT17-half-train | MOT17-test | N | - | 61.7 | 78.1 | 74.8 | 36705 | 85032 | 2049 | [config](bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py) | [model](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500-1985c9f0.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500.log.json) | + +## Results and models on MOT20 + +Since there are only 4 videos in `MOT20-train`, ByteTrack is validated on `MOT17-train` rather than `MOT20-half-train`. + +Please note that the MOTA on `MOT20-test` is slightly lower than that reported in the manuscript, because we don't tune the threshold for each video. + +| Method | Detector | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. 
| Config | Download | +| :-------: | :------: | :----------------------: | :---------: | :----: | :------------: | :--: | :--: | :--: | :----: | :----: | :---: | :------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ByteTrack | YOLOX-X | CrowdHuman + MOT20-train | MOT17-train | N | - | 57.3 | 64.9 | 71.8 | 33,747 | 83,385 | 1,263 | [config](bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py) | [model](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot20-private_20220506_101040-9ce38a60.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot20-private_20220506_101040.log.json) | +| ByteTrack | YOLOX-X | CrowdHuman + MOT20-train | MOT20-test | N | - | 61.5 | 77.0 | 75.4 | 33,083 | 84,433 | 1,345 | [config](bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py) | [model](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot20-private_20220506_101040-9ce38a60.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot20-private_20220506_101040.log.json) | + +## Get started + +### 1. Development Environment Setup + +For the tracking development environment setup, please refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Preparation + +For tracking dataset preparation, please refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md). + +### 3. Training + +Because hyper-parameters such as the learning rate in the default configuration file are tuned for 8 GPUs, we recommend training with 8 GPUs in order to reproduce the reported accuracy. You can use the following command to start the training. + +**3.1 Joint training and tracking** + +Some algorithms, such as ByteTrack and OC-SORT, do not need a ReID model, so we provide joint training and tracking for convenience. + +```shell +# Train ByteTrack on the crowdhuman and mot17-half-train datasets with the following command. +# The number after the config file is the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 +``` + +**3.2 Separate training and tracking** + +We also support training the detector independently, as in SORT, DeepSORT and StrongSORT, and then using this detector for tracking. + +```shell +# Train the YOLOX-X detector on the crowdhuman and mot17-half-train datasets with the following command. +# The number after the config file is the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/bytetrack/yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 +``` + +For more detailed usage of `train.py/dist_train.sh/slurm_train.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 4.
Testing and evaluation + +#### 4.1 Example on MOTxx-halfval dataset + +**4.1.1 Use the jointly trained detector for evaluation and testing** + +```shell +bash tools/dist_test_tracking.sh configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 --checkpoint ${CHECKPOINT_FILE} +``` + +**4.1.2 Use the separately trained detector for evaluation and testing** + +```shell +bash tools/dist_test_tracking.sh configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 --detector ${CHECKPOINT_FILE} +``` + +**4.1.3 Use video-based evaluation and testing** + +We provide two ways (image-based and video-based) of evaluation and testing. +If you want to use video-based evaluation and testing, you can modify the config as follows: + +```python +val_dataloader = dict( + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False)) +``` + +#### 4.2 Example on MOTxx-test dataset + +If you want to get the results of the [MOT Challenge](https://motchallenge.net/) test set, please use the following command to generate result files that can be used for submission. The results will be stored in `./mot_17_test_res`; you can modify the save path in the `test_evaluator` of the config. + +```shell +bash tools/dist_test_tracking.sh configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py 8 --checkpoint ${CHECKPOINT_FILE} +``` + +For more detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5. Inference + +Use a single GPU to run inference on a video and save the result as a new video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py --checkpoint ${CHECKPOINT_FILE} --out mot.mp4 +``` + +For more detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md). diff --git a/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..24b3f7841947204f2ecea385dcfa8b97fa0c6e85 --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,249 @@ +_base_ = ['../yolox/yolox_x_8xb8-300e_coco.py'] + +dataset_type = 'MOTChallengeDataset' +data_root = 'data/MOT17/' + +img_scale = (1440, 800) # width, height +batch_size = 4 + +detector = _base_.model +detector.pop('data_preprocessor') +detector.bbox_head.update(dict(num_classes=1)) +detector.test_cfg.nms.update(dict(iou_threshold=0.7)) +detector['init_cfg'] = dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth' # noqa: E501 +) +del _base_.model + +model = dict( + type='ByteTrack', + data_preprocessor=dict( + type='TrackDataPreprocessor', + pad_size_divisor=32, + # In ByteTrack we jointly train the detector and evaluate tracking + # performance; use_det_processor means the independent detector + # data_preprocessor is used;
of course, you can train detector independently + # like strongsort + use_det_processor=True, + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(576, 1024), + size_divisor=32, + interval=10) + ]), + detector=detector, + tracker=dict( + type='ByteTracker', + motion=dict(type='KalmanFilter'), + obj_score_thrs=dict(high=0.6, low=0.1), + init_track_thr=0.7, + weight_iou_with_det_scores=True, + match_iou_thrs=dict(high=0.1, low=0.5, tentative=0.3), + num_frames_retain=30)) + +train_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + bbox_clip_border=False), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + bbox_clip_border=False), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + bbox_clip_border=False), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Resize', + scale=img_scale, + keep_ratio=True, + clip_object_border=False), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + size_divisor=32, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadTrackAnnotations'), + ]), + dict(type='PackTrackInputs') +] +train_dataloader = dict( + _delete_=True, + batch_size=batch_size, + num_workers=4, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type='CocoDataset', + data_root='data/MOT17', + ann_file='annotations/half-train_cocoformat.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_train.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_val.json', + data_prefix=dict(img='val'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + ]), + pipeline=train_pipeline)) + +val_dataloader = dict( + _delete_=True, + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + # video_based + # sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + sampler=dict(type='TrackImgSampler'), # image_based + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/half-val_cocoformat.json', + data_prefix=dict(img_path='train'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# 
optimizer +# default 8 gpu +base_lr = 0.001 / 8 * batch_size +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +# some hyper parameters +# training settings +max_epochs = 80 +num_last_epochs = 10 +interval = 5 + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=70, + val_interval=1) + +# learning policy +param_scheduler = [ + dict( + # use quadratic formula to warm up 1 epochs + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=1, + convert_to_iter_based=True), + dict( + # use cosine lr from 1 to 70 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=1, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last 10 epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + priority=49) +] + +default_hooks = dict( + checkpoint=dict( + _delete_=True, type='CheckpointHook', interval=1, max_keep_ckpts=10), + visualization=dict(type='TrackVisualizationHook', draw=False)) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# evaluator +val_evaluator = dict( + _delete_=True, + type='MOTChallengeMetric', + metric=['HOTA', 'CLEAR', 'Identity'], + postprocess_tracklet_cfg=[ + dict(type='InterpolateTracklets', min_num_frames=5, max_num_frames=20) + ]) +test_evaluator = val_evaluator + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (4 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) + +del detector +del _base_.tta_model +del _base_.tta_pipeline +del _base_.train_dataset diff --git a/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py new file mode 100644 index 0000000000000000000000000000000000000000..9202f5fbda29d2a1d4cc81322c99d638ebf475d6 --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py @@ -0,0 +1,127 @@ +_base_ = [ + './bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_' + 'test-mot17halfval.py' +] + +dataset_type = 'MOTChallengeDataset' + +img_scale = (1600, 896) # weight, height + +model = dict( + data_preprocessor=dict( + type='TrackDataPreprocessor', + use_det_processor=True, + pad_size_divisor=32, + batch_augments=[ + dict(type='BatchSyncRandomResize', random_size_range=(640, 1152)) + ]), + tracker=dict( + weight_iou_with_det_scores=False, + match_iou_thrs=dict(high=0.3), + )) + +train_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + bbox_clip_border=True), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + bbox_clip_border=True), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + bbox_clip_border=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Resize', + scale=img_scale, + keep_ratio=True, + clip_object_border=True), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + size_divisor=32, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadTrackAnnotations'), + ]), + dict(type='PackTrackInputs') +] +train_dataloader = dict( + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type='CocoDataset', + data_root='data/MOT20', + ann_file='annotations/train_cocoformat.json', + # TODO: mmdet use img as key, but img_path is needed + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_train.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_val.json', + data_prefix=dict(img='val'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + ]), + pipeline=train_pipeline)) 
+val_dataloader = dict( + dataset=dict(ann_file='annotations/train_cocoformat.json')) + +test_dataloader = dict( + dataset=dict( + data_root='data/MOT20', ann_file='annotations/test_cocoformat.json')) + +test_evaluator = dict( + type='MOTChallengeMetrics', + postprocess_tracklet_cfg=[ + dict(type='InterpolateTracklets', min_num_frames=5, max_num_frames=20) + ], + format_only=True, + outfile_prefix='./mot_20_test_res') diff --git a/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..9c2119203a46e76cd8b6cc8f755334f58ffb086d --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,9 @@ +_base_ = [ + './bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_' + 'test-mot17halfval.py' +] + +# fp16 settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') +val_cfg = dict(type='ValLoop', fp16=True) +test_cfg = dict(type='TestLoop', fp16=True) diff --git a/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4427c18bff66ab1fa2a9ba22517989722d0625 --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py @@ -0,0 +1,17 @@ +_base_ = [ + './bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-' + 'mot17halftrain_test-mot17halfval.py' +] + +test_dataloader = dict( + dataset=dict( + data_root='data/MOT17/', + ann_file='annotations/test_cocoformat.json', + data_prefix=dict(img_path='test'))) +test_evaluator = dict( + type='MOTChallengeMetrics', + postprocess_tracklet_cfg=[ + dict(type='InterpolateTracklets', min_num_frames=5, max_num_frames=20) + ], + format_only=True, + outfile_prefix='./mot_17_test_res') diff --git a/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py new file mode 100644 index 0000000000000000000000000000000000000000..1016999729263d72bbd75019be4968bc3960e368 --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py @@ -0,0 +1,8 @@ +_base_ = [ + './bytetrack_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py' +] + +# fp16 settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') +val_cfg = dict(type='ValLoop', fp16=True) +test_cfg = dict(type='TestLoop', fp16=True) diff --git a/mmpose/configs/mmdet/bytetrack/metafile.yml b/mmpose/configs/mmdet/bytetrack/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..8ed638cf6dda0b0b3db264aa8847358d78ee0fbe --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/metafile.yml @@ -0,0 +1,53 @@ +Collections: + - Name: ByteTrack + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x V100 GPUs + Architecture: + - YOLOX + Paper: + URL: https://arxiv.org/abs/2110.06864 + Title: ByteTrack Multi-Object Tracking by Associating Every Detection Box + README: configs/bytetrack/README.md + +Models: + - Name: 
bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval + In Collection: ByteTrack + Config: configs/bytetrack/bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: CrowdHuman + MOT17-half-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT17-half-val + Metrics: + HOTA: 67.5 + MOTA: 78.6 + IDF1: 78.5 + Weights: https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500-1985c9f0.pth + + - Name: bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test + In Collection: ByteTrack + Config: configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17test.py + Metadata: + Training Data: CrowdHuman + MOT17-half-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT17-test + Metrics: + MOTA: 78.1 + IDF1: 74.8 + Weights: https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot17-private-half_20211218_205500-1985c9f0.pth + + - Name: bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test + In Collection: ByteTrack + Config: configs/bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py + Metadata: + Training Data: CrowdHuman + MOT20-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT20-test + Metrics: + MOTA: 77.0 + IDF1: 75.4 + Weights: https://download.openmmlab.com/mmtracking/mot/bytetrack/bytetrack_yolox_x/bytetrack_yolox_x_crowdhuman_mot20-private_20220506_101040-9ce38a60.pth diff --git a/mmpose/configs/mmdet/bytetrack/yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/bytetrack/yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc3acd487211d04fb3d6e4504ded5235393e4a7 --- /dev/null +++ b/mmpose/configs/mmdet/bytetrack/yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,6 @@ +_base_ = [ + '../strongsort/yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py' # noqa: E501 +] + +# fp16 settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') diff --git a/mmpose/configs/mmdet/carafe/README.md b/mmpose/configs/mmdet/carafe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..61e1fa60fee5d1dc89539874c784c75df63b2ad3 --- /dev/null +++ b/mmpose/configs/mmdet/carafe/README.md @@ -0,0 +1,42 @@ +# CARAFE + +> [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188) + + + +## Abstract + +Feature upsampling is a key operation in a number of modern convolutional network architectures, e.g. feature pyramids. Its design is critical for dense prediction tasks such as object detection and semantic/instance segmentation. In this work, we propose Content-Aware ReAssembly of FEatures (CARAFE), a universal, lightweight and highly effective operator to fulfill this goal. CARAFE has several appealing properties: (1) Large field of view. Unlike previous works (e.g. bilinear interpolation) that only exploit sub-pixel neighborhood, CARAFE can aggregate contextual information within a large receptive field. (2) Content-aware handling. Instead of using a fixed kernel for all samples (e.g. deconvolution), CARAFE enables instance-specific content-aware handling, which generates adaptive kernels on-the-fly. (3) Lightweight and fast to compute. 
CARAFE introduces little computational overhead and can be readily integrated into modern network architectures. We conduct comprehensive evaluations on standard benchmarks in object detection, instance/semantic segmentation and inpainting. CARAFE shows consistent and substantial gains across all the tasks (1.2%, 1.3%, 1.8%, 1.1db respectively) with negligible computational overhead. It has great potential to serve as a strong building block for future research. + +
+ +
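For readers who want to see what "content-aware reassembly" actually computes, below is a naive, hypothetical PyTorch sketch of the CARAFE upsampling step. The configs in this folder use the optimized CUDA operator referenced in the Implementation section, not this code; the kernel layout here is a simplifying assumption.

```python
# Naive, illustrative CARAFE sketch -- the repo uses an optimized CUDA operator.
import torch
import torch.nn.functional as F


def carafe_naive(feats, kernels, up_kernel=5, scale=2):
    """feats:   (N, C, H, W) features to upsample.
    kernels: (N, scale*scale*up_kernel*up_kernel, H, W) reassembly kernels,
             normally predicted from `feats` by a small conv head.
    """
    n, c, h, w = feats.shape
    k2 = up_kernel * up_kernel
    # One kernel per output sub-pixel, normalised over the k x k neighbourhood.
    kernels = kernels.view(n, scale * scale, k2, h, w).softmax(dim=2)
    # Gather the k x k neighbourhood of every source location.
    patches = F.unfold(feats, kernel_size=up_kernel, padding=up_kernel // 2)
    patches = patches.view(n, c, k2, h, w)
    # Each output sub-pixel is a content-aware weighted sum of its neighbourhood.
    out = torch.einsum('nckhw,nskhw->nschw', patches, kernels)
    out = out.view(n, scale, scale, c, h, w).permute(0, 3, 4, 1, 5, 2)
    return out.reshape(n, c, h * scale, w * scale)


# e.g.: carafe_naive(torch.randn(1, 16, 8, 8),
#                    torch.randn(1, 2 * 2 * 5 * 5, 8, 8)).shape == (1, 16, 16, 16)
```

Unlike bilinear interpolation or a fixed deconvolution kernel, the weights above are predicted per location, which is what gives CARAFE its large, content-aware field of view.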
+ +## Results and Models + +The results on COCO 2017 val are shown in the table below. + +| Method | Backbone | Style | Lr schd | Test Proposal Num | Inf time (fps) | Box AP | Mask AP | Config | Download | +| :--------------------: | :------: | :-----: | :-----: | :---------------: | :------------: | :----: | :-----: | :-----------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 16.5 | 38.6 | 38.6 | [config](./faster-rcnn_r50_fpn-carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_20200504_175733.log.json) | +| - | - | - | - | 2000 | | | | | | +| Mask R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 14.0 | 39.3 | 35.8 | [config](./mask-rcnn_r50_fpn-carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_20200503_135957.log.json) | +| - | - | - | - | 2000 | | | | | | + +## Implementation + +The CUDA implementation of CARAFE can be found at https://github.com/myownskyW7/CARAFE. + +## Citation + +We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188).
+ +```latex +@inproceedings{Wang_2019_ICCV, + title = {CARAFE: Content-Aware ReAssembly of FEatures}, + author = {Wang, Jiaqi and Chen, Kai and Xu, Rui and Liu, Ziwei and Loy, Chen Change and Lin, Dahua}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` diff --git a/mmpose/configs/mmdet/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py b/mmpose/configs/mmdet/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..388305cceac2e81eb1b4df6eac36662df7b8bf0d --- /dev/null +++ b/mmpose/configs/mmdet/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py @@ -0,0 +1,20 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict(pad_size_divisor=64), + neck=dict( + type='FPN_CARAFE', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + start_level=0, + end_level=-1, + norm_cfg=None, + act_cfg=None, + order=('conv', 'norm', 'act'), + upsample_cfg=dict( + type='carafe', + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64))) diff --git a/mmpose/configs/mmdet/carafe/mask-rcnn_r50_fpn-carafe_1x_coco.py b/mmpose/configs/mmdet/carafe/mask-rcnn_r50_fpn-carafe_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce621de77aff60f39126136cb25ca9ca38a1c9f --- /dev/null +++ b/mmpose/configs/mmdet/carafe/mask-rcnn_r50_fpn-carafe_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict(pad_size_divisor=64), + neck=dict( + type='FPN_CARAFE', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + start_level=0, + end_level=-1, + norm_cfg=None, + act_cfg=None, + order=('conv', 'norm', 'act'), + upsample_cfg=dict( + type='carafe', + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64)), + roi_head=dict( + mask_head=dict( + upsample_cfg=dict( + type='carafe', + scale_factor=2, + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64)))) diff --git a/mmpose/configs/mmdet/carafe/metafile.yml b/mmpose/configs/mmdet/carafe/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..863c0f49ae6322429e91cf068b06f713a29fcbdc --- /dev/null +++ b/mmpose/configs/mmdet/carafe/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: CARAFE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - FPN_CARAFE + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1905.02188 + Title: 'CARAFE: Content-Aware ReAssembly of FEatures' + README: configs/carafe/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11 + Version: v2.12.0 + +Models: + - Name: faster-rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.26 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth + + - Name: mask-rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: 
configs/carafe/mask-rcnn_r50_fpn-carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.31 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth diff --git a/mmpose/configs/mmdet/cascade_rcnn/README.md b/mmpose/configs/mmdet/cascade_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..81fce448f9daec77b3e716ac731dce13be751c74 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/README.md @@ -0,0 +1,79 @@ +# Cascade R-CNN + +> [Cascade R-CNN: High Quality Object Detection and Instance Segmentation](https://arxiv.org/abs/1906.09756) + + + +## Abstract + +In object detection, the intersection over union (IoU) threshold is frequently used to define positives/negatives. The threshold used to train a detector defines its quality. While the commonly used threshold of 0.5 leads to noisy (low-quality) detections, detection performance frequently degrades for larger thresholds. This paradox of high-quality detection has two causes: 1) overfitting, due to vanishing positive samples for large thresholds, and 2) inference-time quality mismatch between detector and test hypotheses. A multi-stage object detection architecture, the Cascade R-CNN, composed of a sequence of detectors trained with increasing IoU thresholds, is proposed to address these problems. The detectors are trained sequentially, using the output of a detector as training set for the next. This resampling progressively improves hypotheses quality, guaranteeing a positive training set of equivalent size for all detectors and minimizing overfitting. The same cascade is applied at inference, to eliminate quality mismatches between hypotheses and detectors. An implementation of the Cascade R-CNN without bells or whistles achieves state-of-the-art performance on the COCO dataset, and significantly improves high-quality detection on generic and specific object detection datasets, including VOC, KITTI, CityPerson, and WiderFace. Finally, the Cascade R-CNN is generalized to instance segmentation, with nontrivial improvements over the Mask R-CNN. + +
+ +
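The key mechanism above (each stage refines the boxes of the previous one and is trained with a stricter IoU threshold) can be summarised with the following hypothetical, high-level sketch. It only mirrors the idea behind the cascade head used by the configs below; `apply_deltas`, `roi_extract` and `bbox_head` are illustrative stand-ins, not mmdet APIs.

```python
# High-level, hypothetical sketch of a 3-stage cascade head (not mmdet code).
IOU_THRESHOLDS = (0.5, 0.6, 0.7)  # positive-sample IoU threshold per stage


def apply_deltas(boxes, deltas):
    # Trivial stand-in for the usual (dx, dy, dw, dh) box decoding.
    return boxes + deltas


def cascade_forward(features, proposals, stages):
    """stages: list of (roi_extract, bbox_head) callables, one per threshold."""
    boxes = proposals
    stage_scores = []
    for (roi_extract, bbox_head), iou_thr in zip(stages, IOU_THRESHOLDS):
        roi_feats = roi_extract(features, boxes)
        scores, deltas = bbox_head(roi_feats)
        # The refined boxes of stage t become the hypotheses of stage t+1.
        # Because their quality (IoU with the ground truth) keeps increasing,
        # the next head can be trained with the stricter threshold iou_thr
        # without running out of positive samples.
        boxes = apply_deltas(boxes, deltas)
        stage_scores.append(scores)
    # At test time the classification scores of all stages are averaged.
    return boxes, sum(stage_scores) / len(stage_scores)
```

This resampling across stages is what distinguishes the `20e`/`3x` cascade configs below from a single-stage R-CNN head trained at a single IoU threshold.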
+ +## Results and Models + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.2 | | 40.4 | [config](./cascade-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) | +| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 40.3 | [config](./cascade-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) | +| R-50-FPN | pytorch | 20e | - | - | 41.0 | [config](./cascade-rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) | +| R-101-FPN | caffe | 1x | 6.2 | | 42.3 | [config](./cascade-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) | +| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 42.0 | [config](./cascade-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) | +| R-101-FPN | pytorch | 20e | - | - | 42.5 | [config](./cascade-rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 10.9 | 43.7 | [config](./cascade-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 7.6 | | 43.7 | [config](./cascade-rcnn_x101-32x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.7 | | 44.7 | [config](./cascade-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 10.7 | | 44.5 | [config](./cascade-rcnn_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 5.9 | | 41.2 | 36.0 | [config](./cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) | +| R-50-FPN | pytorch | 1x | 6.0 | 11.2 | 41.2 | 35.9 | [config](./cascade-mask-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) | +| R-50-FPN | pytorch | 20e | - | - | 41.9 | 36.5 | [config](./cascade-mask-rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json) | +| R-101-FPN | caffe | 1x | 7.8 | | 43.2 | 37.6 | [config](./cascade-mask-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json) | +| R-101-FPN | pytorch | 1x | 7.9 | 9.8 | 42.9 | 37.3 | [config](./cascade-mask-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) | +| R-101-FPN | pytorch | 20e | - | - | 43.4 | 37.8 | [config](./cascade-mask-rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 9.2 | 8.6 | 44.3 | 38.3 | [config](./cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 9.2 | - | 45.0 | 39.0 | [config](./cascade-mask-rcnn_x101-32x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 12.2 | 6.7 | 45.3 | 39.2 | [config](./cascade-mask-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 12.2 | | 45.6 | 39.5 | [config](./cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json) | + +**Notes:** + +- The `20e` schedule in Cascade (Mask) 
R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs. + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training for Cascade Mask R-CNN. The users could finetune them for downstream tasks. + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 3x | 5.7 | | 44.0 | 38.1 | [config](./cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651.log.json) | +| R-50-FPN | pytorch | 3x | 5.9 | | 44.3 | 38.5 | [config](./cascade-mask-rcnn_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719.log.json) | +| R-101-FPN | caffe | 3x | 7.7 | | 45.4 | 39.5 | [config](./cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620.log.json) | +| R-101-FPN | pytorch | 3x | 7.8 | | 45.5 | 39.6 | [config](./cascade-mask-rcnn_r101_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236.log.json) | +| X-101-32x4d-FPN | pytorch | 3x | 9.0 | | 46.3 | 40.1 | [config](./cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234.log.json) | +| X-101-32x8d-FPN | pytorch | 3x | 12.1 | | 46.1 | 39.9 | [config](./cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640.log.json) | +| X-101-64x4d-FPN | pytorch | 3x | 12.0 | | 46.6 | 40.3 | [config](./cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311.log.json) | + +## Citation + +```latex +@article{Cai_2019, + title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation}, + ISSN={1939-3539}, + url={http://dx.doi.org/10.1109/tpami.2019.2956516}, + DOI={10.1109/tpami.2019.2956516}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Cai, Zhaowei and Vasconcelos, Nuno}, + year={2019}, + pages={1–1} +} +``` diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6d85340e1cb92c60293c3710d05ef708d3726fdd --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a6855ee8c6fffd5e8d48f6cc2bb41e9dde9f6516 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d962c229d2621e7364c13959e3c4c1137edef1 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..497148f513edb79ca58f719f242be6274f923a65 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = 
'./cascade-mask-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..183b5c50ff5563d987b2937d27d6d02bdd6cc2bd --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r101_fpn_ms-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..497f68c4ab458ec49ad1d0c89cabbb2c0eb444f3 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = ['./cascade-mask-rcnn_r50_fpn_1x_coco.py'] + +model = dict( + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6677a9fea501a7683475dc8b865659cef5485bbe --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../common/ms_3x_coco-instance.py', + '../_base_/models/cascade-mask-rcnn_r50_fpn.py' +] + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f59bb94eaaf3e850e971268383cd0275bcddf54d --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..35c8aa6748d25e4c9c834478488ee21b44c8f2bd --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_ms-3x_coco.py 
b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b15006f451f346216243dc61140e9907535f0b20 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_r50_fpn_ms-3x_coco.py @@ -0,0 +1,4 @@ +_base_ = [ + '../common/ms_3x_coco-instance.py', + '../_base_/models/cascade-mask-rcnn_r50_fpn.py' +] diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..87a4cc325a10b01cbf5a91e336da2281bc19a728 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5e8dcaa6891877c89acb024b9811a4fe7a87bc3b --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0f61b9aee2b0ab80c5c9b998a73826e5ff45a6 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf08306850bdaef776a0ce53b88b23b9013a1a0 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py @@ -0,0 +1,24 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py' + +model = dict( + # ResNeXt-101-32x8d model trained with Caffe2 at FB, + # so the mean and std need to be changed. 
+ data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fb2e6b6b9507dcf38403d38499e1d57bd792a4da --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cc20c171542b5d75634d99d9ed25eea3acf8df19 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ecc42655903c271e7e181b719d09821118a204 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b6eaee2db700b897255ed44a5fd30bc23929388f --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..1cdf5108b7d2908e420c52c59f8a9805c7989702 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..84c285fc9e59d4191e79dd337ece2baff3d38b02 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc52e9cb8e1e9c27d45e32200b0b72efa8c363d --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..aa30a3d07f5644dfc6f79f0eafc374518149e777 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ad90e259b2d8410309bfd877b74755524b94f788 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = './cascade-rcnn_r50_fpn_1x_coco.py' + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a07c8b2302b9c2337d4da2d32c388142ca1f748 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py 
b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..30f3ff106018ba51173f018c196cf62a88fdb172 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cd25f02608c3f51a59e35185a41080c6e8e3a1ea --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../common/lsj-200e_coco-detection.py' +] +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +# disable allowed_border to avoid potential errors. +model = dict( + data_preprocessor=dict(batch_augments=batch_augments), + train_cfg=dict(rpn=dict(allowed_border=-1))) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..50e0b9544592d61b3c14ec7f64f3e6eaa2e96a57 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6120189205d883d98b2d323a160ec54ea26aab13 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..29475e39273dccad13058e9114728770e77f71ef --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = 
'./cascade-rcnn_r50_fpn_1x_coco.py' +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101_64x4d_fpn_20e_coco.py b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101_64x4d_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e2aa57eaaf43788fc3628f1463e94405279c7416 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/cascade-rcnn_x101_64x4d_fpn_20e_coco.py @@ -0,0 +1,15 @@ +_base_ = './cascade-rcnn_r50_fpn_20e_coco.py' +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/cascade_rcnn/metafile.yml b/mmpose/configs/mmdet/cascade_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..7e0385daeed3f3310dc7f9a8b64c99b5cb8324b4 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rcnn/metafile.yml @@ -0,0 +1,545 @@ +Collections: + - Name: Cascade R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Cascade R-CNN + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: http://dx.doi.org/10.1109/tpami.2019.2956516 + Title: 'Cascade R-CNN: Delving into High Quality Object Detection' + README: configs/cascade_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6 + Version: v2.0.0 + - Name: Cascade Mask R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Cascade R-CNN + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: http://dx.doi.org/10.1109/tpami.2019.2956516 + Title: 'Cascade R-CNN: Delving into High Quality Object Detection' + README: configs/cascade_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: cascade-rcnn_r50-caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth + + - Name: cascade-rcnn_r50_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth + + - Name: 
cascade-rcnn_r50_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth + + - Name: cascade-rcnn_r101-caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth + + - Name: cascade-rcnn_r101_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth + + - Name: cascade-rcnn_r101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth + + - Name: cascade-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth + + - Name: cascade-rcnn_x101-32x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth + + - Name: cascade-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth + + - Name: cascade-rcnn_x101_64x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-rcnn_x101_64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth + + - Name: cascade-mask-rcnn_r50-caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth + + - Name: cascade-mask-rcnn_r50_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth + + - Name: cascade-mask-rcnn_r50_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth + + - Name: cascade-mask-rcnn_r101-caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth + + - Name: cascade-mask-rcnn_r101_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + - Task: Instance 
Segmentation + Dataset: COCO + Metrics: + mask AP: 37.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth + + - Name: cascade-mask-rcnn_r101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth + + - Name: cascade-mask-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth + + - Name: cascade-mask-rcnn_x101-32x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth + + - Name: cascade-mask-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 12.2 + inference time (ms/im): + - value: 149.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth + + - Name: cascade-mask-rcnn_x101-64x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth + + - Name: 
cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth + + - Name: cascade-mask-rcnn_r50_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth + + - Name: cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r101-caffe_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth + + - Name: cascade-mask-rcnn_r101_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_r101_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth + + - Name: cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 9.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth + + - Name: cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 12.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth + + - Name: cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py + Metadata: + 
Training Memory (GB): 12.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth diff --git a/mmpose/configs/mmdet/cascade_rpn/README.md b/mmpose/configs/mmdet/cascade_rpn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..868a25eda26967576db85dc0686dda53a1d9c9b1 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rpn/README.md @@ -0,0 +1,41 @@ +# Cascade RPN + +> [Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution](https://arxiv.org/abs/1909.06720) + + + +## Abstract + +This paper considers an architecture referred to as Cascade Region Proposal Network (Cascade RPN) for improving the region-proposal quality and detection performance by systematically addressing the limitation of the conventional RPN that heuristically defines the anchors and aligns the features to the anchors. First, instead of using multiple anchors with predefined scales and aspect ratios, Cascade RPN relies on a single anchor per location and performs multi-stage refinement. Each stage is progressively more stringent in defining positive samples by starting out with an anchor-free metric followed by anchor-based metrics in the ensuing stages. Second, to attain alignment between the features and the anchors throughout the stages, adaptive convolution is proposed that takes the anchors in addition to the image features as its input and learns the sampled features guided by the anchors. A simple implementation of a two-stage Cascade RPN achieves AR 13.4 points higher than that of the conventional RPN, surpassing any existing region proposal methods. When adopting to Fast R-CNN and Faster R-CNN, Cascade RPN can improve the detection mAP by 3.1 and 3.5 points, respectively. + +
+ +
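The configs in this folder wire the two-stage Cascade RPN head into a plain RPN, a Fast R-CNN and a Faster R-CNN. For a quick sanity check of a downloaded checkpoint, a minimal inference sketch is shown below; it is not part of the original configs and assumes an MMDetection v3.x installation, with config paths relative to an MMDetection checkout and the image and checkpoint file names used only as placeholders.

```python
# Minimal sketch (assumes mmdet>=3.0; the checkpoint is the Faster R-CNN entry
# from the detection table below, downloaded to the working directory).
from mmdet.apis import init_detector, inference_detector

cfg = 'configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py'
ckpt = 'crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth'  # placeholder local path

model = init_detector(cfg, ckpt, device='cuda:0')  # build the detector and load weights
result = inference_detector(model, 'demo.jpg')     # returns a DetDataSample
inst = result.pred_instances                       # boxes refined from the cascade proposals
print(inst.bboxes.shape, inst.scores[:5], inst.labels[:5])
```

The RPN-only config (`cascade-rpn_r50-caffe_fpn_1x_coco.py`) should work the same way; its predictions are the class-agnostic proposals evaluated in the region-proposal table.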
+ +## Results and Models + +### Region proposal performance + +| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 | Config | Download | +| :----: | :------: | :---: | :------: | :-----------------: | :------------: | :-----: | :----------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | +| CRPN | R-50-FPN | caffe | - | - | - | 72.0 | [config](./cascade-rpn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_r50_caffe_fpn_1x_coco/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth) | + +### Detection performance + +| Method | Proposal | Backbone | Style | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | +| :----------: | :---------: | :------: | :---: | :------: | :------: | :-----------------: | :------------: | :----: | :----------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Fast R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 39.9 | [config](./cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth) | +| Faster R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 40.4 | [config](./cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth) | + +## Citation + +We provide the code for reproducing experiment results of [Cascade RPN](https://arxiv.org/abs/1909.06720). 
+ +```latex +@inproceedings{vu2019cascade, + title={Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution}, + author={Vu, Thang and Jang, Hyunjun and Pham, Trung X and Yoo, Chang D}, + booktitle={Conference on Neural Information Processing Systems (NeurIPS)}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ba23ce90652d2ab2e9362be9a6231742d1815a70 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,27 @@ +_base_ = '../fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict( + pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), + sampler=dict(num=256))), + test_cfg=dict(rcnn=dict(score_thr=1e-3))) + +# MMEngine support the following two ways, users can choose +# according to convenience +# train_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_train2017.pkl')) # noqa +_base_.train_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl' # noqa + +# val_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_val2017.pkl')) # noqa +# test_dataloader = val_dataloader +_base_.val_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl' # noqa +test_dataloader = _base_.val_dataloader + +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7eced00144fb8fff1f234210a2b3f3fe475c8f --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,89 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py' +rpn_weight = 0.7 +model = dict( + rpn_head=dict( + _delete_=True, + type='CascadeRPNHead', + num_stages=2, + stages=[ + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0], + strides=[4, 8, 16, 32, 64]), + adapt_cfg=dict(type='dilation', dilation=3), + bridged_feature=True, + with_cls=False, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.5, 0.5)), + loss_bbox=dict( + type='IoULoss', linear=True, + loss_weight=10.0 * rpn_weight)), + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + adapt_cfg=dict(type='offset'), + bridged_feature=False, + with_cls=True, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.05, 0.05, 0.1, 0.1)), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0 * rpn_weight), + loss_bbox=dict( + type='IoULoss', linear=True, + loss_weight=10.0 * rpn_weight)) + ]), + roi_head=dict( + bbox_head=dict( + 
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=[ + dict( + assigner=dict( + type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + ], + rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), + rcnn=dict( + assigner=dict( + pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), + rcnn=dict(score_thr=1e-3))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6eba24d11368ee0cdaae4fa316020ea3750be7f0 --- /dev/null +++ b/mmpose/configs/mmdet/cascade_rpn/cascade-rpn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,76 @@ +_base_ = '../rpn/rpn_r50-caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='CascadeRPNHead', + num_stages=2, + stages=[ + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0], + strides=[4, 8, 16, 32, 64]), + adapt_cfg=dict(type='dilation', dilation=3), + bridged_feature=True, + sampling=False, + with_cls=False, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.5, 0.5)), + loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)), + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + adapt_cfg=dict(type='offset'), + bridged_feature=False, + sampling=True, + with_cls=True, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.05, 0.05, 0.1, 0.1)), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)) + ]), + train_cfg=dict(rpn=[ + dict( + assigner=dict( + type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.3, + ignore_iof_thr=-1, + iou_calculator=dict(type='BboxOverlaps2D')), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.8), + min_bbox_size=0))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/cascade_rpn/metafile.yml b/mmpose/configs/mmdet/cascade_rpn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..62a88c5d2185ffd3aa7884f7a8c7d68cc3d60c8f --- /dev/null +++ 
b/mmpose/configs/mmdet/cascade_rpn/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: Cascade RPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Cascade RPN + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1909.06720 + Title: 'Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution' + README: configs/cascade_rpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/dense_heads/cascade_rpn_head.py#L538 + Version: v2.8.0 + +Models: + - Name: cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco + In Collection: Cascade RPN + Config: configs/cascade_rpn/cascade-rpn_fast-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth + + - Name: cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco + In Collection: Cascade RPN + Config: configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth diff --git a/mmpose/configs/mmdet/centernet/README.md b/mmpose/configs/mmdet/centernet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..81e229c62f7816f20459a53132bfca676c01ac78 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/README.md @@ -0,0 +1,58 @@ +# CenterNet + +> [Objects as Points](https://arxiv.org/abs/1904.07850) + + + +## Abstract + +Detection identifies objects as axis-aligned boxes in an image. Most successful object detectors enumerate a nearly exhaustive list of potential object locations and classify each. This is wasteful, inefficient, and requires additional post-processing. In this paper, we take a different approach. We model an object as a single point --- the center point of its bounding box. Our detector uses keypoint estimation to find center points and regresses to all other object properties, such as size, 3D location, orientation, and even pose. Our center point based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding box based detectors. CenterNet achieves the best speed-accuracy trade-off on the MS COCO dataset, with 28.1% AP at 142 FPS, 37.4% AP at 52 FPS, and 45.1% AP with multi-scale testing at 1.4 FPS. We use the same approach to estimate 3D bounding box in the KITTI benchmark and human pose on the COCO keypoint dataset. Our method performs competitively with sophisticated multi-stage methods and runs in real-time. + +
+ +
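The decoding step described in the abstract — pick peaks on the class heatmap, then read off the size and sub-pixel offset predicted at each peak — is what the `topk` and `local_maximum_kernel` settings of the configs in this folder control. Below is a rough, self-contained illustration of that decoding, not the MMDetection implementation; the function name is illustrative and the output stride of 4 is an assumption matching the ResNet-18 models here.

```python
# Sketch of CenterNet-style decoding: a 3x3 max-pool keeps only local maxima on
# the heatmap (replacing box NMS), the top-k peaks are selected, and the size and
# offset maps are read at those peaks to assemble boxes.
import torch
import torch.nn.functional as F


def decode_centernet(heatmap, wh, offset, k=100, stride=4):
    """heatmap: (B, C, H, W) sigmoid scores; wh, offset: (B, 2, H, W)."""
    b, c, h, w = heatmap.shape
    peaks = F.max_pool2d(heatmap, kernel_size=3, stride=1, padding=1)
    heatmap = heatmap * (heatmap == peaks)            # zero out non-peak locations
    scores, inds = heatmap.view(b, -1).topk(k)        # top-k over classes and positions
    labels = torch.div(inds, h * w, rounding_mode='floor')
    pix = inds % (h * w)                              # flattened spatial index of each peak
    ys = torch.div(pix, w, rounding_mode='floor').float()
    xs = (pix % w).float()

    def gather(t):                                    # (B, 2, H, W) -> (B, 2, k) at the peaks
        return t.view(b, 2, -1).gather(2, pix.unsqueeze(1).expand(-1, 2, -1))

    wh_k, off_k = gather(wh), gather(offset)
    cx, cy = xs + off_k[:, 0], ys + off_k[:, 1]       # refined centers on the feature map
    boxes = torch.stack([cx - wh_k[:, 0] / 2, cy - wh_k[:, 1] / 2,
                         cx + wh_k[:, 0] / 2, cy + wh_k[:, 1] / 2], dim=-1)
    return boxes * stride, scores, labels             # scale back to input pixels
```

In MMDetection itself this logic lives in the test-time decoding of `CenterNetHead`, configured below via `test_cfg=dict(topk=100, local_maximum_kernel=3, ...)`.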
+ +## Results and Models + +| Backbone | DCN | Mem (GB) | Box AP | Flip box AP | Config | Download | +| :-------: | :-: | :------: | :----: | :---------: | :--------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ResNet-18 | N | 3.45 | 25.9 | 27.3 | [config](./centernet_r18_8xb16-crop512-140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630.log.json) | +| ResNet-18 | Y | 3.47 | 29.5 | 30.9 | [config](./centernet_r18-dcnv2_8xb16-crop512-140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131.log.json) | + +Note: + +- Flip box AP setting is single-scale and `flip=True`. +- Due to complex data enhancement, we find that the performance is unstable and may fluctuate by about 0.4 mAP. mAP 29.4 ~ 29.8 is acceptable in ResNet-18-DCNv2. +- Compared to the source code, we refer to [CenterNet-Better](https://github.com/FateScript/CenterNet-better), and make the following changes + - fix wrong image mean and variance in image normalization to be compatible with the pre-trained backbone. + - Use SGD rather than ADAM optimizer and add warmup and grad clip. + - Use DistributedDataParallel as other models in MMDetection rather than using DataParallel. + +## CenterNet Update + +| Backbone | Style | Lr schd | MS train | Mem (GB) | Box AP | Config | Download | +| :-------: | :---: | :-----: | :------: | :------: | :----: | :------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ResNet-50 | caffe | 1x | True | 3.3 | 40.2 | [config](./centernet-update_r50-caffe_fpn_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco/centernet-update_r50-caffe_fpn_ms-1x_coco_20230512_203845-8306baf2.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco/centernet-update_r50-caffe_fpn_ms-1x_coco_20230512_203845.log.json) | + +CenterNet Update from the paper of [Probabilistic two-stage detection](https://arxiv.org/abs/2103.07461). The author has updated CenterNet to greatly improve performance and convergence speed. 
+The [details](https://github.com/xingyizhou/CenterNet2/blob/master/docs/MODEL_ZOO.md) are as follows: + +- Using top-left-right-bottom box encoding and GIoU loss +- Adding regression loss to the center 3x3 region +- Adding more positive pixels for the heatmap loss, namely those whose regression loss is small and which lie within the center 3x3 region +- Using a RetinaNet-style optimizer (SGD), learning-rate rule (0.01 per batch size of 16), and schedule (12 epochs) +- Adding FPN neck layers and assigning objects to FPN levels based on a fixed size range +- Using standard NMS instead of max pooling + +Note: We found that the performance of the R-50 model fluctuates greatly and it sometimes does not converge. If the model does not converge, try running it again or reducing the learning rate. + +## Citation + +```latex +@article{zhou2019objects, + title={Objects as Points}, + author={Zhou, Xingyi and Wang, Dequan and Kr{\"a}henb{\"u}hl, Philipp}, + journal={arXiv preprint arXiv:1904.07850}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/centernet/centernet-update_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/centernet/centernet-update_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc65e0f8aeb1f02a0bea675146ced7a56800251 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet-update_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/centernet/centernet-update_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/centernet/centernet-update_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3ae32ecd54cd08664e883a0888ef43040528d1 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet-update_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py b/mmpose/configs/mmdet/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1f6e2b3919d6d2197c0ae9e1d721dc4eab00cf9c --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py @@ -0,0 +1,105 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='CenterNet', + # use caffe img_norm + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5, + # There is a chance to get 40.3 after switching init_cfg, + # otherwise it is about 39.9~40.1 + init_cfg=dict(type='Caffe2Xavier',
layer='Conv2d'), + relu_before_extra_convs=True), + bbox_head=dict( + type='CenterNetUpdateHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + hm_min_radius=4, + hm_min_overlap=0.8, + more_pos_thresh=0.2, + more_pos_topk=9, + soft_weight_on_reg=False, + loss_cls=dict( + type='GaussianFocalLoss', + pos_weight=0.25, + neg_weight=0.75, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + ), + train_cfg=None, + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# single-scale training is about 39.3 +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.00025, + by_epoch=False, + begin=0, + end=4000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +optim_wrapper = dict( + optimizer=dict(lr=0.01), + # Experiments show that there is no need to turn on clip_grad. + paramwise_cfg=dict(norm_decay_mult=0.)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/centernet/centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/centernet/centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..34e0c680d39486467464f0ea7d6e1e08bf0c5240 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,83 @@ +_base_ = '../common/lsj-200e_coco-detection.py' + +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict( + type='CenterNet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5, + init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'), + relu_before_extra_convs=True), + bbox_head=dict( + type='CenterNetUpdateHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_cls=dict( + type='GaussianFocalLoss', + pos_weight=0.25, + neg_weight=0.75, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + ), + train_cfg=None, + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with 
AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004), + paramwise_cfg=dict(norm_decay_mult=0.)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.00025, + by_epoch=False, + begin=0, + end=4000), + dict( + type='MultiStepLR', + begin=0, + end=25, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py b/mmpose/configs/mmdet/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..732a55d59ad7dee175d8b72f798f0be044f23326 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py @@ -0,0 +1,136 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + './centernet_tta.py' +] + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# model settings +model = dict( + type='CenterNet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=18, + norm_eval=False, + norm_cfg=dict(type='BN'), + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict( + type='CTResNetNeck', + in_channels=512, + num_deconv_filters=(256, 128, 64), + num_deconv_kernels=(4, 4, 4), + use_dcn=True), + bbox_head=dict( + type='CenterNetHead', + num_classes=80, + in_channels=64, + feat_channels=64, + loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0), + loss_wh=dict(type='L1Loss', loss_weight=0.1), + loss_offset=dict(type='L1Loss', loss_weight=1.0)), + train_cfg=None, + test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + # The cropped images are padded into squares during training, + # but may be less than crop_size. + crop_size=(512, 512), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + mean=[0, 0, 0], + std=[1, 1, 1], + to_rgb=True, + test_pad_mode=None), + # Make sure the output is always crop_size. 
+ dict(type='Resize', scale=(512, 512), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args={{_base_.backend_args}}, + to_float32=True), + # don't need Resize + dict( + type='RandomCenterCropPad', + ratios=None, + border=None, + mean=[0, 0, 0], + std=[1, 1, 1], + to_rgb=True, + test_mode=True, + test_pad_mode=['logical_or', 31], + test_pad_add_pix=1), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border')) +] + +# Use RepeatDataset to speed up training +train_dataloader = dict( + batch_size=16, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}, + ))) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +# Based on the default settings of modern detectors, the SGD effect is better +# than the Adam in the source code, so we use SGD default settings and +# if you use adam+lr5e-4, the map is 29.1. +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) + +max_epochs = 28 +# learning policy +# Based on the default settings of modern detectors, we added warmup settings. +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[18, 24], # the real step is [18*5, 24*5] + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140 + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (16 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/mmpose/configs/mmdet/centernet/centernet_r18_8xb16-crop512-140e_coco.py b/mmpose/configs/mmdet/centernet/centernet_r18_8xb16-crop512-140e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6094b64221bd91eaafc9868e01c718d4421b418a --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet_r18_8xb16-crop512-140e_coco.py @@ -0,0 +1,3 @@ +_base_ = './centernet_r18-dcnv2_8xb16-crop512-140e_coco.py' + +model = dict(neck=dict(use_dcn=False)) diff --git a/mmpose/configs/mmdet/centernet/centernet_tta.py b/mmpose/configs/mmdet/centernet/centernet_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..edd7b03ecdeb272870919dcbd4842d6b8e32d8d4 --- /dev/null +++ b/mmpose/configs/mmdet/centernet/centernet_tta.py @@ -0,0 +1,39 @@ +# This is different from the TTA of official CenterNet. + +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)) + +tta_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True, backend_args=None), + dict( + type='TestTimeAug', + transforms=[ + [ + # ``RandomFlip`` must be placed before ``RandomCenterCropPad``, + # otherwise bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) 
+ ], + [ + dict( + type='RandomCenterCropPad', + ratios=None, + border=None, + mean=[0, 0, 0], + std=[1, 1, 1], + to_rgb=True, + test_mode=True, + test_pad_mode=['logical_or', 31], + test_pad_add_pix=1), + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'flip', 'flip_direction', 'border')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/centernet/metafile.yml b/mmpose/configs/mmdet/centernet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..496b8ea22df0ac1e757a40c2750893034e08a81c --- /dev/null +++ b/mmpose/configs/mmdet/centernet/metafile.yml @@ -0,0 +1,60 @@ +Collections: + - Name: CenterNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x TITANXP GPUs + Architecture: + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.07850 + Title: 'Objects as Points' + README: configs/centernet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.13.0/mmdet/models/detectors/centernet.py#L10 + Version: v2.13.0 + +Models: + - Name: centernet_r18-dcnv2_8xb16-crop512-140e_coco + In Collection: CenterNet + Config: configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py + Metadata: + Batch Size: 128 + Training Memory (GB): 3.47 + Epochs: 140 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 29.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth + + - Name: centernet_r18_8xb16-crop512-140e_coco + In Collection: CenterNet + Config: configs/centernet/centernet_r18_8xb16-crop512-140e_coco.py + Metadata: + Batch Size: 128 + Training Memory (GB): 3.45 + Epochs: 140 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 25.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth + + - Name: centernet-update_r50-caffe_fpn_ms-1x_coco + In Collection: CenterNet + Config: configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py + Metadata: + Batch Size: 16 + Training Memory (GB): 3.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v3.0/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco/centernet-update_r50-caffe_fpn_ms-1x_coco_20230512_203845-8306baf2.pth diff --git a/mmpose/configs/mmdet/centripetalnet/README.md b/mmpose/configs/mmdet/centripetalnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..21edbd261af502d41fc6a24323bc28474a6d1c5a --- /dev/null +++ b/mmpose/configs/mmdet/centripetalnet/README.md @@ -0,0 +1,36 @@ +# CentripetalNet + +> [CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection](https://arxiv.org/abs/2003.09119) + + + +## Abstract + +Keypoint-based detectors have achieved pretty-well performance. However, incorrect keypoint matching is still widespread and greatly affects the performance of the detector. In this paper, we propose CentripetalNet which uses centripetal shift to pair corner keypoints from the same instance. CentripetalNet predicts the position and the centripetal shift of the corner points and matches corners whose shifted results are aligned. 
Combining position information, our approach matches corner points more accurately than the conventional embedding approaches do. Corner pooling extracts information inside the bounding boxes onto the border. To make this information more aware at the corners, we design a cross-star deformable convolution network to conduct feature adaption. Furthermore, we explore instance segmentation on anchor-free detectors by equipping our CentripetalNet with a mask prediction module. On MS-COCO test-dev, our CentripetalNet not only outperforms all existing anchor-free detectors with an AP of 48.0% but also achieves comparable performance to the state-of-the-art instance segmentation approaches with a 40.2% MaskAP. + +
+ +
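+The pairing rule sketched in the abstract can be illustrated in a few lines: each corner predicts a centripetal shift pointing at its box centre, and a (top-left, bottom-right) pair is kept only when the two shifted results land close together. The snippet below is a toy illustration of that idea, not the mmdet `CentripetalHead` implementation; the corner coordinates, shifts and the `dist_thr` threshold are made-up values.
+
+```python
+import numpy as np
+
+def pair_corners(tl_pts, tl_shifts, br_pts, br_shifts, dist_thr=10.0):
+    """Pair top-left/bottom-right corners whose centripetal-shifted results align."""
+    tl_centres = tl_pts + tl_shifts  # where each top-left corner claims its box centre is
+    br_centres = br_pts + br_shifts  # same for the bottom-right corners
+    pairs = []
+    for i in range(len(tl_pts)):
+        for j in range(len(br_pts)):
+            geometry_ok = tl_pts[i, 0] < br_pts[j, 0] and tl_pts[i, 1] < br_pts[j, 1]
+            aligned = np.linalg.norm(tl_centres[i] - br_centres[j]) < dist_thr
+            if geometry_ok and aligned:
+                pairs.append((i, j))
+    return pairs
+
+# Two boxes; each shift points from the corner towards its own box centre.
+tl = np.array([[10., 10.], [200., 40.]])
+tl_shift = np.array([[45., 30.], [30., 30.]])
+br = np.array([[100., 70.], [260., 100.]])
+br_shift = np.array([[-45., -30.], [-30., -30.]])
+print(pair_corners(tl, tl_shift, br, br_shift))  # -> [(0, 0), (1, 1)]
+```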
+ +## Results and Models + +| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------------: | :-----------------------------------------------------------------------: | :---------------: | :------: | :------------: | :----: | :-----------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HourglassNet-104 | [16 x 6](./centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py) | 190/210 | 16.7 | 3.7 | 44.8 | [config](./centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804.log.json) | + +Note: + +- TTA setting is single-scale and `flip=True`. If you want to reproduce the TTA performance, please add `--tta` in the test command. +- The model we released is the best checkpoint rather than the latest checkpoint (box AP 44.8 vs 44.6 in our experiment). + +## Citation + +```latex +@InProceedings{Dong_2020_CVPR, +author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen}, +title = {CentripetalNet: Pursuing High-Quality Keypoint Pairs for Object Detection}, +booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py b/mmpose/configs/mmdet/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b757ffd16dca2d2b51d27ad413fdba889252c87f --- /dev/null +++ b/mmpose/configs/mmdet/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py @@ -0,0 +1,181 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True) + +# model settings +model = dict( + type='CornerNet', + data_preprocessor=data_preprocessor, + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CentripetalHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=0, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1), + loss_guiding_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=0.05), + loss_centripetal_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + 
corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + # The cropped images are padded into squares during training, + # but may be smaller than crop_size. + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. + to_rgb=data_preprocessor['bgr_to_rgb']), + dict(type='Resize', scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args=_base_.backend_args), + # don't need Resize + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. + to_rgb=data_preprocessor['bgr_to_rgb']), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border')) +] + +train_dataloader = dict( + batch_size=6, + num_workers=3, + batch_sampler=None, + dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.0005), + clip_grad=dict(max_norm=35, norm_type=2)) + +max_epochs = 210 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[190], + gamma=0.1) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (16 GPUs) x (6 samples per GPU) +auto_scale_lr = dict(base_batch_size=96) + +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict( + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'), + max_per_img=100)) + +tta_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + [ + # ``RandomFlip`` must be placed before ``RandomCenterCropPad``, + # otherwise bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) + ], + [ + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. 
+ to_rgb=data_preprocessor['bgr_to_rgb']) + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'flip', 'flip_direction', 'border')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/centripetalnet/metafile.yml b/mmpose/configs/mmdet/centripetalnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..526572dfed0d158b55205c23031b5dfdbdfa9dc0 --- /dev/null +++ b/mmpose/configs/mmdet/centripetalnet/metafile.yml @@ -0,0 +1,39 @@ +Collections: + - Name: CentripetalNet + Metadata: + Training Data: COCO + Training Techniques: + - Adam + Training Resources: 16x V100 GPUs + Architecture: + - Corner Pooling + - Stacked Hourglass Network + Paper: + URL: https://arxiv.org/abs/2003.09119 + Title: 'CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection' + README: configs/centripetalnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/cornernet.py#L9 + Version: v2.5.0 + +Models: + - Name: centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco + In Collection: CentripetalNet + Config: configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py + Metadata: + Batch Size: 96 + Training Memory (GB): 16.7 + inference time (ms/im): + - value: 270.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth diff --git a/mmpose/configs/mmdet/cityscapes/README.md b/mmpose/configs/mmdet/cityscapes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e37b64edb7eded69bafa37244aa5a411e475d2c --- /dev/null +++ b/mmpose/configs/mmdet/cityscapes/README.md @@ -0,0 +1,46 @@ +# Cityscapes + +> [The Cityscapes Dataset for Semantic Urban Scene Understanding](https://arxiv.org/abs/1604.01685) + + + +## Abstract + +Visual understanding of complex urban street scenes is an enabling factor for a wide range of applications. Object detection has benefited enormously from large-scale datasets, especially in the context of deep learning. For semantic urban scene understanding, however, no current dataset adequately captures the complexity of real-world urban scenes. +To address this, we introduce Cityscapes, a benchmark suite and large-scale dataset to train and test approaches for pixel-level and instance-level semantic labeling. Cityscapes is comprised of a large, diverse set of stereo video sequences recorded in streets from 50 different cities. 5000 of these images have high quality pixel-level annotations; 20000 additional images have coarse annotations to enable methods that leverage large volumes of weakly-labeled data. Crucially, our effort exceeds previous attempts in terms of dataset size, annotation richness, scene variability, and complexity. Our accompanying empirical study provides an in-depth analysis of the dataset characteristics, as well as a performance evaluation of several state-of-the-art approaches based on our benchmark. + +
+ +
+ +## Common settings + +- All baselines were trained using 8 GPU with a batch size of 8 (1 images per GPU) using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate. +- All models were trained on `cityscapes_train`, and tested on `cityscapes_val`. +- 1x training schedule indicates 64 epochs which corresponds to slightly less than the 24k iterations reported in the original schedule from the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870) +- COCO pre-trained weights are used to initialize. +- A conversion [script](../../tools/dataset_converters/cityscapes.py) is provided to convert Cityscapes into COCO format. Please refer to [install.md](../../docs/1_exist_data_model.md#prepare-datasets) for details. +- `CityscapesDataset` implemented three evaluation methods. `bbox` and `segm` are standard COCO bbox/mask AP. `cityscapes` is the cityscapes dataset official evaluation, which may be slightly higher than COCO. + +### Faster R-CNN + +| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :----------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 800-1024 | 5.2 | - | 40.3 | [config](./faster-rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502-829424c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502_114915.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :-----: | :--------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 800-1024 | 5.3 | - | 40.9 | 36.4 | [config](./mask-rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733-d2858245.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733.log.json) | + +## Citation + +```latex +@inproceedings{Cordts2016Cityscapes, + title={The Cityscapes Dataset for Semantic Urban Scene Understanding}, + author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt}, + booktitle={Proc. 
of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2016} +} +``` diff --git a/mmpose/configs/mmdet/cityscapes/faster-rcnn_r50_fpn_1x_cityscapes.py b/mmpose/configs/mmdet/cityscapes/faster-rcnn_r50_fpn_1x_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..ccd0de2aff1c1f3071e70e67dbf94b1c1cfe7e8b --- /dev/null +++ b/mmpose/configs/mmdet/cityscapes/faster-rcnn_r50_fpn_1x_cityscapes.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_detection.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' +] +model = dict( + backbone=dict(init_cfg=None), + roi_head=dict( + bbox_head=dict( + num_classes=8, + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))) + +# optimizer +# lr is set for a batch size of 8 +optim_wrapper = dict(optimizer=dict(lr=0.01)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=8, + by_epoch=True, + # [7] yields higher performance than [6] + milestones=[7], + gamma=0.1) +] + +# actual epoch = 8 * 8 = 64 +train_cfg = dict(max_epochs=8) + +# For better, more stable performance initialize from COCO +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (1 samples per GPU) +# TODO: support auto scaling lr +# auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/cityscapes/mask-rcnn_r50_fpn_1x_cityscapes.py b/mmpose/configs/mmdet/cityscapes/mask-rcnn_r50_fpn_1x_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..772268b121e7b8858c4cfcf3b6820e6146634d0d --- /dev/null +++ b/mmpose/configs/mmdet/cityscapes/mask-rcnn_r50_fpn_1x_cityscapes.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_instance.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' +] +model = dict( + backbone=dict(init_cfg=None), + roi_head=dict( + bbox_head=dict( + type='Shared2FCBBoxHead', + num_classes=8, + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + mask_head=dict(num_classes=8))) + +# optimizer +# lr is set for a batch size of 8 +optim_wrapper = dict(optimizer=dict(lr=0.01)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=8, + by_epoch=True, + # [7] yields higher performance than [6] + milestones=[7], + gamma=0.1) +] + +# actual epoch = 8 * 8 = 64 +train_cfg = dict(max_epochs=8) + +# For better, more stable performance initialize from COCO +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (1 samples per GPU) +# TODO: support auto scaling lr +# auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/common/lsj-100e_coco-detection.py b/mmpose/configs/mmdet/common/lsj-100e_coco-detection.py new file mode 100644 index 0000000000000000000000000000000000000000..bb631e5d5c1253cc3a5d81a8cdc6cd86133d9b53 --- /dev/null +++ b/mmpose/configs/mmdet/common/lsj-100e_coco-detection.py @@ -0,0 +1,122 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +image_size = (1024, 1024) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +# Use RepeatDataset to speed up training +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=4, # simply change this from 2 to 16 for 50e - 400e training. 
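+        # With max_epochs=25 below, times=4 gives the 100-epoch (lsj-100e) schedule;
+        # times=2 -> 50e and times=16 -> 400e, as the comment above notes.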
+ dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +max_epochs = 25 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# optimizer assumes bs=64 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] + +# only keep latest 2 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/common/lsj-100e_coco-instance.py b/mmpose/configs/mmdet/common/lsj-100e_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..6e62729d639c7659115a7f5f6449fa9021338be6 --- /dev/null +++ b/mmpose/configs/mmdet/common/lsj-100e_coco-instance.py @@ -0,0 +1,122 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +image_size = (1024, 1024) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +# Use 
RepeatDataset to speed up training +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=4, # simply change this from 2 to 16 for 50e - 400e training. + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +max_epochs = 25 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# optimizer assumes bs=64 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] + +# only keep latest 2 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/common/lsj-200e_coco-detection.py b/mmpose/configs/mmdet/common/lsj-200e_coco-detection.py new file mode 100644 index 0000000000000000000000000000000000000000..83d12947fed900f05d748b6f90ef29cc5fbc407a --- /dev/null +++ b/mmpose/configs/mmdet/common/lsj-200e_coco-detection.py @@ -0,0 +1,18 @@ +_base_ = './lsj-100e_coco-detection.py' + +# 8x25=200e +train_dataloader = dict(dataset=dict(times=8)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=25, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/common/lsj-200e_coco-instance.py b/mmpose/configs/mmdet/common/lsj-200e_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..af3e4bf160c01045c6e36d67bdee796e7bf96cd3 --- /dev/null +++ b/mmpose/configs/mmdet/common/lsj-200e_coco-instance.py @@ -0,0 +1,18 @@ +_base_ = './lsj-100e_coco-instance.py' + +# 8x25=200e +train_dataloader = dict(dataset=dict(times=8)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=25, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/common/ms-90k_coco.py b/mmpose/configs/mmdet/common/ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d6c3dafb61d59bbbe9d0c6188a1bbff3b736b3 --- /dev/null +++ b/mmpose/configs/mmdet/common/ms-90k_coco.py @@ -0,0 +1,122 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +# Align with Detectron2 +backend = 'pillow' +train_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=backend_args, + imdecode_backend=backend), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True, + backend=backend), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=backend_args, + imdecode_backend=backend), + dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, 
min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# training schedule for 90k +max_iter = 90000 +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[60000, 80000], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) + +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/common/ms-poly-90k_coco-instance.py b/mmpose/configs/mmdet/common/ms-poly-90k_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..d5566b3c3b8bfe0a49c8c062fb0fc972d5ae1f55 --- /dev/null +++ b/mmpose/configs/mmdet/common/ms-poly-90k_coco-instance.py @@ -0,0 +1,130 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +# Align with Detectron2 +backend = 'pillow' +train_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=backend_args, + imdecode_backend=backend), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True, + backend=backend), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=backend_args, + imdecode_backend=backend), + dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + 
persistent_workers=True, + pin_memory=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# training schedule for 90k +max_iter = 90000 +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[60000, 80000], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
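+# When enabled, the optimizer lr is scaled by (actual total batch size) / base_batch_size.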
+auto_scale_lr = dict(enable=False, base_batch_size=16) + +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/common/ms-poly_3x_coco-instance.py b/mmpose/configs/mmdet/common/ms-poly_3x_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..04072f9b84c06d546767649f7e17736444db7ce2 --- /dev/null +++ b/mmpose/configs/mmdet/common/ms-poly_3x_coco-instance.py @@ -0,0 +1,118 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + backend_args=backend_args) +test_evaluator = val_evaluator + +# training schedule for 3x with `RepeatDataset` +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +# Experiments show that using milestones=[9, 11] has higher performance +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[9, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + 
type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/common/ms_3x_coco-instance.py b/mmpose/configs/mmdet/common/ms_3x_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..f80cf88e9b1e770dce3157abc852aea996eec624 --- /dev/null +++ b/mmpose/configs/mmdet/common/ms_3x_coco-instance.py @@ -0,0 +1,108 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +# training schedule for 3x with `RepeatDataset` +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +# Experiments show that using milestones=[9, 11] has higher performance +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[9, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + 
type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/common/ms_3x_coco.py b/mmpose/configs/mmdet/common/ms_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..facbb34cf05088d8832502d3c9a38d812d328308 --- /dev/null +++ b/mmpose/configs/mmdet/common/ms_3x_coco.py @@ -0,0 +1,108 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +# training schedule for 3x with `RepeatDataset` +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +# Experiments show that using milestones=[9, 11] has higher performance +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[9, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, 
weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/common/ssj_270k_coco-instance.py b/mmpose/configs/mmdet/common/ssj_270k_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..7407644fd59bb03d6e0afde83f8893a351ddc356 --- /dev/null +++ b/mmpose/configs/mmdet/common/ssj_270k_coco-instance.py @@ -0,0 +1,125 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +image_size = (1024, 1024) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +# Standard Scale Jittering (SSJ) resizes and crops an image +# with a resize range of 0.8 to 1.25 of the original image size. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.8, 1.25), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='InfiniteSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +# The model is trained by 270k iterations with batch_size 64, +# which is roughly equivalent to 144 epochs. 
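+# (270000 iters x 64 images per iter = 17.28M images, i.e. 144 passes over ~120k training images.)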
+ +max_iters = 270000 +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# optimizer assumes bs=64 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004)) + +# learning rate policy +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=270000, + by_epoch=False, + milestones=[243000, 256500, 263250], + gamma=0.1) +] + +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) +log_processor = dict(by_epoch=False) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/common/ssj_scp_270k_coco-instance.py b/mmpose/configs/mmdet/common/ssj_scp_270k_coco-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..06159dd40312ec935ac383701fa7b052b863e1bf --- /dev/null +++ b/mmpose/configs/mmdet/common/ssj_scp_270k_coco-instance.py @@ -0,0 +1,60 @@ +_base_ = 'ssj_270k_coco-instance.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +image_size = (1024, 1024) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +# Standard Scale Jittering (SSJ) resizes and crops an image +# with a resize range of 0.8 to 1.25 of the original image size. 
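+# In this copy-paste variant the per-image SSJ transforms live in `load_pipeline`;
+# `CopyPaste` is then applied across images by the `MultiImageMixDataset` wrapper below.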
+load_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.8, 1.25), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=image_size), +] +train_pipeline = [ + dict(type='CopyPaste', max_num_pasted=100), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=load_pipeline, + backend_args=backend_args), + pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/condinst/README.md b/mmpose/configs/mmdet/condinst/README.md new file mode 100644 index 0000000000000000000000000000000000000000..01deb0ecff4e2a5526029aed31d4cf8a87c8545f --- /dev/null +++ b/mmpose/configs/mmdet/condinst/README.md @@ -0,0 +1,40 @@ +# CondInst + +> [CondInst: Conditional Convolutions for Instance +> Segmentation](https://arxiv.org/pdf/2003.05664.pdf) + + + +## Abstract + +We propose a simple yet effective instance segmentation framework, termed CondInst (conditional convolutions for instance segmentation). Top-performing instance segmentation methods such as Mask +R-CNN rely on ROI operations (typically ROIPool or ROIAlign) to +obtain the final instance masks. In contrast, we propose to solve instance segmentation from a new perspective. Instead of using instancewise ROIs as inputs to a network of fixed weights, we employ dynamic +instance-aware networks, conditioned on instances. CondInst enjoys two +advantages: 1) Instance segmentation is solved by a fully convolutional +network, eliminating the need for ROI cropping and feature alignment. +2\) Due to the much improved capacity of dynamically-generated conditional convolutions, the mask head can be very compact (e.g., 3 conv. +layers, each having only 8 channels), leading to significantly faster inference. We demonstrate a simpler instance segmentation method that can +achieve improved performance in both accuracy and inference speed. On +the COCO dataset, we outperform a few recent methods including welltuned Mask R-CNN baselines, without longer training schedules needed. + +
+ +## Results and Models + +| Backbone | Style | MS train | Lr schd | bbox AP | mask AP | Config | Download | +| :------: | :-----: | :------: | :-----: | :-----: | :-----: | :-------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Y | 1x | 39.8 | 36.0 | [config](./condinst_r50_fpn_ms-poly-90k_coco_instance.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance/condinst_r50_fpn_ms-poly-90k_coco_instance_20221129_125223-4c186406.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance/condinst_r50_fpn_ms-poly-90k_coco_instance_20221129_125223.json) | + +## Citation + +```latex +@inproceedings{tian2020conditional, + title = {Conditional Convolutions for Instance Segmentation}, + author = {Tian, Zhi and Shen, Chunhua and Chen, Hao}, + booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py b/mmpose/configs/mmdet/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..39639d874cbeb54b64a2789f251f1f6dad585ce3 --- /dev/null +++ b/mmpose/configs/mmdet/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py @@ -0,0 +1,85 @@ +_base_ = '../common/ms-poly-90k_coco-instance.py' + +# model settings +model = dict( + type='CondInst', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='CondInstBboxHead', + num_params=169, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=False, + center_sampling=True, + conv_bias=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + mask_head=dict( + type='CondInstMaskHead', + num_layers=3, + feat_channels=8, + size_of_interest=8, + mask_out_stride=4, + max_masks_to_train=300, + mask_feature_head=dict( + in_channels=256, + feat_channels=128, + start_level=0, + end_level=2, + out_channels=8, + mask_stride=8, + num_stacked_convs=4, + norm_cfg=dict(type='BN', requires_grad=True)), + loss_mask=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + eps=5e-6, + loss_weight=1.0)), + # model training and testing settings + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + 
nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr=0.5)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/condinst/metafile.yml b/mmpose/configs/mmdet/condinst/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..1237b74d77a8b1f1e4b0ba74c6bdc5e5595d9816 --- /dev/null +++ b/mmpose/configs/mmdet/condinst/metafile.yml @@ -0,0 +1,32 @@ +Collections: + - Name: CondInst + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - FPN + - FCOS + - ResNet + Paper: https://arxiv.org/abs/2003.05664 + README: configs/condinst/README.md + +Models: + - Name: condinst_r50_fpn_ms-poly-90k_coco_instance + In Collection: CondInst + Config: configs/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py + Metadata: + Training Memory (GB): 4.4 + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance/condinst_r50_fpn_ms-poly-90k_coco_instance_20221129_125223-4c186406.pth diff --git a/mmpose/configs/mmdet/conditional_detr/README.md b/mmpose/configs/mmdet/conditional_detr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4043571c576bba7f287e16e7e464950b5568543e --- /dev/null +++ b/mmpose/configs/mmdet/conditional_detr/README.md @@ -0,0 +1,39 @@ +# Conditional DETR + +> [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) + + + +## Abstract + +The DETR approach applies the transformer encoder and decoder architecture to object detection and achieves promising performance. In this paper, we handle the critical issue, slow training convergence, and present a conditional cross-attention mechanism for fast DETR training. Our approach is motivated by that the cross-attention in DETR relies highly on the content embeddings and that the spatial embeddings make minor contributions, increasing the need for high-quality content embeddings and thus increasing the training difficulty. + +
+ +Our conditional DETR learns a conditional spatial query from the decoder embedding for decoder multi-head cross-attention. The benefit is that through the conditional spatial query, each cross-attention head is able to attend to a band containing a distinct region, e.g., one object extremity or a region inside the object box (Figure 1). This narrows down the spatial range for localizing the distinct regions for object classification and box regression, thus relaxing the dependence on the content embeddings and easing the training. Empirical results show that conditional DETR converges 6.7x faster for the backbones R50 and R101 and 10x faster for stronger backbones DC5-R50 and DC5-R101. + +
+ +## Results and Models + +We provide the config files and models for Conditional DETR: [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152). + +| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :--------------: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | Conditional DETR | 50e | | | 41.1 | [config](./conditional-detr_r50_8xb2-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/conditional_detr/conditional-detr_r50_8xb2-50e_coco/conditional-detr_r50_8xb2-50e_coco_20221121_180202-c83a1dc0.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/conditional_detr/conditional-detr_r50_8xb2-50e_coco/conditional-detr_r50_8xb2-50e_coco_20221121_180202.log.json) | + +## Citation + +```latex +@inproceedings{meng2021-CondDETR, + title = {Conditional DETR for Fast Training Convergence}, + author = {Meng, Depu and Chen, Xiaokang and Fan, Zejia and Zeng, Gang and Li, Houqiang and Yuan, Yuhui and Sun, Lei and Wang, Jingdong}, + booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)}, + year = {2021} +} +``` diff --git a/mmpose/configs/mmdet/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py b/mmpose/configs/mmdet/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a21476448d0cbab6b6e4b94aa46d686e38667879 --- /dev/null +++ b/mmpose/configs/mmdet/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py @@ -0,0 +1,42 @@ +_base_ = ['../detr/detr_r50_8xb2-150e_coco.py'] +model = dict( + type='ConditionalDETR', + num_queries=300, + decoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict( + _delete_=True, + embed_dims=256, + num_heads=8, + attn_drop=0.1, + cross_attn=False), + cross_attn_cfg=dict( + _delete_=True, + embed_dims=256, + num_heads=8, + attn_drop=0.1, + cross_attn=True))), + bbox_head=dict( + type='ConditionalDETRHead', + loss_cls=dict( + _delete_=True, + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ]))) + +# learning policy +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=50, val_interval=1) + +param_scheduler = [dict(type='MultiStepLR', end=50, milestones=[40])] diff --git a/mmpose/configs/mmdet/conditional_detr/metafile.yml b/mmpose/configs/mmdet/conditional_detr/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..83f5532ce380c903d644b36055c4c2610455472a --- /dev/null +++ b/mmpose/configs/mmdet/conditional_detr/metafile.yml @@ -0,0 +1,32 @@ +Collections: + - Name: Conditional DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2108.06152 + Title: 
'Conditional DETR for Fast Training Convergence' + README: configs/conditional_detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/f4112c9e5611468ffbd57cfba548fd1289264b52/mmdet/models/detectors/conditional_detr.py#L14 + Version: v3.0.0rc6 + +Models: + - Name: conditional-detr_r50_8xb2-50e_coco + In Collection: Conditional DETR + Config: configs/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/conditional_detr/conditional-detr_r50_8xb2-50e_coco/conditional-detr_r50_8xb2-50e_coco_20221121_180202-c83a1dc0.pth diff --git a/mmpose/configs/mmdet/convnext/README.md b/mmpose/configs/mmdet/convnext/README.md new file mode 100644 index 0000000000000000000000000000000000000000..33497bb57aa9ae89b91ee16ac81e1ce02bf2ae0d --- /dev/null +++ b/mmpose/configs/mmdet/convnext/README.md @@ -0,0 +1,42 @@ +# ConvNeXt + +> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) + + + +## Abstract + +The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. + +
+ +## Results and models + +| Method | Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | box AP | mask AP | Config | Download | +| :----------------: | :--------: | :---------: | :-----: | :--------------: | :--: | :------: | :----: | :-----: | :-------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 7.3 | 46.2 | 41.7 | [config](./mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953.log.json) | +| Cascade Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 9.0 | 50.3 | 43.6 | [config](./cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200.log.json) | +| Cascade Mask R-CNN | ConvNeXt-S | ImageNet-1K | 3x | yes | yes | 12.3 | 51.8 | 44.8 | [config](./cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004.log.json) | + +**Note**: + +- ConvNeXt backbone needs to install [MMPreTrain](https://github.com/open-mmlab/mmpretrain) first, which has abundant backbones for downstream tasks. + +```shell +pip install mmpretrain +``` + +- The performance is unstable. `Cascade Mask R-CNN` may fluctuate about 0.2 mAP. 
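+
+The configs in this folder wire the backbone in from MMPreTrain via `custom_imports`. A minimal sketch of that wiring, mirroring the ConvNeXt-T configs below:
+
+```python
+# Importing `mmpretrain.models` triggers its `register_module` calls, so the
+# string type 'mmpretrain.ConvNeXt' can be resolved from the config.
+custom_imports = dict(
+    imports=['mmpretrain.models'], allow_failed_imports=False)
+checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
+
+model = dict(
+    backbone=dict(
+        _delete_=True,  # replace the ResNet backbone inherited from the base config
+        type='mmpretrain.ConvNeXt',
+        arch='tiny',
+        out_indices=[0, 1, 2, 3],
+        gap_before_final_norm=False,
+        init_cfg=dict(
+            type='Pretrained', checkpoint=checkpoint_file,
+            prefix='backbone.')),
+    neck=dict(in_channels=[96, 192, 384, 768]))  # ConvNeXt-T stage widths
+```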
+ +## Citation + +```bibtex +@article{liu2022convnet, + title={A ConvNet for the 2020s}, + author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining}, + journal={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` diff --git a/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py b/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5fbedcaa78636f11a5718f1123d33e7e2ac273 --- /dev/null +++ b/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py @@ -0,0 +1,26 @@ +_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa + +# please install mmpretrain +# import mmpretrain.models to trigger register_module in mmpretrain +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmpretrain.ConvNeXt', + arch='small', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.6, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.'))) + +optim_wrapper = dict(paramwise_cfg={ + 'decay_rate': 0.7, + 'decay_type': 'layer_wise', + 'num_layers': 12 +}) diff --git a/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py b/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c92f86838c31710dd550c36d9abc11d79bb6e2eb --- /dev/null +++ b/mmpose/configs/mmdet/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py @@ -0,0 +1,154 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmpretrain +# import mmpretrain.models to trigger register_module in mmpretrain +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmpretrain.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768]), + roi_head=dict(bbox_head=[ + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + 
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) + ])) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] + +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
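+# `LearningRateDecayOptimizerConstructor` additionally applies layer-wise LR
+# decay: the learning rate of earlier backbone layers is scaled down by extra
+# factors of `decay_rate`, so the pretrained ConvNeXt stages are updated more
+# conservatively than the randomly initialized detection heads.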
+optim_wrapper = dict( + type='AmpOptimWrapper', + constructor='LearningRateDecayOptimizerConstructor', + paramwise_cfg={ + 'decay_rate': 0.7, + 'decay_type': 'layer_wise', + 'num_layers': 6 + }, + optimizer=dict( + _delete_=True, + type='AdamW', + lr=0.0002, + betas=(0.9, 0.999), + weight_decay=0.05)) diff --git a/mmpose/configs/mmdet/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py b/mmpose/configs/mmdet/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5792b5b5c5a03c85a7d69040dd9a0b5381bc7995 --- /dev/null +++ b/mmpose/configs/mmdet/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py @@ -0,0 +1,96 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmpretrain +# import mmpretrain.models to trigger register_module in mmpretrain +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmpretrain.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768])) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] + +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
+optim_wrapper = dict( + type='AmpOptimWrapper', + constructor='LearningRateDecayOptimizerConstructor', + paramwise_cfg={ + 'decay_rate': 0.95, + 'decay_type': 'layer_wise', + 'num_layers': 6 + }, + optimizer=dict( + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + )) diff --git a/mmpose/configs/mmdet/convnext/metafile.yml b/mmpose/configs/mmdet/convnext/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..b9fd7506cf46896d6c5f2238b594d32558ed3195 --- /dev/null +++ b/mmpose/configs/mmdet/convnext/metafile.yml @@ -0,0 +1,93 @@ +Models: + - Name: mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco + In Collection: Cascade Mask R-CNN + Config: configs/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 9.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.25.0 + + - Name: cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco + In Collection: Cascade Mask R-CNN + Config: configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 12.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 
2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.25.0 diff --git a/mmpose/configs/mmdet/cornernet/README.md b/mmpose/configs/mmdet/cornernet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e44964d8eac120f7313e7891b1771393b66bd9ae --- /dev/null +++ b/mmpose/configs/mmdet/cornernet/README.md @@ -0,0 +1,43 @@ +# CornerNet + +> [Cornernet: Detecting objects as paired keypoints](https://arxiv.org/abs/1808.01244) + + + +## Abstract + +We propose CornerNet, a new approach to object detection where we detect an object bounding box as a pair of keypoints, the top-left corner and the bottom-right corner, using a single convolution neural network. By detecting objects as paired keypoints, we eliminate the need for designing a set of anchor boxes commonly used in prior single-stage detectors. In addition to our novel formulation, we introduce corner pooling, a new type of pooling layer that helps the network better localize corners. Experiments show that CornerNet achieves a 42.2% AP on MS COCO, outperforming all existing one-stage detectors. + +
+ +## Results and Models + +| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------------: | :------------------------------------------------------------------: | :---------------: | :------: | :------------: | :----: | :------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HourglassNet-104 | [10 x 5](./cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py) | 180/210 | 13.9 | 4.2 | 41.2 | [config](./cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720.log.json) | +| HourglassNet-104 | [8 x 6](./cornernet_hourglass104_8xb6-210e-mstest_coco.py) | 180/210 | 15.9 | 4.2 | 41.2 | [config](./cornernet_hourglass104_8xb6-210e-mstest_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618.log.json) | +| HourglassNet-104 | [32 x 3](./cornernet_hourglass104_32xb3-210e-mstest_coco.py) | 180/210 | 9.5 | 3.9 | 40.4 | [config](./cornernet_hourglass104_32xb3-210e-mstest_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110.log.json) | + +Note: + +- TTA setting is single-scale and `flip=True`. If you want to reproduce the TTA performance, please add `--tta` in the test command. +- Experiments with `images_per_gpu=6` are conducted on Tesla V100-SXM2-32GB, `images_per_gpu=3` are conducted on GeForce GTX 1080 Ti. +- Here are the descriptions of each experiment setting: + - 10 x 5: 10 GPUs with 5 images per gpu. This is the same setting as that reported in the original paper. + - 8 x 6: 8 GPUs with 6 images per gpu. The total batchsize is similar to paper and only need 1 node to train. + - 32 x 3: 32 GPUs with 3 images per gpu. The default setting for 1080TI and need 4 nodes to train. 
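+
+For example, a TTA evaluation of the 8 x 6 model can be launched from the MMDetection root roughly as follows (the checkpoint is the one linked in the table above):
+
+```shell
+# --tta enables the flip + soft-NMS test-time augmentation defined in the config.
+python tools/test.py \
+    configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py \
+    cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth \
+    --tta
+```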
+ +## Citation + +```latex +@inproceedings{law2018cornernet, + title={Cornernet: Detecting objects as paired keypoints}, + author={Law, Hei and Deng, Jia}, + booktitle={15th European Conference on Computer Vision, ECCV 2018}, + pages={765--781}, + year={2018}, + organization={Springer Verlag} +} +``` diff --git a/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..76339163b618a5a9d41a542ec75192aedb409eea --- /dev/null +++ b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py @@ -0,0 +1,8 @@ +_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py' + +train_dataloader = dict(batch_size=5) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (10 GPUs) x (5 samples per GPU) +auto_scale_lr = dict(base_batch_size=50) diff --git a/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_32xb3-210e-mstest_coco.py b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_32xb3-210e-mstest_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..51a4740318a1d85a62b6b4482c53808c98fb8a62 --- /dev/null +++ b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_32xb3-210e-mstest_coco.py @@ -0,0 +1,8 @@ +_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py' + +train_dataloader = dict(batch_size=3) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (3 samples per GPU) +auto_scale_lr = dict(base_batch_size=96) diff --git a/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bdb46fff164f796d9333c123deb701c341bdc1e3 --- /dev/null +++ b/mmpose/configs/mmdet/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py @@ -0,0 +1,183 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True) + +# model settings +model = dict( + type='CornerNet', + data_preprocessor=data_preprocessor, + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CornerHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=1, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_embedding=dict( + type='AssociativeEmbeddingLoss', + pull_weight=0.10, + push_weight=0.10), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + 
saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + # The cropped images are padded into squares during training, + # but may be smaller than crop_size. + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. + to_rgb=data_preprocessor['bgr_to_rgb']), + # Make sure the output is always crop_size. + dict(type='Resize', scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args=_base_.backend_args, + ), + # don't need Resize + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. + to_rgb=data_preprocessor['bgr_to_rgb']), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border')) +] + +train_dataloader = dict( + batch_size=6, + num_workers=3, + batch_sampler=None, + dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.0005), + clip_grad=dict(max_norm=35, norm_type=2)) + +max_epochs = 210 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[180], + gamma=0.1) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (6 samples per GPU) +auto_scale_lr = dict(base_batch_size=48) + +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict( + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'), + max_per_img=100)) + +tta_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + [ + # ``RandomFlip`` must be placed before ``RandomCenterCropPad``, + # otherwise bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) + ], + [ + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + mean=data_preprocessor['mean'], + std=data_preprocessor['std'], + # Image data is not converted to rgb. 
+ to_rgb=data_preprocessor['bgr_to_rgb']) + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'flip', 'flip_direction', 'border')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/cornernet/metafile.yml b/mmpose/configs/mmdet/cornernet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f915cf37e8e157405a66431dfb21595db319b8b6 --- /dev/null +++ b/mmpose/configs/mmdet/cornernet/metafile.yml @@ -0,0 +1,83 @@ +Collections: + - Name: CornerNet + Metadata: + Training Data: COCO + Training Techniques: + - Adam + Training Resources: 8x V100 GPUs + Architecture: + - Corner Pooling + - Stacked Hourglass Network + Paper: + URL: https://arxiv.org/abs/1808.01244 + Title: 'CornerNet: Detecting Objects as Paired Keypoints' + README: configs/cornernet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.3.0/mmdet/models/detectors/cornernet.py#L9 + Version: v2.3.0 + +Models: + - Name: cornernet_hourglass104_10xb5-crop511-210e-mstest_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py + Metadata: + Training Resources: 10x V100 GPUs + Batch Size: 50 + Training Memory (GB): 13.9 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth + + - Name: cornernet_hourglass104_8xb6-210e-mstest_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py + Metadata: + Batch Size: 48 + Training Memory (GB): 15.9 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth + + - Name: cornernet_hourglass104_32xb3-210e-mstest_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_32xb3-210e-mstest_coco.py + Metadata: + Training Resources: 32x V100 GPUs + Batch Size: 96 + Training Memory (GB): 9.5 + inference time (ms/im): + - value: 256.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth diff --git a/mmpose/configs/mmdet/crowddet/README.md b/mmpose/configs/mmdet/crowddet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..abc0f2d2dfac8fa64cab267c20f58c9113737d07 --- /dev/null +++ b/mmpose/configs/mmdet/crowddet/README.md @@ -0,0 +1,37 @@ +# CrowdDet + +> [Detection in Crowded Scenes: One Proposal, Multiple Predictions](https://arxiv.org/abs/2003.09163) + + + +## Abstract + +We propose a simple yet effective proposal-based object detector, aiming at detecting highly-overlapped instances in 
crowded scenes. The key of our approach is to let each proposal predict a set of correlated instances rather than a single one as in previous proposal-based frameworks. Equipped with new techniques such as EMD Loss and Set NMS, our detector can effectively handle the difficulty of detecting highly overlapped objects. On an FPN-Res50 baseline, our detector can obtain 4.9% AP gains on the challenging CrowdHuman dataset and 1.0% MR^−2 improvements on the CityPersons dataset, without bells and whistles. Moreover, on less crowded datasets like COCO, our approach can still achieve moderate improvement, suggesting the proposed method is robust to crowdedness. Code and pre-trained models will be released at https://github.com/megvii-model/CrowdDetection. + +
+ +## Results and Models + +| Backbone | RM | Style | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :---: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | False | pytorch | 4.4 | - | 90.0 | [config](./crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman_20221023_174954-dc319c2d.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman_20221023_174954.log.json) | +| R-50-FPN | True | pytorch | 4.8 | - | 90.32 | [config](./crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman_20221024_215917-45602806.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman_20221024_215917.log.json) | + +Note: + +- RM indicates whether to use the refine module. +- The dataset for training and testing this model is `CrowdHuman`, and the metric of `box AP` is calculated by `mmdet/evaluation/metrics/crowdhuman_metric.py`. + +## Citation + +```latex +@inproceedings{Chu_2020_CVPR, + title={Detection in Crowded Scenes: One Proposal, Multiple Predictions}, + author={Chu, Xuangeng and Zheng, Anlin and Zhang, Xiangyu and Sun, Jian}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py b/mmpose/configs/mmdet/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py new file mode 100644 index 0000000000000000000000000000000000000000..8815be77d49cf77afff6f888ee225e928e43b402 --- /dev/null +++ b/mmpose/configs/mmdet/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py @@ -0,0 +1,227 @@ +_base_ = ['../_base_/default_runtime.py'] + +model = dict( + type='CrowdDet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + pad_size_divisor=64, + # This option is set according to https://github.com/Purkialo/CrowdDet/ + # blob/master/lib/data/CrowdHuman.py The images in the entire batch are + # resize together. 
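+        # `BatchResize` rescales every image in the batch to one common scale
+        # (and pads to a multiple of 64), which is why the train pipeline below
+        # has no per-image `Resize` step.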
+ batch_augments=[ + dict(type='BatchResize', scale=(1400, 800), pad_size_divisor=64) + ]), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + upsample_cfg=dict(mode='bilinear', align_corners=False)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0, 2.0, 3.0], + strides=[4, 8, 16, 32, 64], + centers=[(8, 8), (8, 8), (8, 8), (8, 8), (8, 8)]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + clip_border=False), + loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='MultiInstanceRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=-1, + aligned=True, + use_torchvision=True), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='MultiInstanceBBoxHead', + with_refine=False, + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', + loss_weight=1.0, + use_sigmoid=False, + reduction='none'), + loss_bbox=dict( + type='SmoothL1Loss', loss_weight=1.0, reduction='none'))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=(0.3, 0.7), + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2400, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=2), + rcnn=dict( + assigner=dict( + type='MultiInstanceAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.3, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='MultiInsRandomSampler', + num=512, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1200, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=2), + rcnn=dict( + nms=dict(type='nms', iou_threshold=0.5), + score_thr=0.01, + max_per_img=500))) + +dataset_type = 'CrowdHumanDataset' +data_root = 'data/CrowdHuman/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/tracking/CrowdHuman/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/tracking/', +# 'data/': 's3://openmmlab/datasets/tracking/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1400, 800), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=2, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=None, # The 'batch_sampler' may decrease the precision + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotation_train.odgt', + data_prefix=dict(img='Images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotation_val.odgt', + data_prefix=dict(img='Images/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CrowdHumanMetric', + ann_file=data_root + 'annotation_val.odgt', + metric=['AP', 'MR', 'JI'], + backend_args=backend_args) +test_evaluator = val_evaluator + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=30, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=800), + dict( + type='MultiStepLR', + begin=0, + end=30, + by_epoch=True, + milestones=[24, 27], + gamma=0.1) +] + +# optimizer +auto_scale_lr = dict(base_batch_size=16) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py b/mmpose/configs/mmdet/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py new file mode 100644 index 0000000000000000000000000000000000000000..80277ce1c1436c37c4e2a4d13293d0ecb8ba4722 --- /dev/null +++ b/mmpose/configs/mmdet/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py @@ -0,0 +1,3 @@ +_base_ = './crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py' + +model = dict(roi_head=dict(bbox_head=dict(with_refine=True))) diff --git a/mmpose/configs/mmdet/crowddet/metafile.yml b/mmpose/configs/mmdet/crowddet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f191dea9cc599f64091434152000e67289f9180 --- /dev/null +++ b/mmpose/configs/mmdet/crowddet/metafile.yml @@ -0,0 +1,47 @@ +Collections: + - Name: CrowdDet + Metadata: + Training Data: CrowdHuman + Training Techniques: + - SGD + - EMD Loss + Training Resources: 8x A100 GPUs + Architecture: + - FPN + - RPN + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/2003.09163 + Title: 'Detection in Crowded Scenes: One Proposal, Multiple Predictions' + README: configs/crowddet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v3.0.0rc3/mmdet/models/detectors/crowddet.py + Version: v3.0.0rc3 + +Models: + - Name: crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman + In Collection: CrowdDet + Config: 
configs/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 30 + Results: + - Task: Object Detection + Dataset: CrowdHuman + Metrics: + box AP: 90.32 + Weights: https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman_20221024_215917-45602806.pth + + - Name: crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman + In Collection: CrowdDet + Config: configs/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py + Metadata: + Training Memory (GB): 4.4 + Epochs: 30 + Results: + - Task: Object Detection + Dataset: CrowdHuman + Metrics: + box AP: 90.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman_20221023_174954-dc319c2d.pth diff --git a/mmpose/configs/mmdet/dab_detr/README.md b/mmpose/configs/mmdet/dab_detr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5661f27a30268a9a50a956e51e948c36c9287356 --- /dev/null +++ b/mmpose/configs/mmdet/dab_detr/README.md @@ -0,0 +1,40 @@ +# DAB-DETR + +> [DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR](https://arxiv.org/abs/2201.12329) + + + +## Abstract + +We present in this paper a novel query formulation using dynamic anchor boxes for DETR (DEtection TRansformer) and offer a deeper understanding of the role of queries in DETR. This new formulation directly uses box coordinates as queries in Transformer decoders and dynamically updates them layer-by-layer. Using box coordinates not only helps using explicit positional priors to improve the query-to-feature similarity and eliminate the slow training convergence issue in DETR, but also allows us to modulate the positional attention map using the box width and height information. Such a design makes it clear that queries in DETR can be implemented as performing soft ROI pooling layer-by-layer in a cascade manner. As a result, it leads to the best performance on MS-COCO benchmark among the DETR-like detection models under the same setting, e.g., AP 45.7% using ResNet50-DC5 as backbone trained in 50 epochs. We also conducted extensive experiments to confirm our analysis and verify the effectiveness of our methods. + +
+ +## Results and Models + +We provide the config files and models for DAB-DETR: [DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR](https://arxiv.org/abs/2201.12329). + +| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :------: | :-----: | :------: | :------------: | :----: | :---------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | DAB-DETR | 50e | | | 42.3 | [config](./dab-detr_r50_8xb2-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/dab_detr/dab-detr_r50_8xb2-50e_coco/dab-detr_r50_8xb2-50e_coco_20221122_120837-c1035c8c.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/dab_detr/dab-detr_r50_8xb2-50e_coco/dab-detr_r50_8xb2-50e_coco_20221122_120837.log.json) | + +## Citation + +```latex +@inproceedings{ + liu2022dabdetr, + title={{DAB}-{DETR}: Dynamic Anchor Boxes are Better Queries for {DETR}}, + author={Shilong Liu and Feng Li and Hao Zhang and Xiao Yang and Xianbiao Qi and Hang Su and Jun Zhu and Lei Zhang}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=oMI9PjOb9Jl} +} +``` diff --git a/mmpose/configs/mmdet/dab_detr/dab-detr_r50_8xb2-50e_coco.py b/mmpose/configs/mmdet/dab_detr/dab-detr_r50_8xb2-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..314ed97e2d80ae3c95119abf9166f95d416c010e --- /dev/null +++ b/mmpose/configs/mmdet/dab_detr/dab-detr_r50_8xb2-50e_coco.py @@ -0,0 +1,159 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DABDETR', + num_queries=300, + with_random_refpoints=False, + num_patterns=0, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=None, + num_outs=1), + encoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict( + embed_dims=256, num_heads=8, dropout=0., batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='PReLU')))), + decoder=dict( + num_layers=6, + query_dim=4, + query_scale_type='cond_elewise', + with_modulated_hw_attn=True, + layer_cfg=dict( + self_attn_cfg=dict( + embed_dims=256, + num_heads=8, + attn_drop=0., + proj_drop=0., + cross_attn=False), + cross_attn_cfg=dict( + embed_dims=256, + num_heads=8, + attn_drop=0., + proj_drop=0., + cross_attn=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='PReLU'))), + return_intermediate=True), + positional_encoding=dict(num_feats=128, temperature=20, normalize=True), + bbox_head=dict( + type='DABDETRHead', + num_classes=80, + embed_dims=256, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + 
loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2., eps=1e-8), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) + +# learning policy +max_epochs = 50 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[40], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16, enable=False) diff --git a/mmpose/configs/mmdet/dab_detr/metafile.yml b/mmpose/configs/mmdet/dab_detr/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..94383a0493b86a730181f78ab2f0e94a2ab2de73 --- /dev/null +++ b/mmpose/configs/mmdet/dab_detr/metafile.yml @@ -0,0 +1,32 @@ +Collections: + - Name: DAB-DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2201.12329 + Title: 'DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR' + README: configs/dab_detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/f4112c9e5611468ffbd57cfba548fd1289264b52/mmdet/models/detectors/dab_detr.py#L15 + Version: v3.0.0rc6 + +Models: + - Name: dab-detr_r50_8xb2-50e_coco + In Collection: DAB-DETR + Config: configs/dab_detr/dab-detr_r50_8xb2-50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/dab_detr/dab-detr_r50_8xb2-50e_coco/dab-detr_r50_8xb2-50e_coco_20221122_120837-c1035c8c.pth diff --git a/mmpose/configs/mmdet/dcn/README.md b/mmpose/configs/mmdet/dcn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e287e1d5ef99e68dd2d7f2fccbacddde7428522e --- /dev/null +++ b/mmpose/configs/mmdet/dcn/README.md @@ -0,0 +1,48 @@ +# DCN + +> [Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211) + + + +## Abstract + +Convolutional neural networks (CNNs) are inherently limited to model geometric transformations due to the fixed geometric structures in its building modules. In this work, we introduce two new modules to enhance the transformation modeling capacity of CNNs, namely, deformable convolution and deformable RoI pooling. Both are based on the idea of augmenting the spatial sampling locations in the modules with additional offsets and learning the offsets from target tasks, without additional supervision. The new modules can readily replace their plain counterparts in existing CNNs and can be easily trained end-to-end by standard back-propagation, giving rise to deformable convolutional networks. Extensive experiments validate the effectiveness of our approach on sophisticated vision tasks of object detection and semantic segmentation. + +
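As a rough illustration of what the `dcn=dict(type='DCN', ...)` options in the configs below switch on, here is a heavily simplified, stand-alone sketch of offset-based sampling built on `grid_sample`. It learns a single offset per location instead of one per kernel tap, so it only conveys the core idea and is not the fused mmcv CUDA op:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyDeformSample(nn.Module):
    """Simplified deformable sampling: each output location reads the input
    at a position shifted by a learned, input-dependent offset."""

    def __init__(self, channels):
        super().__init__()
        # Offsets are predicted from the input itself; zero-init makes the
        # module start out as an identity resampling, as in deformable conv.
        self.offset_conv = nn.Conv2d(channels, 2, kernel_size=3, padding=1)
        nn.init.zeros_(self.offset_conv.weight)
        nn.init.zeros_(self.offset_conv.bias)

    def forward(self, x):
        n, _, h, w = x.shape
        offsets = self.offset_conv(x)                 # (N, 2, H, W), in pixels
        # Base sampling grid in normalised [-1, 1] coordinates, x before y.
        ys, xs = torch.meshgrid(
            torch.linspace(-1, 1, h), torch.linspace(-1, 1, w), indexing='ij')
        base = torch.stack((xs, ys), dim=-1).expand(n, h, w, 2)
        # Convert pixel offsets to normalised units and shift the grid.
        shift = offsets.permute(0, 2, 3, 1) / torch.tensor([w / 2.0, h / 2.0])
        return F.grid_sample(x, base + shift, align_corners=True)


feat = torch.randn(1, 16, 32, 32)
print(ToyDeformSample(16)(feat).shape)  # torch.Size([1, 16, 32, 32])
```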
+ +## Results and Models + +| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :----------: | :-----: | :----------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 4.0 | 17.8 | 41.3 | | [config](./faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_212941.log.json) | +| R-50-FPN | Faster | pytorch | - | dpool | 1x | 5.0 | 17.2 | 38.9 | | [config](./faster-rcnn_r50_fpn_dpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307_203250.log.json) | +| R-101-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 12.5 | 42.7 | | [config](./faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_230019.log.json) | +| X-101-32x4d-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 7.3 | 10.0 | 44.5 | | [config](./faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203_001325.log.json) | +| R-50-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 15.4 | 41.8 | 37.4 | [config](./mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203_061339.log.json) | +| R-101-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 6.5 | 11.7 | 43.5 | 38.9 | [config](./mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216_191601.log.json) | +| R-50-FPN | Cascade | 
pytorch | dconv(c3-c5) | - | 1x | 4.5 | 14.6 | 43.8 | | [config](./cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_220843.log.json) | +| R-101-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 6.4 | 11.0 | 45.0 | | [config](./cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_224829.log.json) | +| R-50-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 10.0 | 44.4 | 38.6 | [config](./cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202_010309.log.json) | +| R-101-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 8.0 | 8.6 | 45.8 | 39.7 | [config](./cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204_134006.log.json) | +| X-101-32x4d-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 9.2 | | 47.3 | 41.1 | [config](./cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-20200606_183737.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | dconv(c3-c5) | - | 1x | 3.0 | | 41.9 | 37.5 | [config](./mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247.log.json) | + +**Notes:** + +- `dconv` denotes deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `dpool` denotes deformable roi pooling. +- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. +- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. 
+- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@inproceedings{dai2017deformable, + title={Deformable Convolutional Networks}, + author={Dai, Jifeng and Qi, Haozhi and Xiong, Yuwen and Li, Yi and Zhang, Guodong and Hu, Han and Wei, Yichen}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2017} +} +``` diff --git a/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8c0ff9890e82bd0c1ee4e445e37d2c7afa534161 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cfcc5e73cc508e11d77c5a3557f30632b545b803 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..48b25f62125da09368c446bcd6ccff9b0219a7cc --- /dev/null +++ b/mmpose/configs/mmdet/dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8a942da754119b8d913f807907322a3d96c83ff8 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f6bf5b7998a972f41b52f90955ef52977adfd68c --- /dev/null +++ b/mmpose/configs/mmdet/dcn/cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..db44e7e87b2d11555140ab2c8a19f32e1ce65770 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..95f20467af60167a4a61f253e4354dadd832ccc7 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/faster-rcnn_r50_fpn_dpool_1x_coco.py b/mmpose/configs/mmdet/dcn/faster-rcnn_r50_fpn_dpool_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c65ce5fd0267dc892455da6495cd3be9f1f99fcf --- /dev/null +++ b/mmpose/configs/mmdet/dcn/faster-rcnn_r50_fpn_dpool_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + _delete_=True, + type='DeformRoIPoolPack', + output_size=7, + output_channels=256), + out_channels=256, + featmap_strides=[4, 8, 16, 32]))) diff --git a/mmpose/configs/mmdet/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e4ed832f5e7ff0d050be33e57d2fa611e9ae7e8e --- /dev/null +++ b/mmpose/configs/mmdet/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/dcn/mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3f36714a5301823ca401820ab9d926374428ee70 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0b281d417b4f6a7320201da261e5fdf6950556a1 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + 
stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py b/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9d01594314aad74bc47d7331c42a39f2ca453071 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +# MMEngine support the following two ways, users can choose +# according to convenience +# optim_wrapper = dict(type='AmpOptimWrapper') +_base_.optim_wrapper.type = 'AmpOptimWrapper' diff --git a/mmpose/configs/mmdet/dcn/metafile.yml b/mmpose/configs/mmdet/dcn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..4aa35b5d95f7f531cc2bdb8a03553ae197cfe727 --- /dev/null +++ b/mmpose/configs/mmdet/dcn/metafile.yml @@ -0,0 +1,272 @@ +Collections: + - Name: Deformable Convolutional Networks + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + Paper: + URL: https://arxiv.org/abs/1703.06211 + Title: "Deformable Convolutional Networks" + README: configs/dcn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 56.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth + + - Name: faster-rcnn_r50_fpn_dpool_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster-rcnn_r50_fpn_dpool_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + inference time (ms/im): + - value: 58.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth + + - Name: faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 80 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth + + - Name: faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory 
(GB): 7.3 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth + + - Name: mask-rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 64.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth + + - Name: mask-rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py + Metadata: + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Training Memory (GB): 3.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth + + - Name: mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth + + - Name: cascade-rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 68.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth + + - Name: cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth + + - Name: cascade-mask-rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth + + - Name: cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth + + - Name: cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth diff --git a/mmpose/configs/mmdet/dcnv2/README.md b/mmpose/configs/mmdet/dcnv2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7f42c93401f836350c7b30cf5af9b4caa7ea75c7 --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/README.md @@ -0,0 +1,37 @@ +# DCNv2 + +> [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/abs/1811.11168) + + + +## Abstract + +The superior performance of Deformable Convolutional Networks arises from its ability to adapt to the geometric variations of objects. Through an examination of its adaptive behavior, we observe that while the spatial support for its neural features conforms more closely than regular ConvNets to object structure, this support may nevertheless extend well beyond the region of interest, causing features to be influenced by irrelevant image content. To address this problem, we present a reformulation of Deformable ConvNets that improves its ability to focus on pertinent image regions, through increased modeling power and stronger training. The modeling power is enhanced through a more comprehensive integration of deformable convolution within the network, and by introducing a modulation mechanism that expands the scope of deformation modeling. 
To effectively harness this enriched modeling capability, we guide network training via a proposed feature mimicking scheme that helps the network to learn features that reflect the object focus and classification power of RCNN features. With the proposed contributions, this new version of Deformable ConvNets yields significant performance gains over the original model and produces leading results on the COCO benchmark for object detection and instance segmentation. + +## Results and Models + +| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :---------------: | :----: | :-----: | :-----------: | :----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.1 | 17.6 | 41.4 | | [config](./faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130_222144.log.json) | +| \*R-50-FPN (dg=4) | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.2 | 17.4 | 41.5 | | [config](./faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130_222058.log.json) | +| R-50-FPN | Faster | pytorch | - | mdpool | 1x | 5.8 | 16.6 | 38.7 | | [config](./faster-rcnn_r50_fpn_mdpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307_203304.log.json) | +| R-50-FPN | Mask | pytorch | mdconv(c3-c5) | - | 1x | 4.5 | 15.1 | 41.5 | 37.1 | [config](./mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203_063443.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | mdconv(c3-c5) | - | 1x | 3.1 | | 42.0 | 37.6 | [config](./mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) | + +**Notes:** + +- `mdconv` denotes modulated deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `mdpool` denotes modulated deformable roi pooling. +- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. +- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. +- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@article{zhu2018deformable, + title={Deformable ConvNets v2: More Deformable, Better Results}, + author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng}, + journal={arXiv preprint arXiv:1811.11168}, + year={2018} +} +``` diff --git a/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a7f7e4eecaf74418690975d54d09eeb0e31f9a1f --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5c58dbed3782403a5fac3c6809598372e47cd72c --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6198d6d7d72f8d012c777330f1116b46b89290be --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + _delete_=True, + type='ModulatedDeformRoIPoolPack', + output_size=7, + output_channels=256), + out_channels=256, + featmap_strides=[4, 8, 16, 32]))) diff --git a/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a90bbf31bea3663820caa4541de3ceafeb7366 --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py b/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..3b3894c2d61ee3208170235ba1aa98def79a7120 --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +# MMEngine support the following two ways, users can choose +# according to convenience +# optim_wrapper = dict(type='AmpOptimWrapper') +_base_.optim_wrapper.type = 'AmpOptimWrapper' diff --git a/mmpose/configs/mmdet/dcnv2/metafile.yml b/mmpose/configs/mmdet/dcnv2/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..dea7bfa1b531410f3c81693d7012a835781a63ca --- /dev/null +++ b/mmpose/configs/mmdet/dcnv2/metafile.yml @@ -0,0 +1,123 @@ +Collections: + - Name: Deformable Convolutional Networks v2 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + Paper: + URL: https://arxiv.org/abs/1811.11168 + Title: "Deformable ConvNets v2: More Deformable, Better Results" + README: configs/dcnv2/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_fpn_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcnv2/faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + inference time (ms/im): + - value: 56.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth + + - Name: faster-rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcnv2/faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 57.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth + + - Name: faster-rcnn_r50_fpn_mdpool_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py + Metadata: + Training Memory (GB): 5.8 + inference time (ms/im): + - value: 60.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth + + - Name: mask-rcnn_r50_fpn_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 66.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + 
Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth + + - Name: mask-rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py + Metadata: + Training Memory (GB): 3.1 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth diff --git a/mmpose/configs/mmdet/ddod/README.md b/mmpose/configs/mmdet/ddod/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d5ea9cd0cc11f7de0adf34aa4574bc20a8c11219 --- /dev/null +++ b/mmpose/configs/mmdet/ddod/README.md @@ -0,0 +1,31 @@ +# DDOD + +> [Disentangle Your Dense Object Detector](https://arxiv.org/pdf/2107.02963.pdf) + + + +## Abstract + +Deep learning-based dense object detectors have achieved great success in the past few years and have been applied to numerous multimedia applications such as video understanding. However, the current training pipeline for dense detectors is compromised to lots of conjunctions that may not hold. In this paper, we investigate three such important conjunctions: 1) only samples assigned as positive in classification head are used to train the regression head; 2) classification and regression share the same input feature and computational fields defined by the parallel head architecture; and 3) samples distributed in different feature pyramid layers are treated equally when computing the loss. We first carry out a series of pilot experiments to show disentangling such conjunctions can lead to persistent performance improvement. Then, based on these findings, we propose Disentangled Dense Object Detector(DDOD), in which simple and effective disentanglement mechanisms are designed and integrated into the current state-of-the-art dense object detectors. Extensive experiments on MS COCO benchmark show that our approach can lead to 2.0 mAP, 2.4 mAP and 2.2 mAP absolute improvements on RetinaNet, FCOS, and ATSS baselines with negligible extra overhead. Notably, our best model reaches 55.0 mAP on the COCO test-dev set and 93.5 AP on the hard subset of WIDER FACE, achieving new state-of-the-art performance on these two competitive benchmarks. Code is available at https://github.com/zehuichen123/DDOD. + +
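To make the disentanglement concrete: the DDOD config below assigns positives separately for the classification and regression branches (`assigner` with `alpha=0.8` versus `reg_assigner` with `alpha=0.5`). The toy computation below shows how different alphas can select different positive candidates; the quality score here is a stand-in for illustration, not the exact cost used by mmdet's `ATSSAssigner`:

```python
import torch

# Toy candidates for one ground-truth box: classification probability and IoU.
cls_prob = torch.tensor([0.9, 0.4, 0.6, 0.2])
iou = torch.tensor([0.3, 0.8, 0.6, 0.9])


def joint_quality(alpha):
    # Larger alpha weights localisation quality more heavily,
    # smaller alpha weights classification confidence more heavily.
    return cls_prob ** (1 - alpha) * iou ** alpha


topk = 2
cls_positives = joint_quality(0.8).topk(topk).indices  # classification branch
reg_positives = joint_quality(0.5).topk(topk).indices  # regression branch
print(cls_positives.tolist(), reg_positives.tolist())  # the two sets differ
```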
+ +## Results and Models + +| Model | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :-------: | :------: | :-----: | :-----: | :------: | :----: | :---------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DDOD-ATSS | R-50 | pytorch | 1x | 3.4 | 41.7 | [config](./ddod_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737.log.json) | + +## Citation + +```latex +@inproceedings{chen2021disentangle, +title={Disentangle Your Dense Object Detector}, +author={Chen, Zehui and Yang, Chenhongyi and Li, Qiaofei and Zhao, Feng and Zha, Zheng-Jun and Wu, Feng}, +booktitle={Proceedings of the 29th ACM International Conference on Multimedia}, +pages={4939--4948}, +year={2021} +} +``` diff --git a/mmpose/configs/mmdet/ddod/ddod_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/ddod/ddod_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fed1116b1f92e613517a57aa196839e4de3037dc --- /dev/null +++ b/mmpose/configs/mmdet/ddod/ddod_r50_fpn_1x_coco.py @@ -0,0 +1,72 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='DDOD', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='DDODHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_iou=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + # assigner is mean cls_assigner + assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8), + reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/ddod/metafile.yml b/mmpose/configs/mmdet/ddod/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c22395002bd614cd0e75d753320c3f9e7ce54bd1 --- /dev/null +++ 
b/mmpose/configs/mmdet/ddod/metafile.yml @@ -0,0 +1,33 @@ +Collections: + - Name: DDOD + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - DDOD + - FPN + - ResNet + Paper: + URL: https://arxiv.org/pdf/2107.02963.pdf + Title: 'Disentangle Your Dense Object Detector' + README: configs/ddod/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/models/detectors/ddod.py#L6 + Version: v2.25.0 + +Models: + - Name: ddod_r50_fpn_1x_coco + In Collection: DDOD + Config: configs/ddod/ddod_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth diff --git a/mmpose/configs/mmdet/ddq/README.md b/mmpose/configs/mmdet/ddq/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3f6f459cbbb48c50d5fbd6abec3c6dbda4d422b4 --- /dev/null +++ b/mmpose/configs/mmdet/ddq/README.md @@ -0,0 +1,39 @@ +# DDQ + +> [Dense Distinct Query for End-to-End Object Detection](https://arxiv.org/abs/2303.12776) + + + +## Abstract + + + +One-to-one label assignment in object detection has successfully obviated the need for non-maximum suppression (NMS) as postprocessing and makes the pipeline end-to-end. However, it triggers a new dilemma as the widely used sparse queries cannot guarantee a high recall, while dense queries inevitably bring more similar queries and encounter optimization difficulties. As both sparse and dense queries are problematic, then what are the expected queries in end-to-end object detection? This paper shows that the solution should be Dense Distinct Queries (DDQ). Concretely, we first lay dense queries like traditional detectors and then select distinct ones for one-to-one assignments. DDQ blends the advantages of traditional and recent end-to-end detectors and significantly improves the performance of various detectors including FCN, R-CNN, and DETRs. Most impressively, DDQ-DETR achieves 52.1 AP on MS-COCO dataset within 12 epochs using a ResNet-50 backbone, outperforming all existing detectors in the same setting. DDQ also shares the benefit of end-to-end detectors in crowded scenes and achieves 93.8 AP on CrowdHuman. We hope DDQ can inspire researchers to consider the complementarity between traditional methods and end-to-end detectors. 
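The selection of "distinct" queries described above boils down to class-agnostic NMS over the dense query proposals (see `dqs_cfg=dict(type='nms', iou_threshold=0.8)` in the configs below). A minimal sketch with made-up boxes, using `torchvision.ops.nms` rather than the mmcv op:

```python
import torch
from torchvision.ops import nms

# Toy dense query proposals: (x1, y1, x2, y2) boxes with their scores.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 10.0, 10.0],    # near-duplicate of box 0
                      [20.0, 20.0, 30.0, 30.0]])
scores = torch.tensor([0.9, 0.8, 0.7])

# Class-agnostic NMS keeps only distinct queries; the survivors are then
# matched one-to-one, which is the "dense then distinct" idea of DDQ.
keep = nms(boxes, scores, iou_threshold=0.8)
print(keep.tolist())  # [0, 2] -- the duplicate query is suppressed
```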
+ +![ddq_arch](https://github.com/open-mmlab/mmdetection/assets/33146359/5ca9f11b-b6f3-454f-a2d1-3009ee337bbc) + +## Results and Models + +| Model | Backbone | Lr schd | Augmentation | box AP(val) | Config | Download | +| :---------------: | :------: | :-----: | :----------: | :---------: | :------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DDQ DETR-4scale | R-50 | 12e | DETR | 51.4 | [config](./ddq-detr-4scale_r50_8xb2-12e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq-detr-4scale_r50_8xb2-12e_coco/ddq-detr-4scale_r50_8xb2-12e_coco_20230809_170711-42528127.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq-detr-4scale_r50_8xb2-12e_coco/ddq-detr-4scale_r50_8xb2-12e_coco_20230809_170711.log.json) | +| DDQ DETR-5scale\* | R-50 | 12e | DETR | 52.1 | [config](./ddq-detr-5scale_r50_8xb2-12e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_5scale_coco_1x.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_5scale_coco_1x_20230319_103307.log) | +| DDQ DETR-4scale\* | Swin-L | 30e | DETR | 58.7 | [config](./ddq-detr-4scale_swinl_8xb2-30e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_swinl_30e.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_swinl_30e_20230316_221721_20230318_143554.log) | + +**Note** + +- Models labeled * are not trained by us, but from [DDQ official website](https://github.com/jshilong/DDQ). +- We find that the performance is unstable and may fluctuate by about 0.2 mAP. 
+ +## Citation + +```latex +@InProceedings{Zhang_2023_CVPR, + author = {Zhang, Shilong and Wang, Xinjiang and Wang, Jiaqi and Pang, Jiangmiao and Lyu, Chengqi and Zhang, Wenwei and Luo, Ping and Chen, Kai}, + title = {Dense Distinct Query for End-to-End Object Detection}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2023}, + pages = {7329-7338} +} +``` diff --git a/mmpose/configs/mmdet/ddq/ddq-detr-4scale_r50_8xb2-12e_coco.py b/mmpose/configs/mmdet/ddq/ddq-detr-4scale_r50_8xb2-12e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5e64afc087e1ed68b8b5d1474127c832f893cb9b --- /dev/null +++ b/mmpose/configs/mmdet/ddq/ddq-detr-4scale_r50_8xb2-12e_coco.py @@ -0,0 +1,170 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DDQDETR', + num_queries=900, # num_matching_queries + # ratio of num_dense queries to num_queries + dense_topk_ratio=1.5, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + # encoder class name: DeformableDetrTransformerEncoder + encoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0))), # 0.1 for DeformDETR + # decoder class name: DDQTransformerDecoder + decoder=dict( + # `num_layers` >= 2, because attention masks of the last + # `num_layers` - 1 layers are used for distinct query selection + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, + dropout=0.0), # 0.1 for DeformDETR + cross_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0)), # 0.1 for DeformDETR + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, + normalize=True, + offset=0.0, # -0.5 for DeformDETR + temperature=20), # 10000 for DeformDETR + bbox_head=dict( + type='DDQDETRHead', + num_classes=80, + sync_cls_avg_factor=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( + label_noise_scale=0.5, + box_noise_scale=1.0, + group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), + dqs_cfg=dict(type='nms', iou_threshold=0.8), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +train_pipeline = [ + dict(type='LoadImageFromFile', 
backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.05), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) + +# learning policy +max_epochs = 12 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=False, + begin=0, + end=2000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
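+# When automatic LR scaling is enabled at launch, the optimizer LR defined above
+# is scaled linearly by (actual total batch size) / base_batch_size, so the
+# schedule stays consistent across different GPU counts or per-GPU batch sizes.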
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/ddq/ddq-detr-4scale_swinl_8xb2-30e_coco.py b/mmpose/configs/mmdet/ddq/ddq-detr-4scale_swinl_8xb2-30e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d863649411e3157373961b3da339990df1e6f267 --- /dev/null +++ b/mmpose/configs/mmdet/ddq/ddq-detr-4scale_swinl_8xb2-30e_coco.py @@ -0,0 +1,177 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa: E501 +model = dict( + type='DDQDETR', + num_queries=900, # num_matching_queries + # ratio of num_dense queries to num_queries + dense_topk_ratio=1.5, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict( + type='ChannelMapper', + in_channels=[384, 768, 1536], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + # encoder class name: DeformableDetrTransformerEncoder + encoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0))), # 0.1 for DeformDETR + # decoder class name: DDQTransformerDecoder + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, + dropout=0.0), # 0.1 for DeformDETR + cross_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0)), # 0.1 for DeformDETR + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, + normalize=True, + offset=0.0, # -0.5 for DeformDETR + temperature=20), # 10000 for DeformDETR + bbox_head=dict( + type='DDQDETRHead', + num_classes=80, + sync_cls_avg_factor=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( + label_noise_scale=0.5, + box_noise_scale=1.0, + group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), + dqs_cfg=dict(type='nms', iou_threshold=0.8), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + 
type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.05), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.05)})) + +# learning policy +max_epochs = 30 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=False, + begin=0, + end=2000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 26], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/ddq/ddq-detr-5scale_r50_8xb2-12e_coco.py b/mmpose/configs/mmdet/ddq/ddq-detr-5scale_r50_8xb2-12e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3c38f553bdd46bc4e0611bbd0fd4bab0c1929825 --- /dev/null +++ b/mmpose/configs/mmdet/ddq/ddq-detr-5scale_r50_8xb2-12e_coco.py @@ -0,0 +1,171 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DDQDETR', + num_queries=900, # num_matching_queries + # ratio of num_dense queries to num_queries + dense_topk_ratio=1.5, + with_box_refine=True, + as_two_stage=True, + num_feature_levels=5, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[256, 512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=5), + # encoder class name: DeformableDetrTransformerEncoder + encoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=5, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0))), # 0.1 for DeformDETR + # decoder class name: DDQTransformerDecoder + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, + dropout=0.0), # 0.1 for DeformDETR + 
cross_attn_cfg=dict(embed_dims=256, num_levels=5, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0)), # 0.1 for DeformDETR + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, + normalize=True, + offset=0.0, # -0.5 for DeformDETR + temperature=20), # 10000 for DeformDETR + bbox_head=dict( + type='DDQDETRHead', + num_classes=80, + sync_cls_avg_factor=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( + label_noise_scale=0.5, + box_noise_scale=1.0, + group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), + dqs_cfg=dict(type='nms', iou_threshold=0.8), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.05), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) + +# learning policy +max_epochs = 12 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=False, + begin=0, + end=2000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/ddq/metafile.yml b/mmpose/configs/mmdet/ddq/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..bd33abe1a5122885913a1e8cbee60cb48014239f --- /dev/null +++ b/mmpose/configs/mmdet/ddq/metafile.yml @@ -0,0 +1,56 @@ +Collections: + - Name: DDQ + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2303.12776 + Title: 'Dense Distinct Query for End-to-End Object Detection' + README: configs/ddq/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/dev-3.x/mmdet/models/detectors/ddq_detr.py#L21 + Version: dev-3.x + +Models: + - Name: ddq-detr-4scale_r50_8xb2-12e_coco + In Collection: DDQ + Config: configs/ddq/ddq-detr-4scale_r50_8xb2-12e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq-detr-4scale_r50_8xb2-12e_coco/ddq-detr-4scale_r50_8xb2-12e_coco_20230809_170711-42528127.pth + + - Name: ddq-detr-5scale_r50_8xb2-12e_coco + In Collection: DDQ + Config: configs/dino/ddq-detr-5scale_r50_8xb2-12e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_5scale_coco_1x.pth + + - Name: ddq-detr-4scale_swinl_8xb2-30e_coco + In Collection: DDQ + Config: configs/dino/ddq-detr-4scale_swinl_8xb2-30e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 58.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq_detr_swinl_30e.pth diff --git a/mmpose/configs/mmdet/deepfashion/README.md b/mmpose/configs/mmdet/deepfashion/README.md new file mode 100644 index 0000000000000000000000000000000000000000..844e29d6a72906bc36fd682df270480af5a595c0 --- /dev/null +++ b/mmpose/configs/mmdet/deepfashion/README.md @@ -0,0 +1,70 @@ +# DeepFashion + +> [DeepFashion: Powering Robust Clothes Recognition and Retrieval With Rich Annotations](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html) + + + +## Abstract + +Recent advances in clothes recognition have been driven by the construction of clothes datasets. Existing datasets are limited in the amount of annotations and are difficult to cope with the various challenges in real-world applications. In this work, we introduce DeepFashion, a large-scale clothes dataset with comprehensive annotations. It contains over 800,000 images, which are richly annotated with massive attributes, clothing landmarks, and correspondence of images taken under different scenarios including store, street snapshot, and consumer. Such rich annotations enable the development of powerful algorithms in clothes recognition and facilitating future researches. To demonstrate the advantages of DeepFashion, we propose a new deep model, namely FashionNet, which learns clothing features by jointly predicting clothing attributes and landmarks. The estimated landmarks are then employed to pool or gate the learned features. It is optimized in an iterative manner. Extensive experiments demonstrate the effectiveness of FashionNet and the usefulness of DeepFashion. + +
+ +
+ +## Introduction + +[MMFashion](https://github.com/open-mmlab/mmfashion) develops "fashion parsing and segmentation" module +based on the dataset +[DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing). +Its annotation follows COCO style. +To use it, you need to first download the data. Note that we only use "img_highres" in this task. +The file tree should be like this: + +```sh +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── DeepFashion +│ │ ├── In-shop +| │ │ ├── Anno +| │ │ │   ├── segmentation +| │ │ │   | ├── DeepFashion_segmentation_train.json +| │ │ │   | ├── DeepFashion_segmentation_query.json +| │ │ │   | ├── DeepFashion_segmentation_gallery.json +| │ │ │   ├── list_bbox_inshop.txt +| │ │ │   ├── list_description_inshop.json +| │ │ │   ├── list_item_inshop.txt +| │ │ │   └── list_landmarks_inshop.txt +| │ │ ├── Eval +| │ │ │ └── list_eval_partition.txt +| │ │ ├── Img +| │ │ │ ├── img +| │ │ │ │ ├──XXX.jpg +| │ │ │ ├── img_highres +| │ │ │ └── ├──XXX.jpg + +``` + +After that you can train the Mask RCNN r50 on DeepFashion-In-shop dataset by launching training with the `mask_rcnn_r50_fpn_1x.py` config +or creating your own config file. + +## Results and Models + +| Backbone | Model type | Dataset | bbox detection Average Precision | segmentation Average Precision | Config | Download (Google) | +| :------: | :--------: | :-----------------: | :------------------------------: | :----------------------------: | :----------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 | [config](./mask-rcnn_r50_fpn_15e_deepfashion.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/20200329_192752.log.json) | + +## Citation + +```latex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` diff --git a/mmpose/configs/mmdet/deepfashion/mask-rcnn_r50_fpn_15e_deepfashion.py b/mmpose/configs/mmdet/deepfashion/mask-rcnn_r50_fpn_15e_deepfashion.py new file mode 100644 index 0000000000000000000000000000000000000000..403b18a4ca8ed61aedcb99218ecc79302826ff8c --- /dev/null +++ b/mmpose/configs/mmdet/deepfashion/mask-rcnn_r50_fpn_15e_deepfashion.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', + '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) +# runtime settings +max_epochs = 15 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + 
end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/deepsort/README.md b/mmpose/configs/mmdet/deepsort/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e50ec17eb55ffef4fb59dae43175b2688eedfaa9 --- /dev/null +++ b/mmpose/configs/mmdet/deepsort/README.md @@ -0,0 +1,109 @@ +# Simple online and realtime tracking with a deep association metric + +## Abstract + + + +Simple Online and Realtime Tracking (SORT) is a pragmatic approach to multiple object tracking with a focus on simple, effective algorithms. In this paper, we integrate appearance information to improve the performance of SORT. Due to this extension we are able to track objects through longer periods of occlusions, effectively reducing the number of identity switches. In spirit of the original framework we place much of the computational complexity into an offline pre-training stage where we learn a deep association metric on a largescale person re-identification dataset. During online application, we establish measurement-to-track associations using nearest neighbor queries in visual appearance space. Experimental evaluation shows that our extensions reduce the number of identity switches by 45%, achieving overall competitive performance at high frame rates. + + + +
+ +
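+
+As a toy illustration only (not the implementation used in this repository), the appearance-based association described above boils down to nearest-neighbor queries on ReID embeddings; the embedding sizes and the distance threshold below are invented for the example.
+
+```python
+import numpy as np
+
+# Match detections to existing tracks by cosine distance between appearance embeddings.
+rng = np.random.default_rng(0)
+track_embs = rng.normal(size=(3, 128))                            # one embedding per existing track
+det_embs = np.vstack([track_embs[1] + 0.1 * rng.normal(size=128),  # detection of track 1
+                      rng.normal(size=(2, 128))])                  # two unseen objects
+
+track_embs /= np.linalg.norm(track_embs, axis=1, keepdims=True)
+det_embs /= np.linalg.norm(det_embs, axis=1, keepdims=True)
+
+cost = 1.0 - det_embs @ track_embs.T     # cosine distance, shape (num_dets, num_tracks)
+nearest = cost.argmin(axis=1)            # nearest track for each detection
+accepted = cost.min(axis=1) < 0.4        # gate matches with a distance threshold
+for det_idx, (trk_idx, ok) in enumerate(zip(nearest, accepted)):
+    print(det_idx, int(trk_idx) if ok else None)
+```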
+
+## Results and models on MOT17
+
+Currently we do not support training ReID models for DeepSORT; we directly use the ReID model from [Tracktor](https://github.com/phil-bergmann/tracking_wo_bnw).
+Training support for these components will be added in the future.
+
+| Method | Detector | ReID | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download |
+| :------: | :----------------: | :--: | :--------: | :------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| DeepSORT | R50-FasterRCNN-FPN | R50 | half-train | half-val | N | 13.8 | 57.0 | 63.7 | 69.5 | 15063 | 40323 | 3276 | [config](deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py) | [detector](https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth) [reid](https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth) |
+
+## Get started
+
+### 1. Development Environment Setup
+
+For setting up the tracking development environment, please refer to this [document](../../docs/en/get_started.md).
+
+### 2. Dataset Preparation
+
+For preparing the tracking datasets, please refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md).
+
+### 3. Training
+
+We implement DeepSORT with independent detector and ReID models.
+Note that, because hyperparameters such as the learning rate in the default configuration are tuned for this setup,
+we recommend training with 8 GPUs in order to reproduce the reported accuracy.
+
+You can train the detector as follows.
+
+```shell
+# Train Faster R-CNN on the mot17-half-train dataset with the following command.
+# The number after the config file is the number of GPUs used. Here we use 8 GPUs.
+bash tools/dist_train.sh configs/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8
+```
+
+For more detailed usage of `train.py`, `dist_train.sh` and `slurm_train.sh`,
+please refer to this [document](../../docs/en/user_guides/tracking_train_test.md).
+
+### 4. Testing and Evaluation
+
+### 4.1 Example on MOTxx-halfval dataset
+
+**4.1.1 Use separately trained detector and ReID models for evaluation and testing**
+
+```shell
+# Example 1: Test on motXX-half-val set.
+# The number after the config file is the number of GPUs used. Here we use 8 GPUs.
+bash tools/dist_test_tracking.sh configs/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8 --detector ${DETECTOR_CHECKPOINT_PATH} --reid ${REID_CHECKPOINT_PATH}
+```
+
+**4.1.2 Use video-based evaluation and testing**
+
+We provide two ways of evaluation and testing: image-based and video-based.
+If you want to use video-based evaluation and testing, you can modify the config as follows:
+
+```python
+val_dataloader = dict(
+    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False))
+```
+
+### 4.2 Example on MOTxx-test dataset
+
+If you want to get the results of the [MOT Challenge](https://motchallenge.net/) test set,
+please use the following command to generate result files that can be used for submission.
+The results will be stored in `./mot_17_test_res`; you can modify the save path in the `test_evaluator` of the config.
+ +```shell script +# Example 2: Test on motxx-test set +# The number after config file represents the number of GPUs used +bash tools/dist_test_tracking.sh configs/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test 8 --detector ${DETECTOR_CHECKPOINT_PATH} --reid ${REID_CHECKPOINT_PATH} +``` + +If you want to know about more detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5.Inference + +Use a single GPU to predict a video and save it as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test --detector ${DETECTOR_CHECKPOINT_PATH} --reid ${REID_CHECKPOINT_PATH} --out mot.mp4 +``` + +## Citation + + + +```latex +@inproceedings{wojke2017simple, + title={Simple online and realtime tracking with a deep association metric}, + author={Wojke, Nicolai and Bewley, Alex and Paulus, Dietrich}, + booktitle={2017 IEEE international conference on image processing (ICIP)}, + pages={3645--3649}, + year={2017}, + organization={IEEE} +} +``` diff --git a/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..70d3393829b422740bfba5d1746c7651e9c2d69c --- /dev/null +++ b/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py @@ -0,0 +1,85 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py' +] + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=1), + visualization=dict(type='TrackVisualizationHook', draw=False)) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] + +detector = _base_.model +detector.pop('data_preprocessor') +detector.rpn_head.bbox_coder.update(dict(clip_border=False)) +detector.roi_head.bbox_head.update(dict(num_classes=1)) +detector.roi_head.bbox_head.bbox_coder.update(dict(clip_border=False)) +detector['init_cfg'] = dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/' + 'faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth') +del _base_.model + +model = dict( + type='DeepSORT', + data_preprocessor=dict( + type='TrackDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + detector=detector, + reid=dict( + type='BaseReID', + data_preprocessor=dict(type='mmpretrain.ClsDataPreprocessor'), + backbone=dict( + type='mmpretrain.ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=380, + loss_cls=dict(type='mmpretrain.CrossEntropyLoss', loss_weight=1.0), + loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU')), + init_cfg=dict( + 
type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth' # noqa: E501 + )), + tracker=dict( + type='SORTTracker', + motion=dict(type='KalmanFilter', center_only=False), + obj_score_thr=0.5, + reid=dict( + num_samples=10, + img_scale=(256, 128), + img_norm_cfg=None, + match_score_thr=2.0), + match_iou_thr=0.5, + momentums=None, + num_tentatives=2, + num_frames_retain=100)) + +train_dataloader = None + +train_cfg = None +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py b/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py new file mode 100644 index 0000000000000000000000000000000000000000..687ce7adfcc1742bab75cca939a99df37b43689c --- /dev/null +++ b/mmpose/configs/mmdet/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py @@ -0,0 +1,15 @@ +_base_ = [ + './deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain' + '_test-mot17halfval.py' +] + +# dataloader +val_dataloader = dict( + dataset=dict(ann_file='annotations/train_cocoformat.json')) +test_dataloader = dict( + dataset=dict( + ann_file='annotations/test_cocoformat.json', + data_prefix=dict(img_path='test'))) + +# evaluator +test_evaluator = dict(format_only=True, outfile_prefix='./mot_17_test_res') diff --git a/mmpose/configs/mmdet/deepsort/metafile.yml b/mmpose/configs/mmdet/deepsort/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..2feb358e93d1590f0305e2ed08ae40e18bbd6cb9 --- /dev/null +++ b/mmpose/configs/mmdet/deepsort/metafile.yml @@ -0,0 +1,37 @@ +Collections: + - Name: DeepSORT + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - FPN + Paper: + URL: https://arxiv.org/abs/1703.07402 + Title: Simple Online and Realtime Tracking with a Deep Association Metric + README: configs/deepsort/README.md + +Models: + - Name: deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval + In Collection: DeepSORT + Config: configs/deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: MOT17-half-train + inference time (ms/im): + - value: 72.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640, 1088) + Results: + - Task: Multiple Object Tracking + Dataset: MOT17-half-val + Metrics: + MOTA: 63.7 + IDF1: 69.5 + HOTA: 57.0 + Weights: + - https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth + - https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth diff --git a/mmpose/configs/mmdet/deformable_detr/README.md b/mmpose/configs/mmdet/deformable_detr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca897cdb4cfc17b1d194d2aeaba7feea388839f0 --- /dev/null +++ b/mmpose/configs/mmdet/deformable_detr/README.md @@ -0,0 +1,41 @@ +# Deformable DETR + +> [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) + + + +## Abstract + +DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. 
However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. + +
+ +
+ +## Results and Models + +| Backbone | Model | Lr schd | box AP | Config | Download | +| :------: | :---------------------------------: | :-----: | :----: | :---------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | Deformable DETR | 50e | 44.3 | [config](./deformable-detr_r50_16xb2-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr_r50_16xb2-50e_coco/deformable-detr_r50_16xb2-50e_coco_20221029_210934-6bc7d21b.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr_r50_16xb2-50e_coco/deformable-detr_r50_16xb2-50e_coco_20221029_210934.log.json) | +| R-50 | + iterative bounding box refinement | 50e | 46.2 | [config](./deformable-detr-refine_r50_16xb2-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco/deformable-detr-refine_r50_16xb2-50e_coco_20221022_225303-844e0f93.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco/deformable-detr-refine_r50_16xb2-50e_coco_20221022_225303.log.json) | +| R-50 | ++ two-stage Deformable DETR | 50e | 47.0 | [config](./deformable-detr-refine-twostage_r50_16xb2-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco/deformable-detr-refine-twostage_r50_16xb2-50e_coco_20221021_184714-acc8a5ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco/deformable-detr-refine-twostage_r50_16xb2-50e_coco_20221021_184714.log.json) | + +### NOTE + +1. All models are trained with batch size 32. +2. The performance is unstable. `Deformable DETR` and `iterative bounding box refinement` may fluctuate about 0.3 mAP. `two-stage Deformable DETR` may fluctuate about 0.2 mAP. + +## Citation + +We provide the config files for Deformable DETR: [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159). 
+ +```latex +@inproceedings{ +zhu2021deformable, +title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, +author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai}, +booktitle={International Conference on Learning Representations}, +year={2021}, +url={https://openreview.net/forum?id=gZ9hCDWe6ke} +} +``` diff --git a/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py b/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb67fc98486cfd929a8177b9af6be3cdab9aa4b --- /dev/null +++ b/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py @@ -0,0 +1,2 @@ +_base_ = 'deformable-detr-refine_r50_16xb2-50e_coco.py' +model = dict(as_two_stage=True) diff --git a/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py b/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b968674f4a9fc450803cdba018b0c4e9e6ca422a --- /dev/null +++ b/mmpose/configs/mmdet/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py @@ -0,0 +1,2 @@ +_base_ = 'deformable-detr_r50_16xb2-50e_coco.py' +model = dict(with_box_refine=True) diff --git a/mmpose/configs/mmdet/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py b/mmpose/configs/mmdet/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e0dee411c8e27ab440ccc874e40f4207b24a21e7 --- /dev/null +++ b/mmpose/configs/mmdet/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py @@ -0,0 +1,156 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DeformableDETR', + num_queries=300, + num_feature_levels=4, + with_box_refine=False, + as_two_stage=False, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( # DeformableDetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DeformableDetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.1))), + decoder=dict( # DeformableDetrTransformerDecoder + num_layers=6, + return_intermediate=True, + layer_cfg=dict( # DeformableDetrTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + cross_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.1)), + post_norm_cfg=None), + positional_encoding=dict(num_feats=128, normalize=True, offset=-0.5), + bbox_head=dict( + type='DeformableDETRHead', + num_classes=80, + sync_cls_avg_factor=True, + loss_cls=dict( + type='FocalLoss', 
+ use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=100)) + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1), + 'sampling_offsets': dict(lr_mult=0.1), + 'reference_points': dict(lr_mult=0.1) + })) + +# learning policy +max_epochs = 50 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[40], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (16 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) diff --git a/mmpose/configs/mmdet/deformable_detr/metafile.yml b/mmpose/configs/mmdet/deformable_detr/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..a30c97914baf6f1ec56cea8fd67b5ad1efb574fe --- /dev/null +++ b/mmpose/configs/mmdet/deformable_detr/metafile.yml @@ -0,0 +1,56 @@ +Collections: + - Name: Deformable DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://openreview.net/forum?id=gZ9hCDWe6ke + Title: 'Deformable DETR: Deformable Transformers for End-to-End Object Detection' + README: configs/deformable_detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/deformable_detr.py#L6 + Version: v2.12.0 + +Models: + - Name: deformable-detr_r50_16xb2-50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr_r50_16xb2-50e_coco/deformable-detr_r50_16xb2-50e_coco_20221029_210934-6bc7d21b.pth + + - Name: deformable-detr-refine_r50_16xb2-50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + Weights: https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco/deformable-detr-refine_r50_16xb2-50e_coco_20221022_225303-844e0f93.pth + + - Name: deformable-detr-refine-twostage_r50_16xb2-50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco/deformable-detr-refine-twostage_r50_16xb2-50e_coco_20221021_184714-acc8a5ff.pth diff --git a/mmpose/configs/mmdet/detectors/README.md b/mmpose/configs/mmdet/detectors/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2918d6e4f1072428bbefcfcd05e139fc590766aa --- /dev/null +++ b/mmpose/configs/mmdet/detectors/README.md @@ -0,0 +1,69 @@ +# DetectoRS + +> [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/abs/2006.02334) + + + +## Abstract + +Many modern object detectors demonstrate outstanding performances by using the mechanism of looking and thinking twice. In this paper, we explore this mechanism in the backbone design for object detection. At the macro level, we propose Recursive Feature Pyramid, which incorporates extra feedback connections from Feature Pyramid Networks into the bottom-up backbone layers. At the micro level, we propose Switchable Atrous Convolution, which convolves the features with different atrous rates and gathers the results using switch functions. Combining them results in DetectoRS, which significantly improves the performances of object detection. 
On COCO test-dev, DetectoRS achieves state-of-the-art 55.7% box AP for object detection, 48.5% mask AP for instance segmentation, and 50.0% PQ for panoptic segmentation. + +
+ +
+ +## Introduction + +DetectoRS requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +DetectoRS includes two major components: + +- Recursive Feature Pyramid (RFP). +- Switchable Atrous Convolution (SAC). + +They can be used independently. +Combining them together results in DetectoRS. +The results on COCO 2017 val are shown in the below table. + +| Method | Detector | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RFP | Cascade + ResNet-50 | 1x | 7.5 | - | 44.8 | | [config](./cascade-rcnn_r50-rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco_20200624_104126.log.json) | +| SAC | Cascade + ResNet-50 | 1x | 5.6 | - | 45.0 | | [config](./cascade-rcnn_r50-sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco_20200624_104402.log.json) | +| DetectoRS | Cascade + ResNet-50 | 1x | 9.9 | - | 47.4 | | [config](./detectors_cascade-rcnn_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco_20200706_001203.log.json) | +| RFP | HTC + ResNet-50 | 1x | 11.2 | - | 46.6 | 40.9 | [config](./htc_r50-rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco_20200624_103053.log.json) | +| SAC | HTC + ResNet-50 | 1x | 9.3 | - | 46.4 | 40.9 | [config](./htc_r50-sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco_20200624_103111.log.json) | +| DetectoRS | HTC + ResNet-50 | 1x | 13.6 | - | 49.1 | 42.6 | [config](./detectors_htc-r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco_20200624_103659.log.json) | +| DetectoRS | HTC + ResNet-101 | 20e | 19.6 | | 50.5 | 43.9 | [config](./detectors_htc-r101_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638-348d533b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638.log.json) | + +*Note*: This is a re-implementation based on MMDetection-V2. +The original implementation is based on MMDetection-V1. + +## Citation + +We provide the config files for [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/pdf/2006.02334.pdf). + +```latex +@article{qiao2020detectors, + title={DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution}, + author={Qiao, Siyuan and Chen, Liang-Chieh and Yuille, Alan}, + journal={arXiv preprint arXiv:2006.02334}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-rfp_1x_coco.py b/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-rfp_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c30c84d74cf68bc4369db16b6b2602626acb6fdf --- /dev/null +++ b/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-rfp_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-sac_1x_coco.py b/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-sac_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..24d6cd3a95ecf262caac667cfcc32d6885fa5880 --- /dev/null +++ b/mmpose/configs/mmdet/detectors/cascade-rcnn_r50-sac_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/detectors/detectors_cascade-rcnn_r50_1x_coco.py b/mmpose/configs/mmdet/detectors/detectors_cascade-rcnn_r50_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..19d13d9c8c38b666b7481a58a641918b5d20e0ad --- /dev/null +++ b/mmpose/configs/mmdet/detectors/detectors_cascade-rcnn_r50_1x_coco.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', 
use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/mmpose/configs/mmdet/detectors/detectors_htc-r101_20e_coco.py b/mmpose/configs/mmdet/detectors/detectors_htc-r101_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..93d7d2b1adeb3fbdb7bac0107edf4433669e8015 --- /dev/null +++ b/mmpose/configs/mmdet/detectors/detectors_htc-r101_20e_coco.py @@ -0,0 +1,28 @@ +_base_ = '../htc/htc_r101_fpn_20e_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet101', + style='pytorch'))) diff --git a/mmpose/configs/mmdet/detectors/detectors_htc-r50_1x_coco.py b/mmpose/configs/mmdet/detectors/detectors_htc-r50_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0d2fc4f77fcca715c1dfb613306d214b636aa0c0 --- /dev/null +++ b/mmpose/configs/mmdet/detectors/detectors_htc-r50_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/mmpose/configs/mmdet/detectors/htc_r50-rfp_1x_coco.py b/mmpose/configs/mmdet/detectors/htc_r50-rfp_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..496104e12550a1985f9c9e3748a343f69d7df6d8 --- /dev/null +++ b/mmpose/configs/mmdet/detectors/htc_r50-rfp_1x_coco.py @@ -0,0 +1,24 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + 
pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/mmpose/configs/mmdet/detectors/htc_r50-sac_1x_coco.py b/mmpose/configs/mmdet/detectors/htc_r50-sac_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..72d4db963ffd95851b945911b3db9941426583ab --- /dev/null +++ b/mmpose/configs/mmdet/detectors/htc_r50-sac_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/detectors/metafile.yml b/mmpose/configs/mmdet/detectors/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..196a1cef1751bc9d5812915c4d06de220f62baa1 --- /dev/null +++ b/mmpose/configs/mmdet/detectors/metafile.yml @@ -0,0 +1,114 @@ +Collections: + - Name: DetectoRS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ASPP + - FPN + - RFP + - RPN + - ResNet + - RoIAlign + - SAC + Paper: + URL: https://arxiv.org/abs/2006.02334 + Title: 'DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution' + README: configs/detectors/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/backbones/detectors_resnet.py#L205 + Version: v2.2.0 + +Models: + - Name: cascade-rcnn_r50-rfp_1x_coco + In Collection: DetectoRS + Config: configs/detectors/cascade-rcnn_r50-rfp_1x_coco.py + Metadata: + Training Memory (GB): 7.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth + + - Name: cascade-rcnn_r50-sac_1x_coco + In Collection: DetectoRS + Config: configs/detectors/cascade-rcnn_r50-sac_1x_coco.py + Metadata: + Training Memory (GB): 5.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth + + - Name: detectors_cascade-rcnn_r50_1x_coco + In Collection: DetectoRS + Config: configs/detectors/detectors_cascade-rcnn_r50_1x_coco.py + Metadata: + Training Memory (GB): 9.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth + + - Name: htc_r50-rfp_1x_coco + In Collection: DetectoRS + Config: configs/detectors/htc_r50-rfp_1x_coco.py + Metadata: + Training Memory (GB): 11.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth + + - Name: htc_r50-sac_1x_coco + In Collection: DetectoRS + Config: configs/detectors/htc_r50-sac_1x_coco.py + Metadata: + Training Memory (GB): 9.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth + + - Name: detectors_htc-r50_1x_coco + In Collection: DetectoRS + Config: configs/detectors/detectors_htc-r50_1x_coco.py + Metadata: + Training Memory (GB): 13.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth diff --git a/mmpose/configs/mmdet/detr/README.md b/mmpose/configs/mmdet/detr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8e843f369be40cac73bbc098d6bb04097de0a722 --- /dev/null +++ b/mmpose/configs/mmdet/detr/README.md @@ -0,0 +1,37 @@ +# DETR + +> [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) + + + +## Abstract + +We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines. + +
+ +
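The set-prediction formulation above relies on one-to-one bipartite matching between the learned object queries and the ground-truth boxes. As a rough orientation, here is a minimal NumPy sketch of that matching step, with weights mirroring the `match_costs` of the `HungarianAssigner` in `detr_r50_8xb2-150e_coco.py` added later in this diff (classification cost weight 1.0, L1 box cost weight 5.0); the GIoU term and all mmdet-specific details are omitted, so treat this as an illustration rather than the actual implementation:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment


def hungarian_match(pred_logits, pred_boxes, gt_labels, gt_boxes,
                    cls_weight=1.0, l1_weight=5.0):
    """Match queries to ground truth by minimising a class + L1 box cost."""
    # Softmax over classes, then take the probability of each GT label.
    probs = np.exp(pred_logits)
    probs /= probs.sum(axis=-1, keepdims=True)
    cls_cost = -probs[:, gt_labels]                      # (num_queries, num_gts)
    # L1 distance between normalised (cx, cy, w, h) boxes.
    l1_cost = np.abs(pred_boxes[:, None] - gt_boxes[None]).sum(-1)
    cost = cls_weight * cls_cost + l1_weight * l1_cost
    return linear_sum_assignment(cost)                   # (query_idx, gt_idx)


# Example: 5 queries, 2 ground-truth objects, 4 classes.
rng = np.random.default_rng(0)
q_idx, g_idx = hungarian_match(
    pred_logits=rng.normal(size=(5, 4)),
    pred_boxes=rng.random((5, 4)),
    gt_labels=np.array([1, 3]),
    gt_boxes=rng.random((2, 4)))
print(list(zip(q_idx.tolist(), g_idx.tolist())))         # one query per GT box
```

Each ground-truth box ends up assigned to exactly one query; the remaining queries are supervised toward the "no object" class.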
+ +## Results and Models + +| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :---: | :-----: | :------: | :------------: | :----: | :------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | DETR | 150e | 7.9 | | 39.9 | [config](./detr_r50_8xb2-150e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/detr/detr_r50_8xb2-150e_coco/detr_r50_8xb2-150e_coco_20221023_153551-436d03e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/detr/detr_r50_8xb2-150e_coco/detr_r50_8xb2-150e_coco_20221023_153551.log.json) | + +## Citation + +We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872). + +```latex +@inproceedings{detr, + author = {Nicolas Carion and + Francisco Massa and + Gabriel Synnaeve and + Nicolas Usunier and + Alexander Kirillov and + Sergey Zagoruyko}, + title = {End-to-End Object Detection with Transformers}, + booktitle = {ECCV}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/detr/detr_r101_8xb2-500e_coco.py b/mmpose/configs/mmdet/detr/detr_r101_8xb2-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6661aacdc54e889aa38b2e759c40fd9797ae44ad --- /dev/null +++ b/mmpose/configs/mmdet/detr/detr_r101_8xb2-500e_coco.py @@ -0,0 +1,7 @@ +_base_ = './detr_r50_8xb2-500e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/detr/detr_r18_8xb2-500e_coco.py b/mmpose/configs/mmdet/detr/detr_r18_8xb2-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..305b9d6fee8d75273b588f32b2e21582473cb137 --- /dev/null +++ b/mmpose/configs/mmdet/detr/detr_r18_8xb2-500e_coco.py @@ -0,0 +1,7 @@ +_base_ = './detr_r50_8xb2-500e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[512])) diff --git a/mmpose/configs/mmdet/detr/detr_r50_8xb2-150e_coco.py b/mmpose/configs/mmdet/detr/detr_r50_8xb2-150e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa15410532e552cae387ef4eaa57227af1d855d --- /dev/null +++ b/mmpose/configs/mmdet/detr/detr_r50_8xb2-150e_coco.py @@ -0,0 +1,155 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DETR', + num_queries=100, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=None, + num_outs=1), + encoder=dict( # DetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + ffn_cfg=dict( + 
embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)))), + decoder=dict( # DetrTransformerDecoder + num_layers=6, + layer_cfg=dict( # DetrTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + cross_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True))), + return_intermediate=True), + positional_encoding=dict(num_feats=128, normalize=True), + bbox_head=dict( + type='DETRHead', + num_classes=80, + embed_dims=256, + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='ClassificationCost', weight=1.), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=100)) + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) + +# learning policy +max_epochs = 150 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[100], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/detr/detr_r50_8xb2-500e_coco.py b/mmpose/configs/mmdet/detr/detr_r50_8xb2-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f07d5dce05b08c74aea2059989b45d5d275c53e0 --- /dev/null +++ b/mmpose/configs/mmdet/detr/detr_r50_8xb2-500e_coco.py @@ -0,0 +1,24 @@ +_base_ = './detr_r50_8xb2-150e_coco.py' + +# learning policy +max_epochs = 500 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=10) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[334], + gamma=0.1) +] + +# only keep latest 2 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/detr/metafile.yml b/mmpose/configs/mmdet/detr/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..a9132dff0228e31c146ae46ed32445491f4225c1 --- /dev/null +++ b/mmpose/configs/mmdet/detr/metafile.yml @@ -0,0 +1,33 @@ +Collections: + - Name: DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2005.12872 + Title: 'End-to-End Object Detection with Transformers' + README: configs/detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/detectors/detr.py#L7 + Version: v2.7.0 + +Models: + - Name: detr_r50_8xb2-150e_coco + In Collection: DETR + Config: configs/detr/detr_r50_8xb2-150e_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 150 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/detr/detr_r50_8xb2-150e_coco/detr_r50_8xb2-150e_coco_20221023_153551-436d03e8.pth diff --git a/mmpose/configs/mmdet/dino/README.md b/mmpose/configs/mmdet/dino/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d8a01bde25582023ab65c0304faa8ef14340a27a --- /dev/null +++ b/mmpose/configs/mmdet/dino/README.md @@ -0,0 +1,40 @@ +# DINO + +> [DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection](https://arxiv.org/abs/2203.03605) + + + +## Abstract + +We present DINO (DETR with Improved deNoising anchOr boxes), a state-of-the-art end-to-end object detector. DINO improves over previous DETR-like models in performance and efficiency by using a contrastive way for denoising training, a mixed query selection method for anchor initialization, and a look forward twice scheme for box prediction. DINO achieves 49.4AP in 12 epochs and 51.3AP in 24 epochs on COCO with a ResNet-50 backbone and multi-scale features, yielding a significant improvement of +6.0AP and +2.7AP, respectively, compared to DN-DETR, the previous best DETR-like model. DINO scales well in both model size and data size. Without bells and whistles, after pre-training on the Objects365 dataset with a SwinL backbone, DINO obtains the best results on both COCO val2017 (63.2AP) and test-dev (63.3AP). Compared to other models on the leaderboard, DINO significantly reduces its model size and pre-training data size while achieving better results. + +
+ +
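For a quick trial of these configs, a hedged usage sketch with the high-level mmdet 3.x inference API is shown below; the config path follows the layout of this diff, the checkpoint name is the one listed in the results table that follows, and `demo.jpg` is a placeholder image:

```python
# Minimal sketch, assuming mmdet 3.x and its dependencies are installed and the
# checkpoint from the results table below has been downloaded locally.
from mmdet.apis import init_detector, inference_detector

config_file = 'mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-12e_coco.py'
checkpoint_file = 'dino-4scale_r50_8xb2-12e_coco_20221202_182705-55b2bba2.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')   # returns a DetDataSample
print(result.pred_instances.bboxes.shape)        # (num_detections, 4)
print(result.pred_instances.scores[:5])          # first few confidence scores
```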
+ +## Results and Models + +| Backbone | Model | Lr schd | Better-Hyper | box AP | Config | Download | +| :------: | :---------: | :-----: | :----------: | :----: | :---------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | DINO-4scale | 12e | False | 49.0 | [config](./dino-4scale_r50_8xb2-12e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_8xb2-12e_coco/dino-4scale_r50_8xb2-12e_coco_20221202_182705-55b2bba2.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_8xb2-12e_coco/dino-4scale_r50_8xb2-12e_coco_20221202_182705.log.json) | +| R-50 | DINO-4scale | 12e | True | 50.1 | [config](./dino-4scale_r50_improved_8xb2-12e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_improved_8xb2-12e_coco/dino-4scale_r50_improved_8xb2-12e_coco_20230818_162607-6f47a913.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_improved_8xb2-12e_coco/dino-4scale_r50_improved_8xb2-12e_coco_20230818_162607.log.json) | +| Swin-L | DINO-5scale | 12e | False | 57.2 | [config](./dino-5scale_swin-l_8xb2-12e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-5scale_swin-l_8xb2-12e_coco/dino-5scale_swin-l_8xb2-12e_coco_20230228_072924-a654145f.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/dino/dino-5scale_swin-l_8xb2-12e_coco/dino-5scale_swin-l_8xb2-12e_coco_20230228_072924.log) | +| Swin-L | DINO-5scale | 36e | False | 58.4 | [config](./dino-5scale_swin-l_8xb2-36e_coco.py) | [model](https://github.com/RistoranteRist/mmlab-weights/releases/download/dino-swinl/dino-5scale_swin-l_8xb2-36e_coco-5486e051.pth) \| [log](https://github.com/RistoranteRist/mmlab-weights/releases/download/dino-swinl/20230307_032359.log) | + +### NOTE + +The performance is unstable. `DINO-4scale` with `R-50` may fluctuate about 0.4 mAP. + +## Citation + +We provide the config files for DINO: [DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection](https://arxiv.org/abs/2203.03605). + +```latex +@misc{zhang2022dino, + title={DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection}, + author={Hao Zhang and Feng Li and Shilong Liu and Lei Zhang and Hang Su and Jun Zhu and Lionel M. 
Ni and Heung-Yeung Shum}, + year={2022}, + eprint={2203.03605}, + archivePrefix={arXiv}, + primaryClass={cs.CV}} +``` diff --git a/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-12e_coco.py b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-12e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5831f898b4a706accb2b828b6194b2974e78d0fc --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-12e_coco.py @@ -0,0 +1,163 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DINO', + num_queries=900, # num_matching_queries + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( + num_layers=6, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0))), # 0.1 for DeformDETR + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, + dropout=0.0), # 0.1 for DeformDETR + cross_attn_cfg=dict(embed_dims=256, num_levels=4, + dropout=0.0), # 0.1 for DeformDETR + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, # 1024 for DeformDETR + ffn_drop=0.0)), # 0.1 for DeformDETR + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, + normalize=True, + offset=0.0, # -0.5 for DeformDETR + temperature=20), # 10000 for DeformDETR + bbox_head=dict( + type='DINOHead', + num_classes=80, + sync_cls_avg_factor=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), # 2.0 in DeformDETR + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( # TODO: Move to model.train_cfg ? + label_noise_scale=0.5, + box_noise_scale=1.0, # 0.4 for DN-DETR + group_cfg=dict(dynamic=True, num_groups=None, + num_dn_queries=100)), # TODO: half num_dn_queries + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) # 100 for DeformDETR + +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. 
+train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=0.0001, # 0.0002 for DeformDETR + weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)}) +) # custom_keys contains sampling_offsets and reference_points in DeformDETR # noqa + +# learning policy +max_epochs = 12 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-24e_coco.py b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-24e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8534ac6a7ccc7f3f8c081275b3567a0a0792b7a5 --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-24e_coco.py @@ -0,0 +1,13 @@ +_base_ = './dino-4scale_r50_8xb2-12e_coco.py' +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-36e_coco.py b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-36e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1c2cf4602d358dfed5b737f8a74843c89a54702d --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-4scale_r50_8xb2-36e_coco.py @@ -0,0 +1,13 @@ +_base_ = './dino-4scale_r50_8xb2-12e_coco.py' +max_epochs = 36 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[30], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/dino/dino-4scale_r50_improved_8xb2-12e_coco.py b/mmpose/configs/mmdet/dino/dino-4scale_r50_improved_8xb2-12e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6a4a82bacc1f1e990d4720db81cae0af5c012557 --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-4scale_r50_improved_8xb2-12e_coco.py @@ -0,0 +1,18 @@ +_base_ = ['dino-4scale_r50_8xb2-12e_coco.py'] + +# from deformable detr hyper +model = dict( + backbone=dict(frozen_stages=-1), + bbox_head=dict(loss_cls=dict(loss_weight=2.0)), + positional_encoding=dict(offset=-0.5, temperature=10000), + dn_cfg=dict(group_cfg=dict(num_dn_queries=300))) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.0002), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1), + 'sampling_offsets': dict(lr_mult=0.1), + 'reference_points': dict(lr_mult=0.1) + })) diff --git a/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-12e_coco.py b/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-12e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d39f22f50926a11137d143976fe4033ec3a8640 --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-12e_coco.py @@ -0,0 +1,30 @@ +_base_ = './dino-4scale_r50_8xb2-12e_coco.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +num_levels = 5 +model = dict( + num_feature_levels=num_levels, + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=True, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels), + encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))), + 
decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels)))) diff --git a/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-36e_coco.py b/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-36e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d55a38e61d411892c6de819cf46247ba4d41d427 --- /dev/null +++ b/mmpose/configs/mmdet/dino/dino-5scale_swin-l_8xb2-36e_coco.py @@ -0,0 +1,13 @@ +_base_ = './dino-5scale_swin-l_8xb2-12e_coco.py' +max_epochs = 36 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/dino/metafile.yml b/mmpose/configs/mmdet/dino/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f276a04ef557b70443083ac70b6a16671e7fa6e1 --- /dev/null +++ b/mmpose/configs/mmdet/dino/metafile.yml @@ -0,0 +1,85 @@ +Collections: + - Name: DINO + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2203.03605 + Title: 'DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection' + README: configs/dino/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/f4112c9e5611468ffbd57cfba548fd1289264b52/mmdet/models/detectors/dino.py#L17 + Version: v3.0.0rc6 + +Models: + - Name: dino-4scale_r50_8xb2-12e_coco + In Collection: DINO + Config: configs/dino/dino-4scale_r50_8xb2-12e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_8xb2-12e_coco/dino-4scale_r50_8xb2-12e_coco_20221202_182705-55b2bba2.pth + + - Name: dino-4scale_r50_8xb2-24e_coco + In Collection: DINO + Config: configs/dino/dino-4scale_r50_8xb2-24e_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + + - Name: dino-4scale_r50_8xb2-36e_coco + In Collection: DINO + Config: configs/dino/dino-4scale_r50_8xb2-36e_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + + - Name: dino-5scale_swin-l_8xb2-12e_coco + In Collection: DINO + Config: configs/dino/dino-5scale_swin-l_8xb2-12e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 57.2 + Weights: https://download.openmmlab.com/mmdetection/v3.0/dino/dino-5scale_swin-l_8xb2-12e_coco/dino-5scale_swin-l_8xb2-12e_coco_20230228_072924-a654145f.pth + + - Name: dino-5scale_swin-l_8xb2-36e_coco + In Collection: DINO + Config: configs/dino/dino-5scale_swin-l_8xb2-36e_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 58.4 + Weights: https://github.com/RistoranteRist/mmlab-weights/releases/download/dino-swinl/dino-5scale_swin-l_8xb2-36e_coco-5486e051.pth + - Name: dino-4scale_r50_improved_8xb2-12e_coco + In Collection: DINO + Config: configs/dino/dino-4scale_r50_improved_8xb2-12e_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/dino/dino-4scale_r50_improved_8xb2-12e_coco/dino-4scale_r50_improved_8xb2-12e_coco_20230818_162607-6f47a913.pth diff --git a/mmpose/configs/mmdet/double_heads/README.md 
b/mmpose/configs/mmdet/double_heads/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b97dbc188df1557814f40e792940ab45a845781 --- /dev/null +++ b/mmpose/configs/mmdet/double_heads/README.md @@ -0,0 +1,32 @@ +# Double Heads + +> [Rethinking Classification and Localization for Object Detection](https://arxiv.org/abs/1904.06493) + + + +## Abstract + +Two head structures (i.e. fully connected head and convolution head) have been widely used in R-CNN based detectors for classification and localization tasks. However, there is a lack of understanding of how does these two head structures work for these two tasks. To address this issue, we perform a thorough analysis and find an interesting fact that the two head structures have opposite preferences towards the two tasks. Specifically, the fully connected head (fc-head) is more suitable for the classification task, while the convolution head (conv-head) is more suitable for the localization task. Furthermore, we examine the output feature maps of both heads and find that fc-head has more spatial sensitivity than conv-head. Thus, fc-head has more capability to distinguish a complete object from part of an object, but is not robust to regress the whole object. Based upon these findings, we propose a Double-Head method, which has a fully connected head focusing on classification and a convolution head for bounding box regression. Without bells and whistles, our method gains +3.5 and +2.8 AP on MS COCO dataset from Feature Pyramid Network (FPN) baselines with ResNet-50 and ResNet-101 backbones, respectively. + +
+ +
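To make the fc-head / conv-head split concrete, here is a toy PyTorch sketch; it is an illustration only, not mmdet's `DoubleConvFCBBoxHead` (which uses residual conv blocks), with sizes loosely following the config added below (256-channel 7x7 RoI features, 1024 hidden units, 80 classes):

```python
import torch
import torch.nn as nn


class ToyDoubleHead(nn.Module):
    """fc branch for classification, conv branch for box regression."""

    def __init__(self, in_channels=256, hidden=1024, roi_size=7, num_classes=80):
        super().__init__()
        # fc-head: preferred for classification.
        self.fc_branch = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_channels * roi_size * roi_size, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU())
        self.cls_score = nn.Linear(hidden, num_classes + 1)   # +1 for background
        # conv-head: preferred for localisation.
        self.conv_branch = nn.Sequential(
            nn.Conv2d(in_channels, hidden, 3, padding=1), nn.ReLU(),
            nn.Conv2d(hidden, hidden, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())
        self.bbox_pred = nn.Linear(hidden, num_classes * 4)   # class-specific deltas

    def forward(self, roi_feats):                 # (N, 256, 7, 7) pooled RoIs
        cls_logits = self.cls_score(self.fc_branch(roi_feats))
        bbox_deltas = self.bbox_pred(self.conv_branch(roi_feats))
        return cls_logits, bbox_deltas


rois = torch.randn(8, 256, 7, 7)
cls_logits, bbox_deltas = ToyDoubleHead()(rois)
print(cls_logits.shape, bbox_deltas.shape)        # (8, 81) (8, 320)
```

In the actual config below, `reg_class_agnostic=False` corresponds to the class-specific regression output sketched here, and both the classification and regression losses use `loss_weight=2.0`.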
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 6.8 | 9.5 | 40.0 | [config](./dh-faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130_220238.log.json) | + +## Citation + +```latex +@article{wu2019rethinking, + title={Rethinking Classification and Localization for Object Detection}, + author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu}, + year={2019}, + eprint={1904.06493}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/mmpose/configs/mmdet/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6b9b6e69a12d978a55fbba049fc2b1c5229c1fc5 --- /dev/null +++ b/mmpose/configs/mmdet/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,23 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DoubleHeadRoIHead', + reg_roi_scale_factor=1.3, + bbox_head=dict( + _delete_=True, + type='DoubleConvFCBBoxHead', + num_convs=4, + num_fcs=2, + in_channels=256, + conv_out_channels=1024, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) diff --git a/mmpose/configs/mmdet/double_heads/metafile.yml b/mmpose/configs/mmdet/double_heads/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..bb14e7968e259bb6dae1bbd6dad5e1c4e862f228 --- /dev/null +++ b/mmpose/configs/mmdet/double_heads/metafile.yml @@ -0,0 +1,41 @@ +Collections: + - Name: Rethinking Classification and Localization for Object Detection + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/pdf/1904.06493 + Title: 'Rethinking Classification and Localization for Object Detection' + README: configs/double_heads/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/roi_heads/double_roi_head.py#L6 + Version: v2.0.0 + +Models: + - Name: dh-faster-rcnn_r50_fpn_1x_coco + In Collection: Rethinking Classification and Localization for Object Detection + Config: configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.8 + inference time (ms/im): + - value: 105.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 
12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth diff --git a/mmpose/configs/mmdet/dsdl/README.md b/mmpose/configs/mmdet/dsdl/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f38c3b65ac67ee623eb909acbd1dc8ad3eafa0af --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/README.md @@ -0,0 +1,63 @@ +# DSDL: Standard Description Language for DataSet + + + +## 1. Abstract + +Data is the cornerstone of artificial intelligence. The efficiency of data acquisition, exchange, and application directly impacts the advances in technologies and applications. Over the long history of AI, a vast quantity of data sets have been developed and distributed. However, these datasets are defined in very different forms, which incurs significant overhead when it comes to exchange, integration, and utilization -- it is often the case that one needs to develop a new customized tool or script in order to incorporate a new dataset into a workflow. + +To overcome such difficulties, we develop **Data Set Description Language (DSDL)**. More details please visit our [official documents](https://opendatalab.github.io/dsdl-docs/getting_started/overview/), dsdl datasets can be downloaded from our platform [OpenDataLab](https://opendatalab.com/). + +## 2. Steps + +- install dsdl: + + install by pip: + + ``` + pip install dsdl + ``` + + install by source code: + + ``` + git clone https://github.com/opendatalab/dsdl-sdk.git -b schema-dsdl + cd dsdl-sdk + python setup.py install + ``` + +- install mmdet and pytorch: + please refer this [installation documents](https://mmdetection.readthedocs.io/en/latest/get_started.html). + +- train: + + - using single gpu: + + ``` + python tools/train.py {config_file} + ``` + + - using slurm: + + ``` + ./tools/slurm_train.sh {partition} {job_name} {config_file} {work_dir} {gpu_nums} + ``` + +## 3. Test Results + +- detection task: + + | Datasets | Model | box AP | Config | + | :--------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----: | :-------------------------: | + | VOC07+12 | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712-54bef0f3.pth) | 80.3\* | [config](./voc0712.py) | + | COCO | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) | 37.4 | [config](./coco.py) | + | Objects365 | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040-5910b015.pth) | 19.8 | [config](./objects365v2.py) | + | OpenImages | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth) | 59.9\* | [config](./openimagesv6.py) | + + \*: box AP in voc metric and openimages metric, actually means AP_50. 
+ +- instance segmentation task: + + | Datasets | Model | box AP | mask AP | Config | + | :------: | :------------------------------------------------------------------------------------------------------------------------------------------: | :----: | :-----: | :--------------------------: | + | COCO | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) | 38.1 | 34.7 | [config](./coco_instance.py) | diff --git a/mmpose/configs/mmdet/dsdl/coco.py b/mmpose/configs/mmdet/dsdl/coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3c9e895e53c1588028cf6def2fe79d49fd98d6e1 --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/coco.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + '../_base_/datasets/dsdl.py' +] + +# dsdl dataset settings + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. +data_root = 'data/COCO2017' +img_prefix = 'original' +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-val/val.yaml' +specific_key_path = dict(ignore_flag='./annotations/*/iscrowd') + +train_dataloader = dict( + dataset=dict( + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + )) + +val_dataloader = dict( + dataset=dict( + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + )) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/dsdl/coco_instance.py b/mmpose/configs/mmdet/dsdl/coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..e34f93c97f55f5eeef55f9de73f1a8389f8980c6 --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/coco_instance.py @@ -0,0 +1,62 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + '../_base_/datasets/dsdl.py' +] + +# dsdl dataset settings. + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. 
+data_root = 'data/COCO2017' +img_prefix = 'original' +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-val/val.yaml' +specific_key_path = dict(ignore_flag='./annotations/*/iscrowd') + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances')) +] + +train_dataloader = dict( + dataset=dict( + with_polygon=True, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline, + )) + +val_dataloader = dict( + dataset=dict( + with_polygon=True, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + pipeline=test_pipeline, + )) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', metric=['bbox', 'segm'], format_only=False) + +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/dsdl/objects365v2.py b/mmpose/configs/mmdet/dsdl/objects365v2.py new file mode 100644 index 0000000000000000000000000000000000000000..d25a2323027c22eaf9777f6e62e4992880b29d2c --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/objects365v2.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + '../_base_/datasets/dsdl.py' +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=365))) + +# dsdl dataset settings + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. 
+data_root = 'data/Objects365' +img_prefix = 'original' +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-val/val.yaml' +specific_key_path = dict(ignore_flag='./annotations/*/iscrowd') + +train_dataloader = dict( + dataset=dict( + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + )) + +val_dataloader = dict( + dataset=dict( + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + test_mode=True, + )) +test_dataloader = val_dataloader + +default_hooks = dict(logger=dict(type='LoggerHook', interval=1000), ) +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=3, val_interval=1) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[1, 2], + gamma=0.1) +] +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/dsdl/openimagesv6.py b/mmpose/configs/mmdet/dsdl/openimagesv6.py new file mode 100644 index 0000000000000000000000000000000000000000..a65f942a0d4f8cfdaa3cfb712276d6de34d62a84 --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/openimagesv6.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', + '../_base_/default_runtime.py', +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=601))) + +# dsdl dataset settings + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. +dataset_type = 'DSDLDetDataset' +data_root = 'data/OpenImages' +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-val/val.yaml' +specific_key_path = dict( + image_level_labels='./image_labels/*/label', + Label='./objects/*/label', + is_group_of='./objects/*/isgroupof', +) + +backend_args = dict( + backend='petrel', + path_mapping=dict({'data/': 's3://open_dataset_original/'})) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1024, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1024, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances', 'image_level_labels')) +] + +train_dataloader = dict( + sampler=dict(type='ClassAwareSampler', num_sample_class=1), + dataset=dict( + type=dataset_type, + with_imagelevel_label=True, + with_hierarchy=True, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=train_ann, + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + dataset=dict( + type=dataset_type, + with_imagelevel_label=True, + with_hierarchy=True, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=val_ann, + test_mode=True, + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +default_hooks = dict(logger=dict(type='LoggerHook', interval=1000), ) +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=3, val_interval=1) 
+param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[1, 2], + gamma=0.1) +] +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +val_evaluator = dict( + type='OpenImagesMetric', + iou_thrs=0.5, + ioa_thrs=0.5, + use_group_of=True, + get_supercategory=True) + +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/dsdl/voc07.py b/mmpose/configs/mmdet/dsdl/voc07.py new file mode 100644 index 0000000000000000000000000000000000000000..b7b864714e4987ca9d31eda5fee746e741b7aa10 --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/voc07.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/default_runtime.py' +] + +# model setting +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +# dsdl dataset settings + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. +dataset_type = 'DSDLDetDataset' +data_root = 'data/VOC07-det' +img_prefix = 'original' +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-test/test.yaml' + +specific_key_path = dict(ignore_flag='./objects/*/difficult') + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances')) +] +train_dataloader = dict( + dataset=dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + dataset=dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# Pascal VOC2007 uses `11points` as default evaluate mode, while PASCAL +# VOC2012 defaults to use 'area'. +val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') +# val_evaluator = dict(type='CocoMetric', metric='bbox') +test_evaluator = val_evaluator + +# training schedule, voc dataset is repeated 3 times, in +# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 +max_epochs = 12 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[9], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/dsdl/voc0712.py b/mmpose/configs/mmdet/dsdl/voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec1bb8f98e56d0402c9a80934c3b77bd7919fa4 --- /dev/null +++ b/mmpose/configs/mmdet/dsdl/voc0712.py @@ -0,0 +1,132 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', + '../_base_/default_runtime.py', + # '../_base_/datasets/dsdl.py' +] + +# model setting +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +# dsdl dataset settings + +# please visit our platform [OpenDataLab](https://opendatalab.com/) +# to downloaded dsdl dataset. +dataset_type = 'DSDLDetDataset' +data_root_07 = 'data/VOC07-det' +data_root_12 = 'data/VOC12-det' +img_prefix = 'original' + +train_ann = 'dsdl/set-train/train.yaml' +val_ann = 'dsdl/set-val/val.yaml' +test_ann = 'dsdl/set-test/test.yaml' + +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances')) +] + +specific_key_path = dict(ignore_flag='./objects/*/difficult', ) + +train_dataloader = dict( + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root_07, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root_07, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root_12, + ann_file=train_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root_12, + ann_file=val_ann, + data_prefix=dict(img_path=img_prefix), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=32), + pipeline=train_pipeline), + ]))) + +val_dataloader = dict( + dataset=dict( + type=dataset_type, + specific_key_path=specific_key_path, + data_root=data_root_07, + ann_file=test_ann, + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='CocoMetric', metric='bbox') +# val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') +test_evaluator = val_evaluator + +# training schedule, voc dataset is repeated 3 times, in +# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 +max_epochs = 4 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate 
+param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/dyhead/README.md b/mmpose/configs/mmdet/dyhead/README.md new file mode 100644 index 0000000000000000000000000000000000000000..decd48051f0b10ef3f9e6de8ad7476e59fb89511 --- /dev/null +++ b/mmpose/configs/mmdet/dyhead/README.md @@ -0,0 +1,52 @@ +# DyHead + +> [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) + + + +## Abstract + +The complex nature of combining localization and classification in object detection has resulted in the flourished development of methods. Previous works tried to improve the performance in various object detection heads but failed to present a unified view. In this paper, we present a novel dynamic head framework to unify object detection heads with attentions. By coherently combining multiple self-attention mechanisms between feature levels for scale-awareness, among spatial locations for spatial-awareness, and within output channels for task-awareness, the proposed approach significantly improves the representation ability of object detection heads without any computational overhead. Further experiments demonstrate that the effectiveness and efficiency of the proposed dynamic head on the COCO benchmark. With a standard ResNeXt-101-DCN backbone, we largely improve the performance over popular object detectors and achieve a new state-of-the-art at 54.0 AP. Furthermore, with latest transformer backbone and extra data, we can push current best COCO result to a new record at 60.6 AP. + +
+ +
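In this diff, DyHead is wired in as an extra neck stage rather than a new detector type. As a quick orientation before the results below, a condensed excerpt (taken from `atss_r50_fpn_dyhead_1x_coco.py`, which is added in full later in this diff, so this fragment is not a standalone config) shows the two essential changes relative to a plain ATSS baseline:

```python
# Condensed from atss_r50_fpn_dyhead_1x_coco.py: the FPN output feeds a stack
# of DyHead blocks appended to the neck, and the ATSS head is configured with
# stacked_convs=0.
model = dict(
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_output',
            num_outs=5),
        dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
    ],
    bbox_head=dict(type='ATSSHead', stacked_convs=0))
```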
+ +## Results and Models + +| Method | Backbone | Style | Setting | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----: | :------: | :-----: | :----------: | :-----: | :------: | :------------: | :----: | :----------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ATSS | R-50 | caffe | reproduction | 1x | 5.4 | 13.2 | 42.5 | [config](./atss_r50-caffe_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939.log.json) | +| ATSS | R-50 | pytorch | simple | 1x | 4.9 | 13.7 | 43.3 | [config](./atss_r50_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314.log.json) | + +- We trained the above models with 4 GPUs and 4 `samples_per_gpu`. +- The `reproduction` setting aims to reproduce the official implementation based on Detectron2. +- The `simple` setting serves as a minimal example of using DyHead in MMDetection. Specifically, + - it adds `DyHead` to `neck` after `FPN` + - it sets `stacked_convs=0` in `bbox_head` +- The `simple` setting achieves higher AP than the original implementation. + We have not conducted an ablation study between the two settings. + `dict(type='Pad', size_divisor=128)` may further improve AP by preferring spatial alignment across pyramid levels, although large padding reduces efficiency. + +We also trained the model with a Swin-L backbone. Results are shown below.
+ +| Method | Backbone | Style | Setting | Lr schd | mstrain | box AP | Config | Download | +| :----: | :------: | :---: | :----------: | :-----: | :------: | :----: | :-----------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ATSS | Swin-L | caffe | reproduction | 2x | 480~1200 | 56.2 | [config](./atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315.log.json) | + +## Relation to Other Methods + +- DyHead can be regarded as an improved [SEPC](https://arxiv.org/abs/2005.03101) with [DyReLU modules](https://arxiv.org/abs/2003.10027) and simplified [SE blocks](https://arxiv.org/abs/1709.01507). +- Xiyang Dai et al., the author team of DyHead, adopt it for [Dynamic DETR](https://openaccess.thecvf.com/content/ICCV2021/html/Dai_Dynamic_DETR_End-to-End_Object_Detection_With_Dynamic_Attention_ICCV_2021_paper.html). + The description of Dynamic Encoder in Sec. 3.2 will help you understand DyHead. + +## Citation + +```latex +@inproceedings{DyHead_CVPR2021, + author = {Dai, Xiyang and Chen, Yinpeng and Xiao, Bin and Chen, Dongdong and Liu, Mengchen and Yuan, Lu and Zhang, Lei}, + title = {Dynamic Head: Unifying Object Detection Heads With Attentions}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2021} +} +``` diff --git a/mmpose/configs/mmdet/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py b/mmpose/configs/mmdet/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8716f1226cb0b37435d0318d62599a74e6126f19 --- /dev/null +++ b/mmpose/configs/mmdet/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py @@ -0,0 +1,103 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='ATSS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=128), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + dict( + type='DyHead', + in_channels=256, + out_channels=256, + num_blocks=6, + # disable zero_init_offset to follow official implementation + zero_init_offset=False) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + pred_kernel_size=1, # follow DyHead official implementation + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + 
octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), # follow DyHead official implementation + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/dyhead/atss_r50_fpn_dyhead_1x_coco.py b/mmpose/configs/mmdet/dyhead/atss_r50_fpn_dyhead_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..89e89b98ca437bb13fe5d01acc05cfdcd04e8fa0 --- /dev/null +++ b/mmpose/configs/mmdet/dyhead/atss_r50_fpn_dyhead_1x_coco.py @@ -0,0 +1,72 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='ATSS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + 
debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/dyhead/atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py b/mmpose/configs/mmdet/dyhead/atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f537b9dc9b17aa50f0044b874585fe1e0ba15216 --- /dev/null +++ b/mmpose/configs/mmdet/dyhead/atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py @@ -0,0 +1,140 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +model = dict( + type='ATSS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=128), + backbone=dict( + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=[ + dict( + type='FPN', + in_channels=[384, 768, 1536], + out_channels=256, + start_level=0, + add_extra_convs='on_output', + num_outs=5), + dict( + type='DyHead', + in_channels=256, + out_channels=256, + num_blocks=6, + # disable zero_init_offset to follow official implementation + zero_init_offset=False) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + pred_kernel_size=1, # follow DyHead official implementation + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), # follow DyHead official implementation + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=[(2000, 480), (2000, 1200)], + keep_ratio=True, + backend='pillow'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(2000, 1200), keep_ratio=True, backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 
'scale_factor')) +] +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=2, + dataset=dict( + type={{_base_.dataset_type}}, + data_root={{_base_.data_root}}, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + }), + clip_grad=None) diff --git a/mmpose/configs/mmdet/dyhead/metafile.yml b/mmpose/configs/mmdet/dyhead/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..28b5a5821c81cea3213494c712910f904ae117f2 --- /dev/null +++ b/mmpose/configs/mmdet/dyhead/metafile.yml @@ -0,0 +1,76 @@ +Collections: + - Name: DyHead + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x T4 GPUs + Architecture: + - ATSS + - DyHead + - FPN + - ResNet + - Deformable Convolution + - Pyramid Convolution + Paper: + URL: https://arxiv.org/abs/2106.08322 + Title: 'Dynamic Head: Unifying Object Detection Heads with Attentions' + README: configs/dyhead/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/necks/dyhead.py#L130 + Version: v2.22.0 + +Models: + - Name: atss_r50-caffe_fpn_dyhead_1x_coco + In Collection: DyHead + Config: configs/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py + Metadata: + Training Memory (GB): 5.4 + inference time (ms/im): + - value: 75.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth + + - Name: atss_r50_fpn_dyhead_1x_coco + In Collection: DyHead + Config: configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py + Metadata: + Training Memory (GB): 4.9 + inference time (ms/im): + - value: 73.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth + + - Name: atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco + In Collection: DyHead + Config: configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py + Metadata: + Training Memory (GB): 58.4 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 56.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth diff --git a/mmpose/configs/mmdet/dynamic_rcnn/README.md b/mmpose/configs/mmdet/dynamic_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b5e803a2f27f07a1e49abfc9195965e33f36b73a --- /dev/null +++ 
b/mmpose/configs/mmdet/dynamic_rcnn/README.md @@ -0,0 +1,30 @@ +# Dynamic R-CNN + +> [Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training](https://arxiv.org/abs/2004.06002) + + + +## Abstract + +Although two-stage object detectors have continuously advanced the state-of-the-art performance in recent years, the training process itself is far from crystal. In this work, we first point out the inconsistency problem between the fixed network settings and the dynamic training procedure, which greatly affects the performance. For example, the fixed label assignment strategy and regression loss function cannot fit the distribution change of proposals and thus are harmful to training high quality detectors. Consequently, we propose Dynamic R-CNN to adjust the label assignment criteria (IoU threshold) and the shape of regression loss function (parameters of SmoothL1 Loss) automatically based on the statistics of proposals during training. This dynamic design makes better use of the training samples and pushes the detector to fit more high quality samples. Specifically, our method improves upon ResNet-50-FPN baseline with 1.9% AP and 5.5% AP90 on the MS COCO dataset with no extra overhead. + +
+ +
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 3.8 | | 38.9 | [config](./dynamic-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) | + +## Citation + +```latex +@article{DynamicRCNN, + author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen}, + title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training}, + journal = {arXiv preprint arXiv:2004.06002}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f64dfa0b9102d5f7b32793b9d21e19c67afdfc2a --- /dev/null +++ b/mmpose/configs/mmdet/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DynamicRoIHead', + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict(nms=dict(iou_threshold=0.85)), + rcnn=dict( + dynamic_rcnn=dict( + iou_topk=75, + beta_topk=10, + update_iter_interval=100, + initial_iou=0.4, + initial_beta=1.0))), + test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85)))) diff --git a/mmpose/configs/mmdet/dynamic_rcnn/metafile.yml b/mmpose/configs/mmdet/dynamic_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..64ab3b0ce490a25e227b3bcd60442669608fda22 --- /dev/null +++ b/mmpose/configs/mmdet/dynamic_rcnn/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Dynamic R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Dynamic R-CNN + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/pdf/2004.06002 + Title: 'Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training' + README: configs/dynamic_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/roi_heads/dynamic_roi_head.py#L11 + Version: v2.2.0 + +Models: + - Name: dynamic-rcnn_r50_fpn_1x_coco + In Collection: Dynamic R-CNN + Config: configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth diff --git a/mmpose/configs/mmdet/efficientnet/README.md b/mmpose/configs/mmdet/efficientnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..941944db4f3fdc887da5ddc9647b3d619138478b --- /dev/null +++ b/mmpose/configs/mmdet/efficientnet/README.md @@ -0,0 +1,30 @@ +# EfficientNet + +> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5) + + + +## Introduction + +Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. + +To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. + +## Results and Models + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Efficientnet-b3 | pytorch | 1x | - | - | 40.5 | [config](./retinanet_effb3_fpn_8xb4-crop896-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806.log.json) | + +## Citation + +```latex +@article{tan2019efficientnet, + title={Efficientnet: Rethinking model scaling for convolutional neural networks}, + author={Tan, Mingxing and Le, Quoc V}, + journal={arXiv preprint arXiv:1905.11946}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/efficientnet/metafile.yml b/mmpose/configs/mmdet/efficientnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..6e220c8ad7cd0e25386d950c21616d4b92f8481e --- /dev/null +++ b/mmpose/configs/mmdet/efficientnet/metafile.yml @@ -0,0 +1,19 @@ +Models: + - Name: retinanet_effb3_fpn_8xb4-crop896-1x_coco + In Collection: RetinaNet + Config: 
configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth + Paper: + URL: https://arxiv.org/abs/1905.11946v5 + Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' + README: configs/efficientnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/backbones/efficientnet.py#L159 + Version: v2.23.0 diff --git a/mmpose/configs/mmdet/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py b/mmpose/configs/mmdet/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2d0d9cefd0b565b2cce42117eb872ac9373ea4b9 --- /dev/null +++ b/mmpose/configs/mmdet/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/schedules/schedule_1x.py', + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] + +image_size = (896, 896) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] +norm_cfg = dict(type='BN', requires_grad=True) +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + _delete_=True, + type='EfficientNet', + arch='b3', + drop_path_rate=0.2, + out_indices=(3, 4, 5), + frozen_stages=0, + norm_cfg=dict( + type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01), + norm_eval=False, + init_cfg=dict( + type='Pretrained', prefix='backbone', checkpoint=checkpoint)), + neck=dict( + in_channels=[48, 136, 384], + start_level=0, + out_channels=256, + relu_before_extra_convs=True, + no_norm_on_lateral=True, + norm_cfg=norm_cfg), + bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), + # training and testing settings + train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=image_size), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=image_size, keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.04), + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + 
end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) + +# cudnn_benchmark=True can accelerate fix-size training +env_cfg = dict(cudnn_benchmark=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (4 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) diff --git a/mmpose/configs/mmdet/empirical_attention/README.md b/mmpose/configs/mmdet/empirical_attention/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c0b4a68b6c35fefadc886c844d66d871eb90bef6 --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/README.md @@ -0,0 +1,33 @@ +# Empirical Attention + +> [An Empirical Study of Spatial Attention Mechanisms in Deep Networks](https://arxiv.org/abs/1904.05873) + + + +## Abstract + +Attention mechanisms have become a popular component in deep neural networks, yet there has been little examination of how different influencing factors and methods for computing attention from these factors affect performance. Toward a better general understanding of attention mechanisms, we present an empirical study that ablates various spatial attention elements within a generalized attention formulation, encompassing the dominant Transformer attention as well as the prevalent deformable convolution and dynamic convolution modules. Conducted on a variety of applications, the study yields significant findings about spatial attention in deep networks, some of which run counter to conventional understanding. For example, we find that the query and key content comparison in Transformer attention is negligible for self-attention, but vital for encoder-decoder attention. A proper combination of deformable convolution with key content only saliency achieves the best accuracy-efficiency tradeoff in self-attention. Our results suggest that there exists much room for improvement in the design of attention mechanisms. + +
+ +
+ +## Results and Models + +| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----------------: | :-: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](./faster-rcnn_r50-attn1111_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) | +| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](./faster-rcnn_r50-attn0010_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) | +| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](./faster-rcnn_r50-attn1111-dcn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) | +| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](./faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) | + +## Citation + +```latex +@article{zhu2019empirical, + title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, + author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng}, + journal={arXiv preprint arXiv:1904.05873}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ae17a7ee4d3516e6aca90697fa165f592cf51e --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + 
kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ], + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010_fpn_1x_coco.py b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7336d292eafe8c92407f831e712946a23e231db0 --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn0010_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ])) diff --git a/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111-dcn_fpn_1x_coco.py b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111-dcn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..980e23d4509a19fe438d5c8494e2905d940705b1 --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111-dcn_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='1111', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ], + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..426bc09fd64c16b43b33a5c797265aa9ec2c0c15 --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='1111', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ])) diff --git a/mmpose/configs/mmdet/empirical_attention/metafile.yml b/mmpose/configs/mmdet/empirical_attention/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..b488da7d29fbd632da614895272cec2025b5eccc --- /dev/null +++ b/mmpose/configs/mmdet/empirical_attention/metafile.yml @@ -0,0 +1,103 @@ +Collections: + - Name: Empirical Attention + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + - FPN + - RPN + - ResNet + - RoIAlign + - Spatial Attention + Paper: + URL: https://arxiv.org/pdf/1904.05873 + Title: 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' + README: configs/empirical_attention/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/generalized_attention.py#L10 + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_fpn_attention_1111_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + 
inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth + + - Name: faster-rcnn_r50_fpn_attention_0010_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster-rcnn_r50-attn0010_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 54.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth + + - Name: faster-rcnn_r50_fpn_attention_1111_dcn_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster-rcnn_r50-attn1111-dcn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + inference time (ms/im): + - value: 78.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth + + - Name: faster-rcnn_r50_fpn_attention_0010_dcn_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 58.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth diff --git a/mmpose/configs/mmdet/fast_rcnn/README.md b/mmpose/configs/mmdet/fast_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0bdc9359c7c6e6100fa9f08397aa46e5c9999bac --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/README.md @@ -0,0 +1,121 @@ +# Fast R-CNN + +> [Fast R-CNN](https://arxiv.org/abs/1504.08083) + + + +## Abstract + +This paper proposes a Fast Region-based Convolutional Network method (Fast R-CNN) for object detection. Fast R-CNN builds on previous work to efficiently classify object proposals using deep convolutional networks. Compared to previous work, Fast R-CNN employs several innovations to improve training and testing speed while also increasing detection accuracy. Fast R-CNN trains the very deep VGG16 network 9x faster than R-CNN, is 213x faster at test-time, and achieves a higher mAP on PASCAL VOC 2012. Compared to SPPnet, Fast R-CNN trains VGG16 3x faster, tests 10x faster, and is more accurate. + +
+ +
+
+## Introduction
+
+Before training Fast R-CNN, users should first train an [RPN](../rpn/README.md) and use it to extract the region proposals.
+The region proposals can be obtained by using the `DumpProposals` pseudo metric. The dumped result is a `dict(file_name: pred_instance)`.
+The `pred_instance` is an `InstanceData` containing the sorted boxes and scores predicted by the RPN. We provide an example of dumping proposals in the [RPN config](../rpn/rpn_r50_fpn_1x_coco.py).
+
+- First, obtain the region proposals for both the training and validation (or test) sets.
+  Change the type of `test_evaluator` to `DumpProposals` in the RPN config to dump the region proposals, as below.
+
+  The config for dumping region proposals of the training set can be set as below:
+
+  ```python
+  # For training set
+  val_dataloader = dict(
+      dataset=dict(
+          ann_file='data/coco/annotations/instances_train2017.json',
+          data_prefix=dict(img='train2017/')))
+  val_evaluator = dict(
+      _delete_=True,
+      type='DumpProposals',
+      output_dir='data/coco/proposals/',
+      proposals_file='rpn_r50_fpn_1x_train2017.pkl')
+  test_dataloader = val_dataloader
+  test_evaluator = val_evaluator
+  ```
+
+  The config for dumping region proposals of the validation set can be set as below:
+
+  ```python
+  # For validation set
+  val_evaluator = dict(
+      _delete_=True,
+      type='DumpProposals',
+      output_dir='data/coco/proposals/',
+      proposals_file='rpn_r50_fpn_1x_val2017.pkl')
+  test_evaluator = val_evaluator
+  ```
+
+  The region proposals can then be extracted with the following command:
+
+  ```bash
+  ./tools/dist_test.sh \
+      configs/rpn_r50_fpn_1x_coco.py \
+      checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \
+      8
+  ```
+
+  Users can refer to the [test tutorial](https://mmdetection.readthedocs.io/en/latest/user_guides/test.html) for more details.
+
+- Then, modify the path of `proposal_file` in the dataset and use `ProposalBroadcaster` to process both the ground-truth bounding boxes and the region proposals in the pipelines.
+  An example of the important Fast R-CNN settings is shown below:
+
+  ```python
+  train_pipeline = [
+      dict(
+          type='LoadImageFromFile',
+          backend_args={{_base_.backend_args}}),
+      dict(type='LoadProposals', num_max_proposals=2000),
+      dict(type='LoadAnnotations', with_bbox=True),
+      dict(
+          type='ProposalBroadcaster',
+          transforms=[
+              dict(type='Resize', scale=(1333, 800), keep_ratio=True),
+              dict(type='RandomFlip', prob=0.5),
+          ]),
+      dict(type='PackDetInputs')
+  ]
+  test_pipeline = [
+      dict(
+          type='LoadImageFromFile',
+          backend_args={{_base_.backend_args}}),
+      dict(type='LoadProposals', num_max_proposals=None),
+      dict(
+          type='ProposalBroadcaster',
+          transforms=[
+              dict(type='Resize', scale=(1333, 800), keep_ratio=True),
+          ]),
+      dict(
+          type='PackDetInputs',
+          meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                     'scale_factor'))
+  ]
+  train_dataloader = dict(
+      dataset=dict(
+          proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl',
+          pipeline=train_pipeline))
+  val_dataloader = dict(
+      dataset=dict(
+          proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl',
+          pipeline=test_pipeline))
+  test_dataloader = val_dataloader
+  ```
+
+- Finally, users can start training the Fast R-CNN, for example with the command sketched below.
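+
+  The exact launch command depends on your environment; the following is only a minimal sketch, assuming the standard MMDetection `tools/dist_train.sh` launcher, the `fast-rcnn_r50_fpn_1x_coco.py` config from this folder, and an 8-GPU machine:
+
+  ```bash
+  # Train Fast R-CNN on 8 GPUs; the config above references the dumped proposal files
+  ./tools/dist_train.sh \
+      configs/fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py \
+      8
+  ```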
+ +## Results and Models + +## Citation + +```latex +@inproceedings{girshick2015fast, + title={Fast r-cnn}, + author={Girshick, Ross}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2015} +} +``` diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..02c70296fca04d59b2b87801fa7834c0dc3d30f0 --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './fast-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5af6b223c5bf66928a1d79ffba904d86006a3741 --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fast-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..73425cf1ac3be429c69f6cf6b482fee91a8e2782 --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fast-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3110f9fdf590ea665c9d7b7e28a56613cd79b786 --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = './fast-rcnn_r50_fpn_1x_coco.py' + +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(type='BN', requires_grad=False), + style='caffe', + norm_eval=True, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..daefe2d2d287b865b925263a81c12a6e30c58c4d --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/fast-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadProposals', num_max_proposals=2000), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='ProposalBroadcaster', + transforms=[ + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + ]), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', 
backend_args={{_base_.backend_args}}), + dict(type='LoadProposals', num_max_proposals=None), + dict( + type='ProposalBroadcaster', + transforms=[ + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + dataset=dict( + proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl', + pipeline=train_pipeline)) +val_dataloader = dict( + dataset=dict( + proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_2x_coco.py b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d609a7c02d657e15316a4c5747983a4d9a10fc7c --- /dev/null +++ b/mmpose/configs/mmdet/fast_rcnn/fast-rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './fast-rcnn_r50_fpn_1x_coco.py' + +train_cfg = dict(max_epochs=24) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/faster_rcnn/README.md b/mmpose/configs/mmdet/faster_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8bcdcf6d5120b65cc68c24b46e8d4d35447491fd --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/README.md @@ -0,0 +1,88 @@ +# Faster R-CNN + +> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) + + + +## Abstract + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + +
+ +
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | - | - | 35.6 | [config](./faster-rcnn_r50-caffe_c4-1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50-caffe-c4_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152.log.json) | +| R-50-DC5 | caffe | 1x | - | - | 37.2 | [config](./faster-rcnn_r50-caffe-dc5_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50-caffe-dc5_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909.log.json) | +| R-50-FPN | caffe | 1x | 3.8 | | 37.8 | [config](./faster-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50-caffe_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_20200504_180032.log.json) | +| R-50-FPN | pytorch | 1x | 4.0 | 21.4 | 37.4 | [config](./faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.4 | 28.8 | 37.5 | [config](./faster-rcnn_r50_fpn_amp-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 38.4 | [config](./faster-rcnn_r50_fpn_2x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50_fpn_2x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_20200504_210434.log.json) | +| R-101-FPN | caffe | 1x | 5.7 | | 39.8 | [config](./faster-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r101-caffe_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_20200504_180057.log.json) | +| R-101-FPN | pytorch | 1x | 6.0 | 15.6 | 39.4 | [config](./faster-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r101_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130_204655.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 
39.8 | [config](./faster-rcnn_r101_fpn_2x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r101_fpn_2x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_20200504_210455.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 13.8 | 41.2 | [config](./faster-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-32x4d_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203_000520.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 41.2 | [config](./faster-rcnn_x101-32x4d_fpn_2x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-32x4d_fpn_2x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_20200506_041400.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 9.4 | 42.1 | [config](./faster-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-64x4d_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204_134340.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 41.6 | [config](./faster-rcnn_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-64x4d_fpn_2x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033.log.json) | + +## Different regression loss + +We trained with R-50-FPN pytorch style backbone for 1x schedule. 
+ +| Backbone | Loss type | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :------------: | :------: | :------------: | :----: | :----------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | L1Loss | 4.0 | 21.4 | 37.4 | [config](./faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50_fpn_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN | IoULoss | | | 37.9 | [config](./faster-rcnn_r50_fpn_iou_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50_fpn_iou_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954.log.json) | +| R-50-FPN | GIoULoss | | | 37.6 | [config](./faster-rcnn_r50_fpn_giou_1x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50_fpn_giou_1x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco_20200505_161120.log.json) | +| R-50-FPN | BoundedIoULoss | | | 37.4 | [config](./faster-rcnn_r50_fpn_bounded-iou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco_20200505_160738.log.json) | + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-----------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-C4](./faster-rcnn_r50-caffe-c4_ms-1x_coco.py) | caffe | 1x | - | | 35.9 | [config](./faster-rcnn_r50-caffe-c4_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527.log.json) | +| [R-50-DC5](./faster-rcnn_r50-caffe-dc5_ms-1x_coco.py) | caffe | 1x | - | | 37.4 | [config](./faster-rcnn_r50-caffe-dc5_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851.log.json) | +| [R-50-DC5](./faster-rcnn_r50-caffe-dc5_ms-3x_coco.py) | caffe | 3x | - | | 38.7 | [config](./faster-rcnn_r50-caffe-dc5_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107.log.json) | +| [R-50-FPN](./faster-rcnn_r50-caffe_fpn_ms-2x_coco.py) | caffe | 2x | 3.7 | | 39.7 | [config](./faster-rcnn_r50-caffe_fpn_ms-2x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50-caffe_fpn_ms-2x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_20200504_231813.log.json) | +| [R-50-FPN](./faster-rcnn_r50-caffe_fpn_ms-3x_coco.py) | caffe | 3x | 3.7 | | 39.9 | [config](./faster-rcnn_r50-caffe_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r50-caffe_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054.log.json) | +| [R-50-FPN](./faster-rcnn_r50_fpn_ms-3x_coco.py) | pytorch | 3x | 3.9 | | 40.3 | [config](./faster-rcnn_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822.log.json) | +| [R-101-FPN](./faster-rcnn_r101-caffe_fpn_ms-3x_coco.py) | caffe | 3x | 
5.6 | | 42.0 | [config](./faster-rcnn_r101-caffe_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r101-caffe_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742.log.json) | +| [R-101-FPN](./faster-rcnn_r101_fpn_ms-3x_coco.py) | pytorch | 3x | 5.8 | | 41.8 | [config](./faster-rcnn_r101_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_r101_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822.log.json) | +| [X-101-32x4d-FPN](./faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py) | pytorch | 3x | 7.0 | | 42.5 | [config](./faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-32x4d_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151.log.json) | +| [X-101-32x8d-FPN](./faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py) | pytorch | 3x | 10.1 | | 42.4 | [config](./faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-32x8d_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954.log.json) | +| [X-101-64x4d-FPN](./faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py) | pytorch | 3x | 10.0 | | 43.1 | [config](./faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py) | [model](https://download.openxlab.org.cn/models/mmdetection/FasterR-CNN/weight/faster-rcnn_x101-64x4d_fpn_ms-3x_coco) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528.log.json) | + +We further finetune some pre-trained models on the COCO subsets, which contain only a few of the 80 categories.
+
+| Backbone | Style | Class name | Pre-trained model | Mem (GB) | box AP | Config | Download |
+| ------------------------------------------------------------------------ | ----- | ------------------ | --------------------------------------------------------------- | -------- | ------ | ----------------------------------------------------------------------- | -------- |
+| [R-50-FPN](./faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py) | caffe | person | [R-50-FPN-Caffe-3x](./faster-rcnn_r50-caffe_fpn_ms-3x_coco.py) | 3.7 | 55.8 | [config](./faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929.log.json) |
+| [R-50-FPN](./faster-rcnn_r50-caffe_fpn_ms-1x_coco-person-bicycle-car.py) | caffe | person-bicycle-car | [R-50-FPN-Caffe-3x](./faster-rcnn_r50-caffe_fpn_ms-3x_coco.py) | 3.7 | 44.1 | [config](./faster-rcnn_r50-caffe_fpn_ms-1x_coco-person-bicycle-car.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117-6eda6d92.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117.log.json) |
+
+## Torchvision New Recipe (TNR)
+
+Torchvision released its high-precision ResNet models. The training details can be found on the [PyTorch website](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/). Here, we ran grid searches over the learning rate and weight decay and found the optimal hyper-parameters for the detection task.
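+
+In practice, the searched setting amounts to loading the TNR ResNet-50 weights and switching the default optimizer to AdamW with the found values. A condensed sketch is shown below; see [faster-rcnn_r50-tnr-pre_fpn_1x_coco.py](./faster-rcnn_r50-tnr-pre_fpn_1x_coco.py) for the full config.
+
+```python
+# Condensed sketch of the TNR-pretrained Faster R-CNN config.
+_base_ = [
+    '../_base_/models/faster-rcnn_r50_fpn.py',
+    '../_base_/datasets/coco_detection.py',
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+
+# Torchvision New Recipe (TNR) ResNet-50 weights.
+checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
+model = dict(
+    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
+
+# `lr` and `weight_decay` are the values found by the grid search above.
+optim_wrapper = dict(
+    optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.1),
+    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
+```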
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-TNR](./faster-rcnn_r50-tnr-pre_fpn_1x_coco.py) | pytorch | 1x | - | | 40.2 | [config](./faster-rcnn_r50-tnr-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147.log.json) | + +## Citation + +```latex +@article{Ren_2017, + title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, + year={2017}, + month={Jun}, +} +``` diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a18f1ada31ed2a2d1023d16470a271ad49c3be2e --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1cdb4d4973e364c4f37b80644388a4859f55772e --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,11 @@ +_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d113ae6295fdc3f3058ef498eb9b675154a05c12 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b471fb3cbd8a79165e0cd19afc3ba98bbcfeb74e --- /dev/null +++ 
b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a71d4afd3246d083bdf0f5a84be2fbf2340f621f --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef6d1f8ea6b45e9a4bfe438910da827d079479b --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r101_fpn_ms-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..65515c9ace8bf4445a77db2485fc8d3f95c263b9 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-c4_ms-1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-c4_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7e231e865270acf0383e03a64f151efdbf88c29e --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-c4_ms-1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +_base_.train_dataloader.dataset.pipeline = train_pipeline diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8952a5c9c6c2fe019711968fa2aa7ed2065b13f6 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50-caffe-dc5.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py new 
file mode 100644 index 0000000000000000000000000000000000000000..63a68859a85fe5556e927c04aae5cafbef1fc0b6 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py @@ -0,0 +1,14 @@ +_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +_base_.train_dataloader.dataset.pipeline = train_pipeline diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..27063468a70436a62a7cc54b8c8efc2de96ec33f --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-3x_coco.py @@ -0,0 +1,18 @@ +_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py' + +# MMEngine support the following two ways, users can choose +# according to convenience +# param_scheduler = [ +# dict( +# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa +# dict( +# type='MultiStepLR', +# begin=0, +# end=12, +# by_epoch=True, +# milestones=[28, 34], +# gamma=0.1) +# ] +_base_.param_scheduler[1].milestones = [28, 34] + +train_cfg = dict(max_epochs=36) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_c4-1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_c4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0888fc01790af82a4c7131280ca5f0247b28d9fd --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_c4-1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50-caffe-c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9129a9583c52bf8ccab38a65f35c9f14bb128d07 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_90k_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..27f49355f3be8f6a53038894405c5f1b3d9b46fa --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_90k_coco.py @@ -0,0 +1,22 @@ +_base_ = 'faster-rcnn_r50-caffe_fpn_1x_coco.py' +max_iter = 90000 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[60000, 80000], + gamma=0.1) +] + +train_cfg = dict( + _delete_=True, + 
type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=10000) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person-bicycle-car.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person-bicycle-car.py new file mode 100644 index 0000000000000000000000000000000000000000..f36bb055f87aeadc43aa1233d1d3a7bdc33fbd80 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person-bicycle-car.py @@ -0,0 +1,16 @@ +_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py' +model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) +metainfo = { + 'classes': ('person', 'bicycle', 'car'), + 'palette': [ + (220, 20, 60), + (119, 11, 32), + (0, 0, 142), + ] +} + +train_dataloader = dict(dataset=dict(metainfo=metainfo)) +val_dataloader = dict(dataset=dict(metainfo=metainfo)) +test_dataloader = dict(dataset=dict(metainfo=metainfo)) + +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py new file mode 100644 index 0000000000000000000000000000000000000000..9528b63f4deabb3610a26af59c856cee62c489c2 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py' +model = dict(roi_head=dict(bbox_head=dict(num_classes=1))) +metainfo = { + 'classes': ('person', ), + 'palette': [ + (220, 20, 60), + ] +} + +train_dataloader = dict(dataset=dict(metainfo=metainfo)) +val_dataloader = dict(dataset=dict(metainfo=metainfo)) +test_dataloader = dict(dataset=dict(metainfo=metainfo)) + +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..59f1633c807f3eb904657cfaf97113c355df3fca --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco.py @@ -0,0 +1,31 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +# MMEngine support the following two ways, users can choose +# according to convenience +# train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) 
+_base_.train_dataloader.dataset.pipeline = train_pipeline diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..44d320ea01ba53d591ab7db29742e7fffc7c81ce --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py' + +# MMEngine support the following two ways, users can choose +# according to convenience +# param_scheduler = [ +# dict( +# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa +# dict( +# type='MultiStepLR', +# begin=0, +# end=12, +# by_epoch=True, +# milestones=[16, 23], +# gamma=0.1) +# ] +_base_.param_scheduler[1].milestones = [16, 23] + +train_cfg = dict(max_epochs=24) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..365f6439241c6374554af1fd58a114ef03448877 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,15 @@ +_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6b9b3eb0e79b1ffb71d15c21274692d3b85e16ac --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-90k_coco.py @@ -0,0 +1,23 @@ +_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py' + +max_iter = 90000 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[60000, 80000], + gamma=0.1) +] + +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=10000) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-tnr-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-tnr-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7b3e5dedbe81b927492dd41b13f017bcc2bd4c92 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50-tnr-pre_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' +model = dict( + backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint))) + +# `lr` and `weight_decay` have been searched to be optimal. 
+optim_wrapper = dict( + optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.1), + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8a45417fdd4566241114e20275990a5729486932 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2981c6fbe16eb7a8b6ca1202ebb6325e2324c040 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d366f3ba0e5ff098db3e409171a88860f1cf3af --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../common/lsj-200e_coco-detection.py' +] +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict(data_preprocessor=dict(batch_augments=batch_augments)) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_amp-1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_amp-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f765deaef1db8a798c44d848c6f759755ccd4c45 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_amp-1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' + +# MMEngine support the following two ways, users can choose +# according to convenience +# optim_wrapper = dict(type='AmpOptimWrapper') +_base_.optim_wrapper.type = 'AmpOptimWrapper' diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_bounded-iou_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_bounded-iou_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7758ca80b372e7895be267cad8c4603778d160b3 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_bounded-iou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ciou_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ciou_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e8d8a3042750e8f5f9478b5e8c3111d8b7a10528 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ciou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='CIoULoss', loss_weight=12.0)))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_fcos-rpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_fcos-rpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b5a34d9f74a60388fa60afd8255d470c45f209f7 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_fcos-rpn_1x_coco.py @@ -0,0 +1,48 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + # copied from configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py + neck=dict( + start_level=1, + add_extra_convs='on_output', # use P5 + relu_before_extra_convs=True), + rpn_head=dict( + _delete_=True, # ignore the unused old settings + type='FCOSHead', + # num_classes = 1 for rpn, + # if num_classes > 1, it will be set to 1 in + # TwoStageDetector automatically + num_classes=1, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + roi_head=dict( # update featmap_strides + bbox_roi_extractor=dict(featmap_strides=[8, 16, 32, 64, 128]))) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), # Slowly increase lr, otherwise loss becomes NAN + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] diff --git 
a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_giou_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_giou_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..82b71d77bfc448eceadcd03a6c8cbc4c8f871109 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_giou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_iou_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_iou_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e21c43640cb7004e8e4ef189ff8843ad39de3c6f --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_iou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..75dcfeb7a2310938c05cc103fadec6c6e119b90b --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ms-3x_coco.py @@ -0,0 +1 @@ +_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4f804b9be283015d4ec349f0df664e9ca7326c96 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler')))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_soft-nms_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_soft-nms_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3775d8e447cb80c0fc28199be2abc4c23383eadd --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_r50_fpn_soft-nms_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + test_cfg=dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='soft_nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..395c98cd65cd5f883c9fe206a7b9c99e59acb32e --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_2x_coco.py 
b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6232d0edba51f433a930c46d03c49fc27954303f --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..88cb40fd62a87a8af13e166df16a348c26e6d29e --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py @@ -0,0 +1,14 @@ +_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..28d6290be7a75b7cceef8957e872e221fd3e78f5 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py @@ -0,0 +1,23 @@ +_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] +model = dict( + # ResNeXt-101-32x8d model trained with Caffe2 at FB, + # so the mean and std need to be changed. 
+ data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f39d6322fc3a4729ea7bbfefc207a6975efb4bf4 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..97a3c1338fe294f66109fa92de0d8a48686b8a09 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..eeaa218c9dc76123791d9e19b0ebae687cc296c9 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py @@ -0,0 +1,14 @@ +_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/faster_rcnn/metafile.yml b/mmpose/configs/mmdet/faster_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..6a201e177bad065235dd1346c1d36017c4359214 --- /dev/null +++ b/mmpose/configs/mmdet/faster_rcnn/metafile.yml @@ -0,0 +1,451 @@ +Collections: + - Name: Faster R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1506.01497 + Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" + README: configs/faster_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/faster_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: 
faster-rcnn_r50-caffe-c4_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe_c4-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth + + - Name: faster-rcnn_r50-caffe-c4_mstrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe-c4_ms-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth + + - Name: faster-rcnn_r50-caffe-dc5_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth + + - Name: faster-rcnn_r50-caffe_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth + + - Name: faster-rcnn_r50_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth + + - Name: faster-rcnn_r50_fpn_fp16_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_amp-1x_coco.py + Metadata: + Training Memory (GB): 3.4 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 34.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth + + - Name: faster-rcnn_r50_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth + + - Name: faster-rcnn_r101-caffe_fpn_1x_coco + In Collection: Faster 
R-CNN + Config: configs/faster_rcnn/faster-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth + + - Name: faster-rcnn_r101_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth + + - Name: faster-rcnn_r101_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth + + - Name: faster-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth + + - Name: faster-rcnn_x101-32x4d_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth + + - Name: faster-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 106.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth + + - Name: faster-rcnn_x101-64x4d_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 106.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: 
FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth + + - Name: faster-rcnn_r50_fpn_iou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_iou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954-938e81f0.pth + + - Name: faster-rcnn_r50_fpn_giou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_giou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth + + - Name: faster-rcnn_r50_fpn_bounded_iou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_bounded-iou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth + + - Name: faster-rcnn_r50-caffe-dc5_mstrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth + + - Name: faster-rcnn_r50-caffe-dc5_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth + + - Name: faster-rcnn_r50-caffe_fpn_ms-2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-2x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth + + - Name: faster-rcnn_r50-caffe_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth + + - Name: faster-rcnn_r50_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 3.9 + Epochs: 36 + Results: + - Task: 
Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth + + - Name: faster-rcnn_r101-caffe_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r101-caffe_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.6 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth + + - Name: faster-rcnn_r101_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r101_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.8 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth + + - Name: faster-rcnn_x101-32x4d_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 7.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth + + - Name: faster-rcnn_x101-32x8d_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 10.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth + + - Name: faster-rcnn_x101-64x4d_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 10.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth + + - Name: faster-rcnn_r50_fpn_tnr-pretrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster-rcnn_r50-tnr-pre_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth diff --git a/mmpose/configs/mmdet/fcos/README.md b/mmpose/configs/mmdet/fcos/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8d72237a059793385b43b04b7e77f3392fe30d5e --- /dev/null +++ b/mmpose/configs/mmdet/fcos/README.md @@ -0,0 +1,45 @@ +# FCOS + +> [FCOS: Fully Convolutional One-Stage Object Detection](https://arxiv.org/abs/1904.01355) + + + +## Abstract + 
+We propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks. + +
+ +
+ +## Results and Models + +| Backbone | Style | GN | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :---: | :-: | :------: | :----: | :-: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](./fcos_r50-caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) | +| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](./fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json) | +| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | [config](./fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json) | +| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](./fcos_r101-caffe_fpn_gn-head-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) | + +| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | [config](./fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) | +| R-101 | caffe | Y 
| Y | 2x | 5.5 | 17.3 | 40.8 | [config](./fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) | +| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](./fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) | + +**Notes:** + +- The X-101 backbone is X-101-64x4d. +- Tricks means setting `norm_on_bbox`, `centerness_on_reg`, `center_sampling` as `True`. +- DCN means using `DCNv2` in both backbone and head. + +## Citation + +```latex +@article{tian2019fcos, + title={FCOS: Fully Convolutional One-Stage Object Detection}, + author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, + journal={arXiv preprint arXiv:1904.01355}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5380e87483e494b4c0bc6d8846c6892811d581d3 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py @@ -0,0 +1,9 @@ +_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..286a07a2db2c6fc423f6cf039b2609ac81ede73d --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py @@ -0,0 +1,38 @@ +_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_caffe'))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# training schedule for 2x +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/fcos/fcos_r101_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/fcos/fcos_r101_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..77250e6917812d3494c8dabd52a3ed12f5f34483 --- /dev/null +++ 
b/mmpose/configs/mmdet/fcos/fcos_r101_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py' # noqa + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/fcos/fcos_r18_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/fcos/fcos_r18_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6f001024bb702c5ed0cb1103c5e10ae3cd7f599b --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r18_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py' # noqa + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2a77641dd87142d5c6d508f2f4a4ba5b70db52c1 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py @@ -0,0 +1,43 @@ +_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model setting +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_head=dict( + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=False, + center_sampling=True, + conv_bias=True, + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3.0, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict(clip_grad=None) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4eb1d5981761fab8fe0bb876ff7ef243ac31f9 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model settings +model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..928a9b4c92d217822179c0ae00ae50f6f74289b1 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='FCOS', + 
data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[102.9801, 115.9465, 122.7717], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # testing settings + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +# learning rate +param_scheduler = [ + dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..32358cd3c69800874aa77ba5746ffc0d6f3a219d --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py @@ -0,0 +1,5 @@ +# TODO: Remove this config after benchmarking all related configs +_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# dataset settings +train_dataloader = dict(batch_size=4, num_workers=4) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4d50b4ec6c4a10b07cbf73475e7af545b058605c --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py @@ -0,0 +1,30 @@ +_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# training schedule for 2x +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..a6a6c44f9b4213601b447bc02720e24dc86a53d9 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model settings +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_head=dict( + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=True, + center_sampling=True, + conv_bias=True, + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3.0, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict(clip_grad=None) diff --git a/mmpose/configs/mmdet/fcos/fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/fcos/fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b51556b8eb7f844866d7acff5c7b86c08cb2a054 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,75 @@ +_base_ = '../common/lsj-200e_coco-detection.py' + +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +# model settings +model = dict( + type='FCOS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=False, + center_sampling=True, + conv_bias=True, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # testing settings + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
+optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py b/mmpose/configs/mmdet/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..503c0e1ce79bdbc9f2a32cc65f977b0f1e968927 --- /dev/null +++ b/mmpose/configs/mmdet/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py @@ -0,0 +1,52 @@ +_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' + +# model settings +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# training schedule for 2x +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/fcos/metafile.yml b/mmpose/configs/mmdet/fcos/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..fb6527cf2d418762ae1a4a9298ade3da54ece5df --- /dev/null +++ b/mmpose/configs/mmdet/fcos/metafile.yml @@ -0,0 +1,146 @@ +Collections: + - Name: FCOS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Group Normalization + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.01355 + Title: 'FCOS: Fully Convolutional One-Stage Object Detection' + README: configs/fcos/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fcos.py#L6 + Version: v2.0.0 + +Models: + - Name: fcos_r50-caffe_fpn_gn-head_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py + Metadata: + Training Memory (GB): 3.6 + inference time (ms/im): + - value: 44.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth + + - Name: fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco + In Collection: FCOS + Config: 
configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth + + - Name: fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth + + - Name: fcos_r101-caffe_fpn_gn-head-1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 57.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth + + - Name: fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py + Metadata: + Training Memory (GB): 2.6 + inference time (ms/im): + - value: 43.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth + + - Name: fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 57.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth + + - Name: fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py + Metadata: + Training Memory (GB): 10.0 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth diff --git a/mmpose/configs/mmdet/foveabox/README.md b/mmpose/configs/mmdet/foveabox/README.md new file mode 100644 index 0000000000000000000000000000000000000000..96f1358b11840e5e03d1a640969a8d18d6197588 --- 
/dev/null +++ b/mmpose/configs/mmdet/foveabox/README.md @@ -0,0 +1,53 @@ +# FoveaBox + +> [FoveaBox: Beyond Anchor-based Object Detector](https://arxiv.org/abs/1904.03797) + + + +## Abstract + +We present FoveaBox, an accurate, flexible, and completely anchor-free framework for object detection. While almost all state-of-the-art object detectors utilize predefined anchors to enumerate possible locations, scales and aspect ratios for the search of the objects, their performance and generalization ability are also limited to the design of anchors. Instead, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. The scales of target boxes are naturally associated with feature pyramid representations. In FoveaBox, an instance is assigned to adjacent feature levels to make the model more accurate. We demonstrate its effectiveness on standard benchmarks and report extensive experimental analysis. Without bells and whistles, FoveaBox achieves state-of-the-art single model performance on the standard COCO and Pascal VOC object detection benchmarks. More importantly, FoveaBox avoids all computation and hyper-parameters related to anchor boxes, which are often sensitive to the final detection performance. We believe the simple and effective approach will serve as a solid baseline and help ease future research for object detection. + +
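+The adjacent-level assignment described in the abstract is controlled by a handful of `FoveaHead` options in the configs added below. The following sketch only restates those fields (values copied from `fovea_r50_fpn_4xb4-1x_coco.py` in this directory) and is illustrative rather than a usable config:
+
+```python
+# Per-level assignment in FoveaBox: each FPN level covers an overlapping range
+# of object scales, so a single instance can be matched to adjacent levels.
+bbox_head = dict(
+    type='FoveaHead',
+    strides=[8, 16, 32, 64, 128],           # FPN levels P3-P7
+    base_edge_list=[16, 32, 64, 128, 256],  # reference box edge per level
+    scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
+    sigma=0.4,          # shrinks the positive "fovea" area inside each box
+    with_deform=False)  # the "align" variants below set this to True
+
+if __name__ == '__main__':
+    # Sanity check: consecutive scale ranges overlap, which is what lets one
+    # instance be assigned to adjacent pyramid levels.
+    r = bbox_head['scale_ranges']
+    assert all(r[i][1] > r[i + 1][0] for i in range(len(r) - 1))
+    print('overlapping scale ranges:', r)
+```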
+ +
+ +## Introduction + +FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797): +Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. + +## Results and Models + +### Results on R50/101-FPN + +| Backbone | Style | align | ms-train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :---: | :------: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | N | 1x | 5.6 | 24.1 | 36.5 | [config](./fovea_r50_fpn_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219_223025.log.json) | +| R-50 | pytorch | N | N | 2x | 5.6 | - | 37.2 | [config](./fovea_r50_fpn_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203_112043.log.json) | +| R-50 | pytorch | Y | N | 2x | 8.1 | 19.4 | 37.9 | [config](./fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203_134252.log.json) | +| R-50 | pytorch | Y | Y | 2x | 8.1 | 18.3 | 40.4 | [config](./fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205_112557.log.json) | +| R-101 | pytorch | N | N | 1x | 9.2 | 17.4 | 38.6 | [config](./fovea_r101_fpn_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219_011740.log.json) | +| R-101 | pytorch | N | N | 2x | 11.7 | - | 40.0 | 
[config](./fovea_r101_fpn_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208_202059.log.json) | +| R-101 | pytorch | Y | N | 2x | 11.7 | 14.7 | 40.0 | [config](./fovea_r101_fpn_gn-head-align_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208_203337.log.json) | +| R-101 | pytorch | Y | Y | 2x | 11.7 | 14.7 | 42.0 | [config](./fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208_202124.log.json) | + +\[1\] *1x and 2x mean the model is trained for 12 and 24 epochs, respectively.* \ +\[2\] *Align means utilizing deformable convolution to align the cls branch.* \ +\[3\] *All results are obtained with a single model and without any test time data augmentation.*\ +\[4\] *We use 4 GPUs for training.* + +Any pull requests or issues are welcome. + +## Citation + +Please consider citing our paper in your publications if the project helps your research. BibTeX reference is as follows. 
+ +```latex +@article{kong2019foveabox, + title={FoveaBox: Beyond Anchor-based Object Detector}, + author={Kong, Tao and Sun, Fuchun and Liu, Huaping and Jiang, Yuning and Shi, Jianbo}, + journal={arXiv preprint arXiv:1904.03797}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-1x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7e8ccf910e6317bf576463fa26bfcb330b6ff385 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc98515e62b2dba225e822850229f0a2f802d63 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_4xb4-2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fovea_r50_fpn_4xb4-2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..222671d49d1e3fbc31285e4f13487d86642ebbe3 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_4xb4-2x_coco.py @@ -0,0 +1,23 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +# learning policy +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e1852d581fcbdd9a1459291fc7f65e51041aa4e6 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py @@ -0,0 +1,34 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +# learning policy +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = 
dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-1x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..13cf3ae92b0d2bfd1d84f032f7b202430f095a6a --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-1x_coco.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='FOVEA', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + num_outs=5, + add_extra_convs='on_input'), + bbox_head=dict( + type='FoveaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + base_edge_list=[16, 32, 64, 128, 256], + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + sigma=0.4, + with_deform=False, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=1.50, + alpha=0.4, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + # training and testing settings + train_cfg=dict(), + test_cfg=dict( + nms_pre=1000, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) +train_dataloader = dict(batch_size=4, num_workers=4) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d06ef9f9ba89f202ef13176af39df7e89cb5e6 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_4xb4-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +# learning policy +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..877bb4fa4e1c03190a05da4e95558d8534e5e6e8 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py @@ -0,0 +1,20 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +model = dict( + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +# learning policy +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git 
a/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5690bcae08cd0e639afe3c832a46f78036324c08 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py @@ -0,0 +1,30 @@ +_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' +model = dict( + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +# learning policy +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/foveabox/metafile.yml b/mmpose/configs/mmdet/foveabox/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..9ab2f5420323a9eb8c2ace386485c34277d53213 --- /dev/null +++ b/mmpose/configs/mmdet/foveabox/metafile.yml @@ -0,0 +1,172 @@ +Collections: + - Name: FoveaBox + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.03797 + Title: 'FoveaBox: Beyond Anchor-based Object Detector' + README: configs/foveabox/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fovea.py#L6 + Version: v2.0.0 + +Models: + - Name: fovea_r50_fpn_4xb4-1x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_4xb4-1x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth + + - Name: fovea_r50_fpn_4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_4xb4-2x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth + + - Name: fovea_r50_fpn_gn-head-align_4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 51.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth + + - Name: fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 54.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth + + - Name: fovea_r101_fpn_4xb4-1x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_4xb4-1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 57.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth + + - Name: fovea_r101_fpn_4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_4xb4-2x_coco.py + Metadata: + Training Memory (GB): 11.7 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth + + - Name: fovea_r101_fpn_gn-head-align_4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_gn-head-align_4xb4-2x_coco.py + Metadata: + Training Memory (GB): 11.7 + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth + + - Name: fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_gn-head-align_ms-640-800-4xb4-2x_coco.py + Metadata: + Training Memory (GB): 11.7 + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth diff --git a/mmpose/configs/mmdet/fpg/README.md b/mmpose/configs/mmdet/fpg/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e2fd400288d3ebd57741f1b1d18e430a8c62f41 --- /dev/null +++ b/mmpose/configs/mmdet/fpg/README.md @@ -0,0 +1,43 @@ +# FPG + +> [Feature Pyramid Grids](https://arxiv.org/abs/2004.03580) + + + +## Abstract + +Feature pyramid networks have been widely adopted in the object detection literature to improve feature representations for better handling of variations in scale. 
In this paper, we present Feature Pyramid Grids (FPG), a deep multi-pathway feature pyramid that represents the feature scale-space as a regular grid of parallel bottom-up pathways fused by multi-directional lateral connections. FPG can improve single-pathway feature pyramid networks by significantly increasing their performance at a similar computation cost, highlighting the importance of deep pyramid representations. In addition to being general and uniform in structure, in contrast to the over-complicated structures found by neural architecture search, FPG compares favorably against such approaches without relying on search. We hope that FPG, with its uniform and effective nature, can serve as a strong component for future work in object recognition. + +
+ +
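+In the configs in this directory, the grid described above is an `FPG` neck that takes the place of the usual `FPN`. The sketch below is abridged from `faster-rcnn_r50_fpg_crop640-50e_coco.py` (several transition dicts are omitted), so treat it as a reading aid rather than a drop-in config:
+
+```python
+# Abridged FPG neck: nine stacked bottom-up pathways (the "grid") fused by
+# lateral and cross-scale connections, replacing a single-pathway FPN.
+norm_cfg = dict(type='BN', requires_grad=True)
+neck = dict(
+    type='FPG',
+    in_channels=[256, 512, 1024, 2048],  # C2-C5 of the ResNet-50 backbone
+    out_channels=256,
+    inter_channels=256,
+    num_outs=5,
+    stack_times=9,     # number of parallel pathways in the grid
+    paths=['bu'] * 9,  # every pathway is built bottom-up
+    same_down_trans=None,
+    # Lateral connections between pathways are small convs; the full config
+    # below also sets the across_down/across_up/across_skip/output transitions.
+    across_lateral_trans=dict(
+        type='conv',
+        kernel_size=1,
+        norm_cfg=norm_cfg,
+        inplace=False,
+        order=('act', 'conv', 'norm')),
+    norm_cfg=norm_cfg)
+```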
+ +## Results and Models + +We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. +All backbones are Resnet-50 in pytorch style. + +| Method | Neck | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | FPG | 50e | 20.0 | - | 42.3 | - | [config](./faster-rcnn_r50_fpg_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856.log.json) | +| Faster R-CNN | FPG-chn128 | 50e | 11.9 | - | 41.2 | - | [config](./faster-rcnn_r50_fpg-chn128_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857.log.json) | +| Faster R-CNN | FPN | 50e | 20.0 | - | 38.9 | - | [config](./faster-rcnn_r50_fpn_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857-be7c9f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857.log.json) | +| Mask R-CNN | FPG | 50e | 23.2 | - | 43.0 | 38.1 | [config](./mask-rcnn_r50_fpg_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857.log.json) | +| Mask R-CNN | FPG-chn128 | 50e | 15.3 | - | 41.7 | 37.1 | [config](./mask-rcnn_r50_fpg-chn128_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859.log.json) | +| Mask R-CNN | FPN | 50e | 23.2 | - | 49.6 | 35.6 | [config](./mask-rcnn_r50_fpn_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855-a756664a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855.log.json) | +| RetinaNet | FPG | 50e | 20.8 | - | 40.5 | - | [config](./retinanet_r50_fpg_crop640_50e_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809.log.json) | +| RetinaNet | FPG-chn128 | 50e | 19.9 | - | 39.9 | - | [config](./retinanet_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829.log.json) | + +**Note**: Chn128 means to decrease the number of channels of features and convs from 256 (default) to 128 in +Neck and BBox Head, which can greatly decrease memory consumption without sacrificing much precision. + +## Citation + +```latex +@article{chen2020feature, + title={Feature pyramid grids}, + author={Chen, Kai and Cao, Yuhang and Loy, Chen Change and Lin, Dahua and Feichtenhofer, Christoph}, + journal={arXiv preprint arXiv:2004.03580}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg-chn128_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg-chn128_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9160f5cc7e118069d7172573018515aa406331 --- /dev/null +++ b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg-chn128_crop640-50e_coco.py @@ -0,0 +1,9 @@ +_base_ = 'faster-rcnn_r50_fpg_crop640-50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict(out_channels=128, inter_channels=128), + rpn_head=dict(in_channels=128), + roi_head=dict( + bbox_roi_extractor=dict(out_channels=128), + bbox_head=dict(in_channels=128))) diff --git a/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d0d366f1f30e5bcc6d52010c46d60183b56386ea --- /dev/null +++ b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpg_crop640-50e_coco.py @@ -0,0 +1,48 @@ +_base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpn_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpn_crop640-50e_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..46211de03f34e6a9709a9cfa8561b88a90f69581 --- /dev/null +++ b/mmpose/configs/mmdet/fpg/faster-rcnn_r50_fpn_crop640-50e_coco.py @@ -0,0 +1,73 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='BN', requires_grad=True) +image_size = (640, 640) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict( + data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments), + backbone=dict(norm_cfg=norm_cfg, norm_eval=False), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + allow_negative_crop=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=image_size, keep_ratio=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# learning policy +max_epochs = 50 +train_cfg = dict(max_epochs=max_epochs, val_interval=2) +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[30, 40], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True), + clip_grad=None) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg-chn128_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg-chn128_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..804393966c6711a1e5261ace00e9b8b84283fde5 --- /dev/null +++ b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg-chn128_crop640-50e_coco.py @@ -0,0 +1,10 @@ +_base_ = 'mask-rcnn_r50_fpg_crop640-50e_coco.py' + +model = dict( + neck=dict(out_channels=128, inter_channels=128), + rpn_head=dict(in_channels=128), + roi_head=dict( + bbox_roi_extractor=dict(out_channels=128), + bbox_head=dict(in_channels=128), + mask_roi_extractor=dict(out_channels=128), + mask_head=dict(in_channels=128))) diff --git a/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..135bb60bb340c40a47a9bd64e5a8afc57ede60db --- /dev/null +++ b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py @@ -0,0 +1,48 @@ +_base_ = 'mask-rcnn_r50_fpn_crop640-50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpn_crop640-50e_coco.py b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpn_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..08ca5b6ffd8b9d166857d3c27bb6f5bde91416cc --- /dev/null +++ b/mmpose/configs/mmdet/fpg/mask-rcnn_r50_fpn_crop640-50e_coco.py @@ -0,0 +1,79 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='BN', requires_grad=True) +image_size = (640, 640) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict( + data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments), + backbone=dict(norm_cfg=norm_cfg, norm_eval=False), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + norm_cfg=norm_cfg, + num_outs=5), + roi_head=dict( + bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.8, 1.2), + 
keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + allow_negative_crop=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=image_size, keep_ratio=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# learning policy +max_epochs = 50 +train_cfg = dict(max_epochs=max_epochs, val_interval=2) +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[30, 40], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True), + clip_grad=None) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/fpg/metafile.yml b/mmpose/configs/mmdet/fpg/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..7d7634aec6161a283577059de96d5f995cf1e4bb --- /dev/null +++ b/mmpose/configs/mmdet/fpg/metafile.yml @@ -0,0 +1,104 @@ +Collections: + - Name: Feature Pyramid Grids + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Feature Pyramid Grids + Paper: + URL: https://arxiv.org/abs/2004.03580 + Title: 'Feature Pyramid Grids' + README: configs/fpg/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.10.0/mmdet/models/necks/fpg.py#L101 + Version: v2.10.0 + +Models: + - Name: faster-rcnn_r50_fpg_crop640-50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/faster-rcnn_r50_fpg_crop640-50e_coco.py + Metadata: + Training Memory (GB): 20.0 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth + + - Name: faster-rcnn_r50_fpg-chn128_crop640-50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/faster-rcnn_r50_fpg-chn128_crop640-50e_coco.py + Metadata: + Training Memory (GB): 11.9 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth + + - Name: mask-rcnn_r50_fpg_crop640-50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py + Metadata: + Training Memory (GB): 23.2 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth + + - Name: mask-rcnn_r50_fpg-chn128_crop640-50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/mask-rcnn_r50_fpg-chn128_crop640-50e_coco.py + Metadata: + Training Memory (GB): 15.3 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth + + - Name: retinanet_r50_fpg_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py + Metadata: + Training Memory (GB): 20.8 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth + + - Name: retinanet_r50_fpg-chn128_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py + Metadata: + Training Memory (GB): 19.9 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth diff --git a/mmpose/configs/mmdet/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py b/mmpose/configs/mmdet/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6cf7e56a4f23a42d3905560a9b8035d6d935ff --- /dev/null +++ b/mmpose/configs/mmdet/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py @@ -0,0 +1,5 @@ +_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py' + +model = dict( + neck=dict(out_channels=128, inter_channels=128), + bbox_head=dict(in_channels=128)) diff --git a/mmpose/configs/mmdet/fpg/retinanet_r50_fpg_crop640_50e_coco.py b/mmpose/configs/mmdet/fpg/retinanet_r50_fpg_crop640_50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e2aac283992ea9e4595e7594233b21208bd672f5 --- /dev/null +++ b/mmpose/configs/mmdet/fpg/retinanet_r50_fpg_crop640_50e_coco.py @@ -0,0 +1,53 @@ +_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + _delete_=True, + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + add_extra_convs=True, + start_level=1, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + 
inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) + +train_cfg = dict(val_interval=2) diff --git a/mmpose/configs/mmdet/free_anchor/README.md b/mmpose/configs/mmdet/free_anchor/README.md new file mode 100644 index 0000000000000000000000000000000000000000..03dc828319fcfd5368361af8b64de1018a54f638 --- /dev/null +++ b/mmpose/configs/mmdet/free_anchor/README.md @@ -0,0 +1,37 @@ +# FreeAnchor + +> [FreeAnchor: Learning to Match Anchors for Visual Object Detection](https://arxiv.org/abs/1909.02466) + + + +## Abstract + +Modern CNN-based object detectors assign anchors for ground-truth objects under the restriction of object-anchor Intersection-over-Union (IoU). In this study, we propose a learning-to-match approach to break the IoU restriction, allowing objects to match anchors in a flexible manner. Our approach, referred to as FreeAnchor, updates hand-crafted anchor assignment to "free" anchor matching by formulating detector training as a maximum likelihood estimation (MLE) procedure. FreeAnchor aims at learning features which best explain a class of objects in terms of both classification and localization. FreeAnchor is implemented by optimizing a detection-customized likelihood and can be fused with CNN-based detectors in a plug-and-play manner. Experiments on COCO demonstrate that FreeAnchor consistently outperforms its counterparts with significant margins. + +
+ +
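The FreeAnchor abstract above replaces hand-crafted IoU assignment with a detection-customized likelihood in which each object selects anchors from a candidate bag. As a rough, self-contained sketch of that idea (the saturated mean-max aggregation over an anchor bag), the snippet below uses invented tensor shapes and is not the `FreeAnchorRetinaHead` implementation referenced by these configs:

```python
import torch

def mean_max(bag_probs, eps=1e-6):
    # Soft maximum over an anchor bag: close to max() when one anchor clearly
    # dominates, close to mean() when all candidates score similarly.
    weights = 1.0 / (1.0 - bag_probs).clamp(min=eps)
    return (weights * bag_probs).sum(dim=-1) / weights.sum(dim=-1)

# Toy example: joint cls * loc confidences for 8 objects x 50 candidate anchors.
bag_probs = torch.rand(8, 50)
positive_likelihood = mean_max(bag_probs)                 # one value per object
positive_loss = -positive_likelihood.clamp(min=1e-6).log().mean()
print(positive_loss.item())
```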
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 4.9 | 18.4 | 38.7 | [config](./freeanchor_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130_095625.log.json) | +| R-101 | pytorch | 1x | 6.8 | 14.9 | 40.3 | [config](./freeanchor_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130_100723.log.json) | +| X-101-32x4d | pytorch | 1x | 8.1 | 11.1 | 41.9 | [config](./freeanchor_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130_095627.log.json) | + +**Notes:** + +- We use 8 GPUs with 2 images/GPU. +- For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor). 
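The notes above state that these models were trained on 8 GPUs with 2 images per GPU, i.e. an effective batch size of 16, while other configs in this tree (such as the FPG ones) declare `auto_scale_lr = dict(base_batch_size=64)`. When training with a different GPU count or per-GPU batch size, the learning rate is commonly adjusted with the linear scaling rule. The helper below is a standalone sketch of that rule, not the framework's built-in auto-scaling code, and the base learning rate of 0.01 is only an illustrative value:

```python
def linear_scaled_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    """Linear scaling rule: keep lr / effective_batch_size constant."""
    effective_batch_size = num_gpus * samples_per_gpu
    return base_lr * effective_batch_size / base_batch_size

# Reference setup assumed here: lr=0.01 at 8 GPUs x 2 imgs/GPU (batch size 16).
print(linear_scaled_lr(0.01, 16, num_gpus=4, samples_per_gpu=2))  # 0.005
print(linear_scaled_lr(0.01, 16, num_gpus=8, samples_per_gpu=4))  # 0.02
```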
+ +## Citation + +```latex +@inproceedings{zhang2019freeanchor, + title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, + author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, + booktitle = {Neural Information Processing Systems}, + year = {2019} +} +``` diff --git a/mmpose/configs/mmdet/free_anchor/freeanchor_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/free_anchor/freeanchor_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dc323d94f7aa20b38e2204a38ed8e234dd4eadd1 --- /dev/null +++ b/mmpose/configs/mmdet/free_anchor/freeanchor_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './freeanchor_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/free_anchor/freeanchor_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/free_anchor/freeanchor_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..13f64d14a1ead0431549b8569d031f72669a2e84 --- /dev/null +++ b/mmpose/configs/mmdet/free_anchor/freeanchor_r50_fpn_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='FreeAnchorRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75))) + +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/free_anchor/freeanchor_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/free_anchor/freeanchor_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8e448bc1123115d37ef9f21a33c8a6b38cd821c3 --- /dev/null +++ b/mmpose/configs/mmdet/free_anchor/freeanchor_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = './freeanchor_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/free_anchor/metafile.yml b/mmpose/configs/mmdet/free_anchor/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..cff19db6c957c2cdc09c1f76ff230c3a611bfc01 --- /dev/null +++ b/mmpose/configs/mmdet/free_anchor/metafile.yml @@ -0,0 +1,79 @@ +Collections: + - Name: FreeAnchor + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FreeAnchor + - ResNet + Paper: + URL: https://arxiv.org/abs/1909.02466 + Title: 'FreeAnchor: Learning to Match Anchors for Visual Object Detection' + README: configs/free_anchor/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/free_anchor_retina_head.py#L10 + Version: v2.0.0 + +Models: + - Name: freeanchor_r50_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/freeanchor_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.9 + inference time (ms/im): + - value: 54.35 + hardware: V100 + backend: PyTorch + 
batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth + + - Name: freeanchor_r101_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/freeanchor_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.8 + inference time (ms/im): + - value: 67.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth + + - Name: freeanchor_x101-32x4d_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/freeanchor_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth diff --git a/mmpose/configs/mmdet/fsaf/README.md b/mmpose/configs/mmdet/fsaf/README.md new file mode 100644 index 0000000000000000000000000000000000000000..46f60577728d3e9d8785f19d8cda34991bae06d3 --- /dev/null +++ b/mmpose/configs/mmdet/fsaf/README.md @@ -0,0 +1,57 @@ +# FSAF + +> [Feature Selective Anchor-Free Module for Single-Shot Object Detection](https://arxiv.org/abs/1903.00621) + + + +## Abstract + +We motivate and present feature selective anchor-free (FSAF) module, a simple and effective building block for single-shot object detectors. It can be plugged into single-shot detectors with feature pyramid structure. The FSAF module addresses two limitations brought up by the conventional anchor-based detection: 1) heuristic-guided feature selection; 2) overlap-based anchor sampling. The general concept of the FSAF module is online feature selection applied to the training of multi-level anchor-free branches. Specifically, an anchor-free branch is attached to each level of the feature pyramid, allowing box encoding and decoding in the anchor-free manner at an arbitrary level. During training, we dynamically assign each instance to the most suitable feature level. At the time of inference, the FSAF module can work jointly with anchor-based branches by outputting predictions in parallel. We instantiate this concept with simple implementations of anchor-free branches and online feature selection strategy. Experimental results on the COCO detection track show that our FSAF module performs better than anchor-based counterparts while being faster. When working jointly with anchor-based branches, the FSAF module robustly improves the baseline RetinaNet by a large margin under various settings, while introducing nearly free inference overhead. And the resulting best model can achieve a state-of-the-art 44.6% mAP, outperforming all existing single-shot detectors on COCO. + +
+ +
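The key mechanism in the FSAF abstract above is online feature selection: during training, each instance is dynamically assigned to the pyramid level whose anchor-free branch explains it with the lowest loss. The toy sketch below isolates just that selection step with made-up shapes; it is not the `FSAFHead` code used by the configs that follow:

```python
import torch

def select_best_levels(per_level_losses):
    # per_level_losses: (num_instances, num_levels) -- summed classification +
    # regression loss of each instance when encoded at each FPN level.
    return per_level_losses.argmin(dim=1)

losses = torch.rand(4, 5)   # 4 ground-truth instances, 5 pyramid levels (P3-P7)
best_levels = select_best_levels(losses)
print(best_levels)          # e.g. tensor([3, 0, 4, 1])
```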
+ +## Introduction + +FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)). +Actually it is equivalent to the anchor-based method with only one anchor at each feature map position in each FPN level. +And this is how we implemented it. +Only the anchor-free branch is released for its better compatibility with the current framework and less computational budget. + +In the original paper, feature maps within the central 0.2-0.5 area of a gt box are tagged as ignored. However, +it is empirically found that a hard threshold (0.2-0.2) gives a further gain on the performance. (see the table below) + +## Results and Models + +### Results on R50/R101/X101-FPN + +| Backbone | ignore range | ms-train | Lr schd | Train Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | +| :------: | :----------: | :------: | :-----: | :------------: | :-----------------: | :------------: | :---------: | :----------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 0.2-0.5 | N | 1x | 3.15 | 0.43 | 12.3 | 36.0 (35.9) | | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715-b555b0e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715_094657.log.json) | +| R-50 | 0.2-0.2 | N | 1x | 3.15 | 0.43 | 13.0 | 37.4 | [config](./fsaf_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco_20200428_072327.log.json) | +| R-101 | 0.2-0.2 | N | 1x | 5.08 | 0.58 | 10.8 | 39.3 (37.9) | [config](./fsaf_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco_20200428_160348.log.json) | +| X-101 | 0.2-0.2 | N | 1x | 9.38 | 1.23 | 5.6 | 42.4 (41.0) | [config](./fsaf_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco_20200428_160424.log.json) | + +**Notes:** + +- *1x means the model is trained for 12 epochs.* +- *AP values in the brackets represent those reported in the original paper.* +- *All results are obtained with a single model and single-scale test.* +- *X-101 backbone represents ResNext-101-64x4d.* +- *All pretrained backbones use pytorch style.* +- *All models are trained on 8 Titan-XP gpus and tested on a single gpu.* + +## Citation + +BibTeX reference is as follows. 
+ +```latex +@inproceedings{zhu2019feature, + title={Feature Selective Anchor-Free Module for Single-Shot Object Detection}, + author={Zhu, Chenchen and He, Yihui and Savvides, Marios}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={840--849}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/fsaf/fsaf_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/fsaf/fsaf_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..12b49fed5b6cd617aa9c05d76ed737d755992a34 --- /dev/null +++ b/mmpose/configs/mmdet/fsaf/fsaf_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fsaf_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/fsaf/fsaf_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/fsaf/fsaf_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e7165cd63c74ab27ff47f8255836f4c10158cf0e --- /dev/null +++ b/mmpose/configs/mmdet/fsaf/fsaf_r50_fpn_1x_coco.py @@ -0,0 +1,47 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +# model settings +model = dict( + type='FSAF', + bbox_head=dict( + type='FSAFHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + reg_decoded_bbox=True, + # Only anchor-free branch is implemented. The anchor generator only + # generates 1 anchor at each feature point, as a substitute of the + # grid of features. + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=1, + scales_per_octave=1, + ratios=[1.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0, + reduction='none'), + loss_bbox=dict( + _delete_=True, + type='IoULoss', + eps=1e-6, + loss_weight=1.0, + reduction='none')), + # training and testing settings + train_cfg=dict( + assigner=dict( + _delete_=True, + type='CenterRegionAssigner', + pos_scale=0.2, + neg_scale=0.2, + min_pos_iof=0.01), + allowed_border=-1, + pos_weight=-1, + debug=False)) + +optim_wrapper = dict(clip_grad=dict(max_norm=10, norm_type=2)) diff --git a/mmpose/configs/mmdet/fsaf/fsaf_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/fsaf/fsaf_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..89c0c6344aba6e6eae5657eff60745645dd1e8dc --- /dev/null +++ b/mmpose/configs/mmdet/fsaf/fsaf_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './fsaf_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/fsaf/metafile.yml b/mmpose/configs/mmdet/fsaf/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..daaad0d3a864b52df618a95a63c6caeaa1fd76ec --- /dev/null +++ b/mmpose/configs/mmdet/fsaf/metafile.yml @@ -0,0 +1,80 @@ +Collections: + - Name: FSAF + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x Titan-XP GPUs + Architecture: + - FPN + - FSAF + - ResNet + Paper: + URL: https://arxiv.org/abs/1903.00621 + Title: 'Feature Selective Anchor-Free Module for Single-Shot Object Detection' + README: 
configs/fsaf/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/fsaf.py#L6 + Version: v2.1.0 + +Models: + - Name: fsaf_r50_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.15 + inference time (ms/im): + - value: 76.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth + + - Name: fsaf_r101_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.08 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth + + - Name: fsaf_x101-64x4d_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.38 + inference time (ms/im): + - value: 178.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth diff --git a/mmpose/configs/mmdet/gcnet/README.md b/mmpose/configs/mmdet/gcnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1ba6f6f3e4e23d4f68bca2545bba733352d0c498 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/README.md @@ -0,0 +1,69 @@ +# GCNet + +> [GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond](https://arxiv.org/abs/1904.11492) + + + +## Abstract + +The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. + +
+ +
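The GCNet abstract above describes the GC block as a three-step pipeline: query-independent context modeling (a single attention map shared by all positions), a bottleneck transform, and fusion by addition. The compact PyTorch sketch below makes those shapes concrete; it is a simplified stand-in, not the `ContextBlock` plugin that the configs in this directory insert into the backbone:

```python
import torch
import torch.nn as nn

class GCBlockSketch(nn.Module):
    """Minimal sketch of a Global Context block (illustrative only)."""

    def __init__(self, channels, ratio=1. / 16):
        super().__init__()
        hidden = max(int(channels * ratio), 1)
        self.context_mask = nn.Conv2d(channels, 1, kernel_size=1)
        self.transform = nn.Sequential(
            nn.Conv2d(channels, hidden, kernel_size=1),
            nn.LayerNorm([hidden, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, kernel_size=1))

    def forward(self, x):
        n, c, h, w = x.shape
        # Context modeling: one softmax attention map shared by every query position.
        attn = self.context_mask(x).view(n, 1, h * w).softmax(dim=-1)
        context = torch.bmm(x.view(n, c, h * w), attn.transpose(1, 2))
        context = context.view(n, c, 1, 1)
        # Bottleneck transform, then fusion by (broadcast) addition.
        return x + self.transform(context)

out = GCBlockSketch(256)(torch.rand(2, 256, 32, 32))
print(out.shape)  # torch.Size([2, 256, 32, 32])
```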
+ +## Introduction + +By [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), Fangyun Wei, [Han Hu](https://sites.google.com/site/hanhushomepage/). + +We provide config files to reproduce the results in the paper for +["GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"](https://arxiv.org/abs/1904.11492) on COCO object detection. + +**GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet), GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks. + +## Results and Models + +The results on COCO 2017val are shown in the below table. + +| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :---: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | | 39.7 | 35.9 | [config](./mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.0 | 39.9 | 36.0 | [config](./mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204_024626.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 11.4 | 41.3 | 37.2 | [config](./mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205_192835.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.6 | 42.2 | 37.8 | [config](./mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206_112128.log.json) | + +| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :--------------: | :------------: | :-----: | :------: | :------------: | :----: 
| :-----: | :--------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Mask | - | 1x | 4.4 | 16.6 | 38.4 | 34.6 | [config](./mask-rcnn_r50-syncbn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202_214122.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | 15.5 | 40.4 | 36.2 | [config](./mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202_174907.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.1 | 40.7 | 36.5 | [config](./mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | +| R-101-FPN | Mask | - | 1x | 6.4 | 13.3 | 40.5 | 36.3 | [config](./mask-rcnn_r101-syncbn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210_220422.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 12.0 | 42.2 | 37.8 | [config](./mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207_015330.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.8 | 42.2 | 37.8 | [config](./mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | +| X-101-FPN | Mask | - | 1x | 7.6 | 11.3 | 42.4 | 37.7 | [config](./mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211_054326.log.json) | +| X-101-FPN | Mask | GC(c3-c5, r16) | 1x | 8.8 | 9.8 | 43.5 | 38.6 | [config](./mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_164715.log.json) | +| X-101-FPN | Mask | GC(c3-c5, r4) | 1x | 9.0 | 9.7 | 43.9 | 39.0 | [config](./mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212_070942.log.json) | +| X-101-FPN | Cascade Mask | - | 1x | 9.2 | 8.4 | 44.7 | 38.6 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310_115217.log.json) | +| X-101-FPN | Cascade Mask | GC(c3-c5, r16) | 1x | 10.3 | 7.7 | 46.2 | 39.7 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_184154.log.json) | +| X-101-FPN | Cascade Mask | GC(c3-c5, r4) | 1x | 10.6 | | 46.4 | 40.1 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653.log.json) | +| X-101-FPN | DCN Cascade Mask | - | 1x | | | 47.5 | 40.9 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019.log.json) | +| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r16) | 1x | | | 48.0 | 41.3 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648.log.json) | +| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r4) | 1x | | | 47.9 | 41.1 | [config](./cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851.log.json) | + +**Notes:** + +- The `SyncBN` is added in the backbone for all models in **Table 2**. +- `GC` denotes Global Context (GC) block is inserted after 1x1 conv of backbone. +- `DCN` denotes replace 3x3 conv with 3x3 Deformable Convolution in `c3-c5` stages of backbone. +- `r4` and `r16` denote ratio 4 and ratio 16 in GC block respectively. + +## Citation + +```latex +@article{cao2019GCNet, + title={GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond}, + author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han}, + journal={arXiv preprint arXiv:1904.11492}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6cf605b666e460aee48adc629b0604af4c64e306 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..95fc687b664b25b754d4ba890ae9c9e982db65fb --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9b77dc9315f52f9437eb1e39f6d518f1afaa41bb --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8f97972aa2b7d151d5824de40da9cedae9c57535 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8404cfdaf34e470d2bff57a707ca8183fe442131 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..87667dee779ee8068075be17638a6d10a9985c7e --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..447e2c6d858738db0f0d2e46e57e1fccd2233af3 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9c723a64b6f686b9dd0f8e7648c7b1b303205168 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6f9d03d3f8d94116b4814825ad8377b534a912b1 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d07cb0d488c0df76a137bad54123a7583c7da87b --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..957bdf55470017d9ac9fa482b416c2206266af86 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r101-syncbn_fpn_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c9ec5ac3baf7c46ea95d4c3fcf4f5da4ad7a3dce --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..42474d5196a8a130999db735989b423664486304 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ac1928082405baebfe5ec483f37b9775da21d5ad --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ae29f0cebe4f9fe16f2fea3de53874914186da9b --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f8ef27bad9743cba8f7134f1a77a091af1bca093 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_r50-syncbn_fpn_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a2e2c9f26b25c5aefba912997cd01db60854a5e --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..65d3f9aadf5f79a4fb9fc9082dfabfdb3de08871 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b5343a6d4596eb82245ef078d36a5a6ce5137aeb --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/mmpose/configs/mmdet/gcnet/metafile.yml b/mmpose/configs/mmdet/gcnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..075a94c8fbf4c5f629d9343cc841f94f18472195 --- /dev/null +++ b/mmpose/configs/mmdet/gcnet/metafile.yml @@ -0,0 +1,440 @@ +Collections: + - Name: GCNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Global Context Block + - FPN + - RPN + - ResNet + - ResNeXt + Paper: + URL: https://arxiv.org/abs/1904.11492 + Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' + README: configs/gcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/context_block.py#L13 + Version: v2.0.0 + +Models: + - Name: mask-rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth + + - Name: mask-rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth + + - Name: mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 87.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth + + - Name: mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + inference time (ms/im): + - value: 86.21 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: 
(800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth + + - Name: mask-rcnn_r50_fpn_syncbn-backbone_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r50-syncbn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 60.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth + + - Name: mask-rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + inference time (ms/im): + - value: 64.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth + + - Name: mask-rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth + + - Name: mask-rcnn_r101-syncbn_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r101-syncbn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 75.19 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth + + - Name: mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 83.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + 
Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth + + - Name: mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + inference time (ms/im): + - value: 84.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth + + - Name: mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth + + - Name: mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.8 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth + + - Name: mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 119.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth + + - Name: cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth diff --git a/mmpose/configs/mmdet/gfl/README.md b/mmpose/configs/mmdet/gfl/README.md new 
file mode 100644 index 0000000000000000000000000000000000000000..123f303ab422032aa2bbd2900a7c690d1a496eef --- /dev/null +++ b/mmpose/configs/mmdet/gfl/README.md @@ -0,0 +1,42 @@ +# GFL + +> [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) + + + +## Abstract + +One-stage detector basically formulates object detection as dense classification and localization. The classification is usually optimized by Focal Loss and the box location is commonly learned under Dirac delta distribution. A recent trend for one-stage detectors is to introduce an individual prediction branch to estimate the quality of localization, where the predicted quality facilitates the classification to improve detection performance. This paper delves into the representations of the above three fundamental elements: quality estimation, classification and localization. Two problems are discovered in existing practices, including (1) the inconsistent usage of the quality estimation and classification between training and inference and (2) the inflexible Dirac delta distribution for localization when there is ambiguity and uncertainty in complex scenes. To address the problems, we design new representations for these elements. Specifically, we merge the quality estimation into the class prediction vector to form a joint representation of localization quality and classification, and use a vector to represent arbitrary distribution of box locations. The improved representations eliminate the inconsistency risk and accurately depict the flexible distribution in real data, but contain continuous labels, which is beyond the scope of Focal Loss. We then propose Generalized Focal Loss (GFL) that generalizes Focal Loss from its discrete form to the continuous version for successful optimization. On COCO test-dev, GFL achieves 45.0% AP using ResNet-101 backbone, surpassing state-of-the-art SAPD (43.5%) and ATSS (43.6%) with higher or comparable inference speed, under the same backbone and training settings. Notably, our best model can achieve a single-model single-scale AP of 48.2%, at 10 FPS on a single 2080Ti GPU. + +
+ +
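+For reference, a minimal PyTorch sketch of the Quality Focal Loss and Distribution Focal Loss described above (an illustrative re-implementation, not the registered `QualityFocalLoss` / `DistributionFocalLoss` modules that the configs below select; `beta=2.0` and `reg_max=16` match `gfl_r50_fpn_1x_coco.py`):
+
+```python
+# Illustrative sketch only; the losses actually used are selected in the configs via
+# loss_cls=dict(type='QualityFocalLoss', ...) and loss_dfl=dict(type='DistributionFocalLoss', ...).
+import torch.nn.functional as F
+
+
+def quality_focal_loss(cls_logits, iou_targets, beta=2.0):
+    """QFL: focal-style BCE against soft IoU-quality targets y in [0, 1]."""
+    sigma = cls_logits.sigmoid()
+    modulating = (iou_targets - sigma).abs().pow(beta)   # |y - sigma|^beta
+    bce = F.binary_cross_entropy_with_logits(cls_logits, iou_targets, reduction='none')
+    return (modulating * bce).mean()
+
+
+def distribution_focal_loss(dist_logits, targets):
+    """DFL: cross-entropy on the two integer bins surrounding a continuous box offset.
+
+    dist_logits: (N, reg_max + 1) logits over discrete bins; targets: (N,) floats in [0, reg_max).
+    """
+    left = targets.long()                      # y_i
+    right = left + 1                           # y_{i+1}
+    w_left = right.float() - targets           # (y_{i+1} - y)
+    w_right = targets - left.float()           # (y - y_i)
+    ce_left = F.cross_entropy(dist_logits, left, reduction='none')
+    ce_right = F.cross_entropy(dist_logits, right, reduction='none')
+    return (w_left * ce_left + w_right * ce_right).mean()
+```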
+ +## Results and Models + +| Backbone | Style | Lr schd | Multi-scale Training | Inf time (fps) | box AP | Config | Download | +| :---------------: | :-----: | :-----: | :------------------: | :------------: | :----: | :------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | No | 19.5 | 40.2 | [config](./gfl_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json) | +| R-50 | pytorch | 2x | Yes | 19.5 | 42.9 | [config](./gfl_r50_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json) | +| R-101 | pytorch | 2x | Yes | 14.7 | 44.7 | [config](./gfl_r101_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json) | +| R-101-dcnv2 | pytorch | 2x | Yes | 12.9 | 47.1 | [config](./gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json) | +| X-101-32x4d | pytorch | 2x | Yes | 12.1 | 45.9 | [config](./gfl_x101-32x4d_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json) | +| X-101-32x4d-dcnv2 | pytorch | 2x | Yes | 10.7 | 48.1 | [config](./gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) | + +\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ +\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ +\[3\] *`dcnv2` denotes deformable convolutional networks v2.* \ +\[4\] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.* + +## Citation + +We provide config files to reproduce the object detection 
results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) + +```latex +@article{li2020generalized, + title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection}, + author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian}, + journal={arXiv preprint arXiv:2006.04388}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7f748935b62884fd501af7e6731ad3ef6ce0effb --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './gfl_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/gfl/gfl_r101_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/gfl/gfl_r101_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..10135f161b9e933612d961af12a8e30198cca484 --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_r101_fpn_ms-2x_coco.py @@ -0,0 +1,13 @@ +_base_ = './gfl_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/gfl/gfl_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/gfl/gfl_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..902382552d58f124bbe2b8c2904ce74ec7b7a4d8 --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_r50_fpn_1x_coco.py @@ -0,0 +1,66 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='GFL', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='GFLHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), + reg_max=16, + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + 
assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/gfl/gfl_r50_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/gfl/gfl_r50_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..22770eb101920f9daae750a1b72f5410be395743 --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_r50_fpn_ms-2x_coco.py @@ -0,0 +1,28 @@ +_base_ = './gfl_r50_fpn_1x_coco.py' +max_epochs = 24 + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) + +# multi-scale training +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa98eea2d0d25b4df1570aed97cce8475e9104d --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './gfl_r50_fpn_ms-2x_coco.py' +model = dict( + type='GFL', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ec629b1f0d5d3317dcb20f1244bc713818518d8a --- /dev/null +++ b/mmpose/configs/mmdet/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './gfl_r50_fpn_ms-2x_coco.py' +model = dict( + type='GFL', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/gfl/metafile.yml b/mmpose/configs/mmdet/gfl/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..183fc14bdee0492c7ea3fc18ccb7371682dc0066 --- /dev/null +++ b/mmpose/configs/mmdet/gfl/metafile.yml @@ -0,0 +1,134 @@ +Collections: + - Name: Generalized Focal Loss + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Generalized Focal Loss + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2006.04388 + Title: 
'Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection' + README: configs/gfl/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/gfl.py#L6 + Version: v2.2.0 + +Models: + - Name: gfl_r50_fpn_1x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r50_fpn_1x_coco.py + Metadata: + inference time (ms/im): + - value: 51.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth + + - Name: gfl_r50_fpn_ms-2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r50_fpn_ms-2x_coco.py + Metadata: + inference time (ms/im): + - value: 51.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth + + - Name: gfl_r101_fpn_ms-2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r101_fpn_ms-2x_coco.py + Metadata: + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth + + - Name: gfl_r101-dconv-c3-c5_fpn_ms-2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth + + - Name: gfl_x101-32x4d_fpn_ms-2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py + Metadata: + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth + + - Name: gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py + Metadata: + inference time (ms/im): + - value: 93.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth diff --git a/mmpose/configs/mmdet/ghm/README.md b/mmpose/configs/mmdet/ghm/README.md new 
file mode 100644 index 0000000000000000000000000000000000000000..c245cea59d45f2a1a2691ce8019bf12db4af7188 --- /dev/null +++ b/mmpose/configs/mmdet/ghm/README.md @@ -0,0 +1,33 @@ +# GHM + +> [Gradient Harmonized Single-stage Detector](https://arxiv.org/abs/1811.05181) + + + +## Abstract + +Despite the great success of two-stage detectors, single-stage detector is still a more elegant and efficient way, yet suffers from the two well-known disharmonies during training, i.e. the huge difference in quantity between positive and negative examples as well as between easy and hard examples. In this work, we first point out that the essential effect of the two disharmonies can be summarized in term of the gradient. Further, we propose a novel gradient harmonizing mechanism (GHM) to be a hedging for the disharmonies. The philosophy behind GHM can be easily embedded into both classification loss function like cross-entropy (CE) and regression loss function like smooth-L1 (SL1) loss. To this end, two novel loss functions called GHM-C and GHM-R are designed to balancing the gradient flow for anchor classification and bounding box refinement, respectively. Ablation study on MS COCO demonstrates that without laborious hyper-parameter tuning, both GHM-C and GHM-R can bring substantial improvement for single-stage detector. Without any whistles and bells, our model achieves 41.6 mAP on COCO test-dev set which surpasses the state-of-the-art method, Focal Loss (FL) + SL1, by 0.8. + +
+ +
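+For reference, a minimal PyTorch sketch of the GHM-C idea described above: weight each example's binary cross-entropy by the inverse of the gradient density, estimated from a histogram over the gradient norm `|sigmoid(x) - y|`. This is illustrative only; the registered `GHMC` loss used by the configs below (`bins=30`, `momentum=0.75`) additionally smooths the per-bin counts with an exponential moving average, which is omitted here.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def ghm_c_loss(cls_logits, targets, bins=30):
+    """Gradient-harmonized BCE (sketch, no EMA smoothing of bin counts)."""
+    g = (cls_logits.detach().sigmoid() - targets).abs()       # gradient norm per example
+    edges = torch.linspace(0, 1, bins + 1, device=g.device)
+    edges[-1] += 1e-6                                          # keep g == 1.0 inside the last bin
+    n = g.numel()
+    weights = torch.zeros_like(g)
+    nonempty_bins = 0
+    for i in range(bins):
+        in_bin = (g >= edges[i]) & (g < edges[i + 1])
+        count = int(in_bin.sum())
+        if count > 0:
+            weights[in_bin] = n / count                        # inverse gradient density
+            nonempty_bins += 1
+    if nonempty_bins > 0:
+        weights = weights / nonempty_bins
+    loss = F.binary_cross_entropy_with_logits(
+        cls_logits, targets, weight=weights, reduction='sum')
+    return loss / n
+```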
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.0 | 3.3 | 37.0 | [config](./retinanet_r50_fpn_ghm-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130_004213.log.json) | +| R-101-FPN | pytorch | 1x | 6.0 | 4.4 | 39.1 | [config](./retinanet_r101_fpn_ghm-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130_145259.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 5.1 | 40.7 | [config](./retinanet_x101-32x4d_fpn_ghm-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131_113653.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 5.2 | 41.4 | [config](./retinanet_x101-64x4d_fpn_ghm-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131_113723.log.json) | + +## Citation + +```latex +@inproceedings{li2019gradient, + title={Gradient Harmonized Single-stage Detector}, + author={Li, Buyu and Liu, Yu and Wang, Xiaogang}, + booktitle={AAAI Conference on Artificial Intelligence}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/ghm/metafile.yml b/mmpose/configs/mmdet/ghm/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..63cb48ffe7323686c38fcb279dde9ee6387e9be7 --- /dev/null +++ b/mmpose/configs/mmdet/ghm/metafile.yml @@ -0,0 +1,101 @@ +Collections: + - Name: GHM + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - GHM-C + - GHM-R + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1811.05181 + Title: 'Gradient Harmonized Single-stage Detector' + README: configs/ghm/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/losses/ghm_loss.py#L21 + Version: v2.0.0 + +Models: + - Name: retinanet_r50_fpn_ghm-1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 303.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + 
Dataset: COCO + Metrics: + box AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth + + - Name: retinanet_r101_fpn_ghm-1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_r101_fpn_ghm-1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth + + - Name: retinanet_x101-32x4d_fpn_ghm-1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 196.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth + + - Name: retinanet_x101-64x4d_fpn_ghm-1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 192.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth diff --git a/mmpose/configs/mmdet/ghm/retinanet_r101_fpn_ghm-1x_coco.py b/mmpose/configs/mmdet/ghm/retinanet_r101_fpn_ghm-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..090221e68f68a95cfcf092b15f2636cd28fc9d87 --- /dev/null +++ b/mmpose/configs/mmdet/ghm/retinanet_r101_fpn_ghm-1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_ghm-1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/ghm/retinanet_r50_fpn_ghm-1x_coco.py b/mmpose/configs/mmdet/ghm/retinanet_r50_fpn_ghm-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..42b9aa6d05dc64f3045685a7c23d632a6041249c --- /dev/null +++ b/mmpose/configs/mmdet/ghm/retinanet_r50_fpn_ghm-1x_coco.py @@ -0,0 +1,18 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + loss_cls=dict( + _delete_=True, + type='GHMC', + bins=30, + momentum=0.75, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + _delete_=True, + type='GHMR', + mu=0.02, + bins=10, + momentum=0.7, + loss_weight=10.0))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py b/mmpose/configs/mmdet/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1240545a624a70c7122829e85b426cafcc3f42d2 --- /dev/null +++ b/mmpose/configs/mmdet/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_ghm-1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + 
base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py b/mmpose/configs/mmdet/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..689d2edcdf1bdffa52ee3aa3a8a4dac7988f6fa5 --- /dev/null +++ b/mmpose/configs/mmdet/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_ghm-1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/glip/README.md b/mmpose/configs/mmdet/glip/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e74e98d1b578824778edc4ae47741b147c420cca --- /dev/null +++ b/mmpose/configs/mmdet/glip/README.md @@ -0,0 +1,173 @@ +# GLIP: Grounded Language-Image Pre-training + +> [GLIP: Grounded Language-Image Pre-training](https://arxiv.org/abs/2112.03857) + + + +## Abstract + +This paper presents a grounded language-image pre-training (GLIP) model for learning object-level, language-aware, and semantic-rich visual representations. GLIP unifies object detection and phrase grounding for pre-training. The unification brings two benefits: 1) it allows GLIP to learn from both detection and grounding data to improve both tasks and bootstrap a good grounding model; 2) GLIP can leverage massive image-text pairs by generating grounding boxes in a self-training fashion, making the learned representation semantic-rich. In our experiments, we pre-train GLIP on 27M grounding data, including 3M human-annotated and 24M web-crawled image-text pairs. The learned representations demonstrate strong zero-shot and few-shot transferability to various object-level recognition tasks. 1) When directly evaluated on COCO and LVIS (without seeing any images in COCO during pre-training), GLIP achieves 49.8 AP and 26.9 AP, respectively, surpassing many supervised baselines. 2) After fine-tuned on COCO, GLIP achieves 60.8 AP on val and 61.5 AP on test-dev, surpassing prior SoTA. 3) When transferred to 13 downstream object detection tasks, a 1-shot GLIP rivals with a fully-supervised Dynamic Head. + +
+ +
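+Concretely, casting detection as phrase grounding means the detection vocabulary itself is fed to the model as a caption. A small hypothetical helper showing the period-separated prompt format that the demo below passes via `--texts`:
+
+```python
+# Hypothetical helper (not part of mmdet): flatten a class list into the
+# period-separated caption format used as the GLIP text prompt.
+def classes_to_prompt(class_names):
+    return '. '.join(class_names)
+
+
+print(classes_to_prompt(['bench', 'car']))   # -> 'bench. car'
+```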
+ +## Installation + +```shell +cd $MMDETROOT + +# source installation +pip install -r requirements/multimodal.txt + +# or mim installation +mim install mmdet[multimodal] +``` + +```shell +cd $MMDETROOT + +wget https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth + +python demo/image_demo.py demo/demo.jpg \ +configs/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py \ +--weights glip_tiny_a_mmdet-b3654169.pth \ +--texts 'bench. car' +``` + +
+ +
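+The demo can also be driven from Python. The snippet below is a sketch, assuming a recent mmdet 3.x in which `DetInferencer` forwards a text prompt the same way `demo/image_demo.py` does; the `texts` keyword in particular should be checked against your installed version:
+
+```python
+# Sketch of a Python-API counterpart of the demo command above (run from $MMDETROOT).
+from mmdet.apis import DetInferencer
+
+inferencer = DetInferencer(
+    model='configs/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py',
+    weights='glip_tiny_a_mmdet-b3654169.pth',
+    device='cuda:0')
+
+# `texts` mirrors the --texts flag of the demo script; visualizations and prediction
+# files are written to out_dir.
+results = inferencer('demo/demo.jpg', texts='bench. car', out_dir='outputs/')
+```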
+ +## NOTE + +GLIP utilizes BERT as the language model, which requires access to https://huggingface.co/. If you encounter connection errors due to network access, you can download the required files on a computer with internet access and save them locally. Finally, modify the `lang_model_name` field in the config to the local path. Please refer to the following code: + +```python +from transformers import BertConfig, BertModel +from transformers import AutoTokenizer + +config = BertConfig.from_pretrained("bert-base-uncased") +model = BertModel.from_pretrained("bert-base-uncased", add_pooling_layer=False, config=config) +tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + +config.save_pretrained("your path/bert-base-uncased") +model.save_pretrained("your path/bert-base-uncased") +tokenizer.save_pretrained("your path/bert-base-uncased") +``` + +## COCO Results and Models + +| Model | Zero-shot or Finetune | COCO mAP | Official COCO mAP | Pre-Train Data | Config | Download | +| :--------: | :-------------------: | :------: | ----------------: | :------------------------: | :---------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GLIP-T (A) | Zero-shot | 43.0 | 42.9 | O365 | [config](glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth) | +| GLIP-T (A) | Finetune | 53.3 | 52.9 | O365 | [config](glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_180419-e6addd96.pth)\| [log](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_180419.log.json) | +| GLIP-T (B) | Zero-shot | 44.9 | 44.9 | O365 | [config](glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_b_mmdet-6dfbd102.pth) | +| GLIP-T (B) | Finetune | 54.1 | 53.8 | O365 | [config](glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230916_163538-650323ba.pth)\| [log](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230916_163538.log.json) | +| GLIP-T (C) | Zero-shot | 46.7 | 46.7 | O365,GoldG | [config](glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_c_mmdet-2fc427dd.pth) | +| GLIP-T (C) | Finetune | 55.2 | 55.1 | O365,GoldG | [config](glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_182935-4ba3fc3b.pth)\| [log](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_182935.log.json) | +| GLIP-T | Zero-shot | 46.6 | 46.6 | O365,GoldG,CC3M,SBU | [config](glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_mmdet-c24ce662.pth) | +| GLIP-T | Finetune | 55.4 | 55.2 | O365,GoldG,CC3M,SBU | [config](glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_224410-ba97be24.pth)\| [log](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_224410.log.json) | +| GLIP-L | Zero-shot | 51.3 | 51.4 | FourODs,GoldG,CC3M+12M,SBU | [config](glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_l_mmdet-abfe026b.pth) | +| GLIP-L | Finetune | 59.4 | | FourODs,GoldG,CC3M+12M,SBU | [config](glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230910_100800-e9be4274.pth)\| [log](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230910_100800.log.json) | + +Note: + +1. The weights corresponding to the zero-shot model are adopted from the official weights and converted using the [script](../../tools/model_converters/glip_to_mmdet.py). We have not retrained the model for the time being. +2. Finetune refers to fine-tuning on the COCO 2017 dataset. The L model is trained using 16 A100 GPUs, while the remaining models are trained using 16 NVIDIA GeForce 3090 GPUs. +3. Taking the GLIP-T(A) model as an example, I trained it twice using the official code, and the fine-tuning mAP were 52.5 and 52.6. Therefore, the mAP we achieved in our reproduction is higher than the official results. The main reason is that we modified the `weight_decay` parameter. +4. Our experiments revealed that training for 24 epochs leads to overfitting. Therefore, we chose the best-performing model. If users want to train on a custom dataset, it is advisable to shorten the number of epochs and save the best-performing model. +5. Due to the official absence of fine-tuning hyperparameters for the GLIP-L model, we have not yet reproduced the official accuracy. I have found that overfitting can also occur, so it may be necessary to consider custom modifications to data augmentation and model enhancement. Given the high cost of training, we have not conducted any research on this matter at the moment. 
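+If BERT has been saved locally as described in the NOTE above, a small override config can redirect both the language model and the fusion head to that directory. A minimal sketch (the path is a placeholder, and the file is assumed to live next to the other GLIP configs):
+
+```python
+# Hypothetical user config: point GLIP at a locally saved copy of bert-base-uncased.
+_base_ = 'glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'
+
+lang_model_name = 'your path/bert-base-uncased'  # local directory from the NOTE above
+
+model = dict(
+    bbox_head=dict(lang_model_name=lang_model_name),
+    language_model=dict(name=lang_model_name))
+```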
+ +## LVIS Results + +| Model | Official | MiniVal APr | MiniVal APc | MiniVal APf | MiniVal AP | Val1.0 APr | Val1.0 APc | Val1.0 APf | Val1.0 AP | Pre-Train Data | Config | Download | +| :--------: | :------: | :---------: | :---------: | :---------: | :--------: | :--------: | :--------: | :--------: | :-------: | :------------------------: | :---------------------------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| GLIP-T (A) | ✔ | | | | | | | | | O365 | [config](lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth) | +| GLIP-T (A) | | 12.1 | 15.5 | 25.8 | 20.2 | 6.2 | 10.9 | 22.8 | 14.7 | O365 | [config](lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth) | +| GLIP-T (B) | ✔ | | | | | | | | | O365 | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_b_mmdet-6dfbd102.pth) | +| GLIP-T (B) | | 8.6 | 13.9 | 26.0 | 19.3 | 4.6 | 9.8 | 22.6 | 13.9 | O365 | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_b_mmdet-6dfbd102.pth) | +| GLIP-T (C) | ✔ | 14.3 | 19.4 | 31.1 | 24.6 | | | | | O365,GoldG | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_c_mmdet-2fc427dd.pth) | +| GLIP-T (C) | | 14.4 | 19.8 | 31.9 | 25.2 | 8.3 | 13.2 | 28.1 | 18.2 | O365,GoldG | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_c_mmdet-2fc427dd.pth) | +| GLIP-T | ✔ | | | | | | | | | O365,GoldG,CC3M,SBU | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_mmdet-c24ce662.pth) | +| GLIP-T | | 18.1 | 21.2 | 33.1 | 26.7 | 10.8 | 14.7 | 29.0 | 19.6 | O365,GoldG,CC3M,SBU | [config](lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_mmdet-c24ce662.pth) | +| GLIP-L | ✔ | 29.2 | 34.9 | 42.1 | 37.9 | | | | | FourODs,GoldG,CC3M+12M,SBU | [config](lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_l_mmdet-abfe026b.pth) | +| GLIP-L | | 27.9 | 33.7 | 39.7 | 36.1 | 20.2 | 25.8 | 35.3 | 28.5 | FourODs,GoldG,CC3M+12M,SBU | [config](lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/glip/glip_l_mmdet-abfe026b.pth) | + +Note: + +1. The above are zero-shot evaluation results. +2. The evaluation metric we used is LVIS FixAP. For specific details, please refer to [Evaluating Large-Vocabulary Object Detectors: The Devil is in the Details](https://arxiv.org/pdf/2102.01066.pdf). +3. We found that the performance on small models is better than the official results, but it is lower on large models. This is mainly due to the incomplete alignment of the GLIP post-processing. + +## ODinW (Object Detection in the Wild) Results + +Learning visual representations from natural language supervision has recently shown great promise in a number of pioneering works. 
In general, these language-augmented visual models demonstrate strong transferability to a variety of datasets and tasks. However, it remains challenging to evaluate the transferability of these models due to the lack of easy-to-use evaluation toolkits and public benchmarks. To tackle this, we build ELEVATER, the first benchmark and toolkit for evaluating (pre-trained) language-augmented visual models. ELEVATER is composed of three components. (i) Datasets. As downstream evaluation suites, it consists of 20 image classification datasets and 35 object detection datasets, each of which is augmented with external knowledge. (ii) Toolkit. An automatic hyper-parameter tuning toolkit is developed to facilitate model evaluation on downstream tasks. (iii) Metrics. A variety of evaluation metrics are used to measure sample-efficiency (zero-shot and few-shot) and parameter-efficiency (linear probing and full model fine-tuning). ELEVATER is a platform for Computer Vision in the Wild (CVinW), and is publicly released at https://computer-vision-in-the-wild.github.io/ELEVATER/
+
+### Results and models of ODinW13
+
+| Method | GLIP-T(A) | Official | GLIP-T(B) | Official | GLIP-T(C) | Official | GroundingDINO-T | GroundingDINO-B |
+| --------------------- | --------- | --------- | --------- | --------- | --------- | --------- | --------------- | --------------- |
+| AerialMaritimeDrone | 0.123 | 0.122 | 0.110 | 0.110 | 0.130 | 0.130 | 0.173 | 0.281 |
+| Aquarium | 0.175 | 0.174 | 0.173 | 0.169 | 0.191 | 0.190 | 0.195 | 0.445 |
+| CottontailRabbits | 0.686 | 0.686 | 0.688 | 0.688 | 0.744 | 0.744 | 0.799 | 0.808 |
+| EgoHands | 0.013 | 0.013 | 0.003 | 0.004 | 0.314 | 0.315 | 0.608 | 0.764 |
+| NorthAmericaMushrooms | 0.502 | 0.502 | 0.367 | 0.367 | 0.297 | 0.296 | 0.507 | 0.675 |
+| Packages | 0.589 | 0.589 | 0.083 | 0.083 | 0.699 | 0.699 | 0.687 | 0.670 |
+| PascalVOC | 0.512 | 0.512 | 0.541 | 0.540 | 0.565 | 0.565 | 0.563 | 0.711 |
+| pistols | 0.339 | 0.339 | 0.502 | 0.501 | 0.503 | 0.504 | 0.726 | 0.771 |
+| pothole | 0.007 | 0.007 | 0.030 | 0.030 | 0.058 | 0.058 | 0.215 | 0.478 |
+| Raccoon | 0.075 | 0.074 | 0.285 | 0.288 | 0.241 | 0.244 | 0.549 | 0.541 |
+| ShellfishOpenImages | 0.253 | 0.253 | 0.337 | 0.338 | 0.300 | 0.302 | 0.393 | 0.650 |
+| thermalDogsAndPeople | 0.372 | 0.372 | 0.475 | 0.475 | 0.510 | 0.510 | 0.657 | 0.633 |
+| VehiclesOpenImages | 0.574 | 0.566 | 0.562 | 0.547 | 0.549 | 0.534 | 0.613 | 0.647 |
+| Average | **0.325** | **0.324** | **0.320** | **0.318** | **0.392** | **0.392** | **0.514** | **0.621** |
+
+### Results and models of ODinW35
+
+| Method | GLIP-T(A) | Official | GLIP-T(B) | Official | GLIP-T(C) | Official | GroundingDINO-T | GroundingDINO-B |
+| --------------------------- | --------- | --------- | --------- | --------- | --------- | --------- | --------------- | --------------- |
+| AerialMaritimeDrone_large | 0.123 | 0.122 | 0.110 | 0.110 | 0.130 | 0.130 | 0.173 | 0.281 |
+| AerialMaritimeDrone_tiled | 0.174 | 0.174 | 0.172 | 0.172 | 0.172 | 0.172 | 0.206 | 0.364 |
+| AmericanSignLanguageLetters | 0.001 | 0.001 | 0.003 | 0.003 | 0.009 | 0.009 | 0.002 | 0.096 |
+| Aquarium | 0.175 | 0.175 | 0.173 | 0.171 | 0.192 | 0.182 | 0.195 | 0.445 |
+| BCCD | 0.016 | 0.016 | 0.001 | 0.001 | 0.000 | 0.000 | 0.161 | 0.584 |
+| boggleBoards | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.134 |
+| brackishUnderwater | 0.016 | 0.013 | 0.021 | 0.027 | 0.020 | 0.022 | 0.021 | 0.454 |
+| ChessPieces | 0.001 | 0.001 | 0.000 | 0.000 | 0.001 | 0.001 | 0.000 | 0.000 |
+|
CottontailRabbits | 0.710 | 0.709 | 0.683 | 0.683 | 0.752 | 0.752 | 0.806 | 0.797 | +| dice | 0.005 | 0.005 | 0.004 | 0.004 | 0.004 | 0.004 | 0.004 | 0.082 | +| DroneControl | 0.016 | 0.017 | 0.006 | 0.008 | 0.005 | 0.007 | 0.042 | 0.638 | +| EgoHands_generic | 0.009 | 0.010 | 0.005 | 0.006 | 0.510 | 0.508 | 0.608 | 0.764 | +| EgoHands_specific | 0.001 | 0.001 | 0.004 | 0.006 | 0.003 | 0.004 | 0.002 | 0.687 | +| HardHatWorkers | 0.029 | 0.029 | 0.023 | 0.023 | 0.033 | 0.033 | 0.046 | 0.439 | +| MaskWearing | 0.007 | 0.007 | 0.003 | 0.002 | 0.005 | 0.005 | 0.004 | 0.406 | +| MountainDewCommercial | 0.218 | 0.227 | 0.199 | 0.197 | 0.478 | 0.463 | 0.430 | 0.580 | +| NorthAmericaMushrooms | 0.502 | 0.502 | 0.450 | 0.450 | 0.497 | 0.497 | 0.471 | 0.501 | +| openPoetryVision | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.051 | +| OxfordPets_by_breed | 0.001 | 0.002 | 0.002 | 0.004 | 0.001 | 0.002 | 0.003 | 0.799 | +| OxfordPets_by_species | 0.016 | 0.011 | 0.012 | 0.009 | 0.013 | 0.009 | 0.011 | 0.872 | +| PKLot | 0.002 | 0.002 | 0.000 | 0.000 | 0.000 | 0.000 | 0.001 | 0.774 | +| Packages | 0.569 | 0.569 | 0.279 | 0.279 | 0.712 | 0.712 | 0.695 | 0.728 | +| PascalVOC | 0.512 | 0.512 | 0.541 | 0.540 | 0.565 | 0.565 | 0.563 | 0.711 | +| pistols | 0.339 | 0.339 | 0.502 | 0.501 | 0.503 | 0.504 | 0.726 | 0.771 | +| plantdoc | 0.002 | 0.002 | 0.007 | 0.007 | 0.009 | 0.009 | 0.005 | 0.376 | +| pothole | 0.007 | 0.010 | 0.024 | 0.025 | 0.085 | 0.101 | 0.215 | 0.478 | +| Raccoons | 0.075 | 0.074 | 0.285 | 0.288 | 0.241 | 0.244 | 0.549 | 0.541 | +| selfdrivingCar | 0.071 | 0.072 | 0.074 | 0.074 | 0.081 | 0.080 | 0.089 | 0.318 | +| ShellfishOpenImages | 0.253 | 0.253 | 0.337 | 0.338 | 0.300 | 0.302 | 0.393 | 0.650 | +| ThermalCheetah | 0.028 | 0.028 | 0.000 | 0.000 | 0.028 | 0.028 | 0.087 | 0.290 | +| thermalDogsAndPeople | 0.372 | 0.372 | 0.475 | 0.475 | 0.510 | 0.510 | 0.657 | 0.633 | +| UnoCards | 0.000 | 0.000 | 0.000 | 0.001 | 0.002 | 0.003 | 0.006 | 0.754 | +| VehiclesOpenImages | 0.574 | 0.566 | 0.562 | 0.547 | 0.549 | 0.534 | 0.613 | 0.647 | +| WildfireSmoke | 0.000 | 0.000 | 0.000 | 0.000 | 0.017 | 0.017 | 0.134 | 0.410 | +| websiteScreenshots | 0.003 | 0.004 | 0.003 | 0.005 | 0.005 | 0.006 | 0.012 | 0.175 | +| Average | **0.134** | **0.134** | **0.138** | **0.138** | **0.179** | **0.178** | **0.227** | **0.492** | + +### Results on Flickr30k + +| Model | Official | Pre-Train Data | Val R@1 | Val R@5 | Val R@10 | Test R@1 | Test R@5 | Test R@10 | +| ------------- | -------- | ------------------- | ------- | ------- | -------- | -------- | -------- | --------- | +| **GLIP-T(C)** | ✔ | O365, GoldG | 84.8 | 94.9 | 96.3 | 85.5 | 95.4 | 96.6 | +| **GLIP-T(C)** | | O365, GoldG | 84.9 | 94.9 | 96.3 | 85.6 | 95.4 | 96.7 | +| **GLIP-T** | | O365,GoldG,CC3M,SBU | 85.3 | 95.5 | 96.9 | 86.0 | 95.9 | 97.2 | diff --git a/mmpose/configs/mmdet/glip/flickr30k/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg_zeroshot_flickr30k.py b/mmpose/configs/mmdet/glip/flickr30k/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg_zeroshot_flickr30k.py new file mode 100644 index 0000000000000000000000000000000000000000..14d6e8aaa6372a5272467dd46d33e80979298efc --- /dev/null +++ b/mmpose/configs/mmdet/glip/flickr30k/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg_zeroshot_flickr30k.py @@ -0,0 +1,61 @@ +_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +lang_model_name = 'bert-base-uncased' + +model = dict(bbox_head=dict(early_fuse=True)) + +dataset_type = 'Flickr30kDataset' +data_root = 
'data/flickr30k_entities/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive', 'phrase_ids', 'phrases')) +] + +dataset_Flickr30k_val = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_val.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +dataset_Flickr30k_test = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_test.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +val_evaluator_Flickr30k = dict(type='Flickr30kMetric', ) + +test_evaluator_Flickr30k = dict(type='Flickr30kMetric', ) + +# ----------Config---------- # +dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest'] +datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test] +metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco.py b/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..92a85a11d57b6d3d64bfed5f9a691bca739d7ce3 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco.py @@ -0,0 +1,14 @@ +_base_ = './glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py' + +model = dict( + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + drop_path_rate=0.4, + ), + neck=dict(in_channels=[384, 768, 1536]), + bbox_head=dict(early_fuse=True, num_dyhead_blocks=8, use_checkpoint=True)) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/glip/glip_l_mmdet-abfe026b.pth' # noqa diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata.py b/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata.py new file mode 100644 index 0000000000000000000000000000000000000000..546ecfe1d513b4161322f5ffa0e51d01b2775780 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata.py @@ -0,0 +1,12 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +model = dict( + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + drop_path_rate=0.4, + ), + neck=dict(in_channels=[384, 768, 1536]), + bbox_head=dict(early_fuse=True, num_dyhead_blocks=8)) diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4b280657b315c77dd118ab84880d97dc882102a1 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py @@ -0,0 +1,155 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', 
'../_base_/default_runtime.py' +] +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth' # noqa +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GLIP', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=False), + neck=dict( + type='FPN_DropBlock', + in_channels=[192, 384, 768], + out_channels=256, + start_level=0, + relu_before_extra_convs=True, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='ATSSVLFusionHead', + lang_model_name=lang_model_name, + num_classes=80, + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), + bbox_coder=dict( + type='DeltaXYWHBBoxCoderForGLIP', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + language_model=dict(type='BertModel', name=lang_model_name), + train_cfg=dict( + assigner=dict( + type='ATSSAssigner', + topk=9, + iou_calculator=dict(type='BboxOverlaps2D_GLIP')), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# dataset settings +train_pipeline = [ + dict( + type='LoadImageFromFile', + imdecode_backend='pillow', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='GTBoxSubOne_GLIP'), + dict( + type='RandomChoiceResize', + scales=[(1333, 480), (1333, 560), (1333, 640), (1333, 720), + (1333, 800)], + keep_ratio=True, + resize_type='FixScaleResize', + backend='pillow'), + dict(type='RandomFlip_GLIP', prob=0.5), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=2, + dataset=dict( + type=_base_.dataset_type, + data_root=_base_.data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + return_classes=True, + backend_args=_base_.backend_args))) + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader + +# We did not 
adopt the official 24e optimizer strategy +# because the results indicate that the current strategy is superior. +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + }), + clip_grad=None) diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py new file mode 100644 index 0000000000000000000000000000000000000000..34a818caefcbfcdd9e51ec304fb94906c20ceb9a --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py @@ -0,0 +1,90 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GLIP', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=False), + neck=dict( + type='FPN', + in_channels=[192, 384, 768], + out_channels=256, + start_level=0, + relu_before_extra_convs=True, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='ATSSVLFusionHead', + lang_model_name=lang_model_name, + num_classes=80, + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), + bbox_coder=dict( + type='DeltaXYWHBBoxCoderForGLIP', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + ), + language_model=dict(type='BertModel', name=lang_model_name), + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +test_pipeline = [ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3487de3f3a24077f475e8451722d1b4d252a0084 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py @@ -0,0 +1,9 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py' + +model = dict(bbox_head=dict(early_fuse=True, use_checkpoint=True)) + +load_from = 
'https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_b_mmdet-6dfbd102.pth' # noqa + +optim_wrapper = dict( + optimizer=dict(lr=0.00001), + clip_grad=dict(_delete_=True, max_norm=1, norm_type=2)) diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py new file mode 100644 index 0000000000000000000000000000000000000000..6334e5e3b4043a81d154fc03a94594d93d74aed5 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +model = dict(bbox_head=dict(early_fuse=True)) diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5c315e490e7a7e05a6334d4d38ce9be9b70851b3 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py' + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_c_mmdet-2fc427dd.pth' # noqa diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg.py new file mode 100644 index 0000000000000000000000000000000000000000..24898f4df532cc2e2728265800d2f6a030e8efe0 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg.py @@ -0,0 +1 @@ +_base_ = './glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py' diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3391272e608e8098773a6435550e578f462ed886 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py' + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_mmdet-c24ce662.pth' # noqa diff --git a/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub.py b/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub.py new file mode 100644 index 0000000000000000000000000000000000000000..24898f4df532cc2e2728265800d2f6a030e8efe0 --- /dev/null +++ b/mmpose/configs/mmdet/glip/glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub.py @@ -0,0 +1 @@ +_base_ = './glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py' diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..1f79e447d3f24e364739740be504bb234adc1e98 --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_lvis.py @@ -0,0 +1,12 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py' + +model = dict( + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + drop_path_rate=0.4, + ), + neck=dict(in_channels=[384, 768, 1536]), + bbox_head=dict(early_fuse=True, 
num_dyhead_blocks=8)) diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_mini-lvis.py b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..13f1a69082b670632dfe3eb8dc50826549dcf59f --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-l_fpn_dyhead_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,12 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_mini-lvis.py' + +model = dict( + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + drop_path_rate=0.4, + ), + neck=dict(in_channels=[384, 768, 1536]), + bbox_head=dict(early_fuse=True, num_dyhead_blocks=8)) diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..4d526d59008b39996a147a2852a44d2e936113d2 --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py @@ -0,0 +1,24 @@ +_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_od_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + 'annotations/lvis_od_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_mini-lvis.py b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..70a57a3f581ca1c374dbae71059c7049a20d3a47 --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,25 @@ +_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + + 'annotations/lvis_v1_minival_inserted_image_name.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..6dc712b3bcb4f8dd1018b175d3a4e7f59be3a990 --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_lvis.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_lvis.py' + +model = dict(bbox_head=dict(early_fuse=True)) diff --git a/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_mini-lvis.py 
b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..3babb91101a6dc283ada78911672c7c7433f67ac --- /dev/null +++ b/mmpose/configs/mmdet/glip/lvis/glip_atss_swin-t_bc_fpn_dyhead_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_zeroshot_mini-lvis.py' + +model = dict(bbox_head=dict(early_fuse=True)) diff --git a/mmpose/configs/mmdet/glip/metafile.yml b/mmpose/configs/mmdet/glip/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..fbbf718b9fff3061a4e02a7d39a6c95252beb603 --- /dev/null +++ b/mmpose/configs/mmdet/glip/metafile.yml @@ -0,0 +1,111 @@ +Collections: + - Name: GLIP + Metadata: + Training Data: Objects365, GoldG, CC3M, SBU and COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: A100 GPUs + Architecture: + - Swin Transformer + - DYHead + - BERT + Paper: + URL: https://arxiv.org/abs/2112.03857 + Title: 'GLIP: Grounded Language-Image Pre-training' + README: configs/glip/README.md + Code: + URL: + Version: v3.0.0 + +Models: + - Name: glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_a_mmdet-b3654169.pth + - Name: glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365 + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_b_fpn_dyhead_pretrain_obj365.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_b_mmdet-6dfbd102.pth + - Name: glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_c_fpn_dyhead_pretrain_obj365-goldg.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_c_mmdet-2fc427dd.pth + - Name: glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_fpn_dyhead_pretrain_obj365-goldg-cc3m-sub.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_tiny_mmdet-c24ce662.pth + - Name: glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata + In Collection: GLIP + Config: configs/glip/glip_atss_swin-l_fpn_dyhead_pretrain_mixeddata.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_l_mmdet-abfe026b.pth + - Name: glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 53.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_180419-e6addd96.pth + - Name: glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 54.1 + Weights: 
https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_b_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230916_163538-650323ba.pth + - Name: glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 55.2 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_c_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_182935-4ba3fc3b.pth + - Name: glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco + In Collection: GLIP + Config: configs/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 55.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-t_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230914_224410-ba97be24.pth + - Name: glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco + In Collection: GLIP + Config: configs/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 59.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/glip/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco/glip_atss_swin-l_fpn_dyhead_16xb2_ms-2x_funtune_coco_20230910_100800-e9be4274.pth diff --git a/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw13.py b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw13.py new file mode 100644 index 0000000000000000000000000000000000000000..d38effba8c1333a2403c6bc0f20b7fde21c4c47d --- /dev/null +++ b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw13.py @@ -0,0 +1,338 @@ +_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + test_mode=True, + pipeline=base_test_pipeline, + return_classes=True) +val_evaluator_AerialMaritimeDrone = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' + +caption_prompt = None +# caption_prompt = { +# 'penguin': { +# 'suffix': ', which is black and white' +# }, +# 'puffin': { +# 'suffix': ' with orange beaks' +# }, +# 'stingray': { +# 'suffix': ' which is flat and round' +# }, +# } +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + 
data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 CottontailRabbits---------------------# +class_name = ('Cottontail-Rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' + +caption_prompt = None +# caption_prompt = {'Cottontail-Rabbit': {'name': 'rabbit'}} + +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 EgoHands---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' + +caption_prompt = None +# caption_prompt = {'hand': {'suffix': ' of a person'}} + +dataset_EgoHands = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 NorthAmericaMushrooms---------------------# +class_name = ('CoW', 'chanterelle') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + +caption_prompt = None +# caption_prompt = { +# 'CoW': { +# 'name': 'flat mushroom' +# }, +# 'chanterelle': { +# 'name': 'yellow mushroom' +# } +# } + +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' + +caption_prompt = None +# caption_prompt = { +# 'package': { +# 'prefix': 'there is a ', +# 'suffix': ' on the porch' +# } +# } + +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------7 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = 
dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------9 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' + +caption_prompt = None +# caption_prompt = { +# 'pothole': { +# 'prefix': 'there are some ', +# 'name': 'holes', +# 'suffix': ' on the road' +# } +# } + +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------10 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------11 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 
VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# +dataset_prefixes = [ + 'AerialMaritimeDrone', 'Aquarium', 'CottontailRabbits', 'EgoHands', + 'NorthAmericaMushrooms', 'Packages', 'PascalVOC', 'pistols', 'pothole', + 'Raccoon', 'ShellfishOpenImages', 'thermalDogsAndPeople', + 'VehiclesOpenImages' +] +datasets = [ + dataset_AerialMaritimeDrone, dataset_Aquarium, dataset_CottontailRabbits, + dataset_EgoHands, dataset_NorthAmericaMushrooms, dataset_Packages, + dataset_PascalVOC, dataset_pistols, dataset_pothole, dataset_Raccoon, + dataset_ShellfishOpenImages, dataset_thermalDogsAndPeople, + dataset_VehiclesOpenImages +] +metrics = [ + val_evaluator_AerialMaritimeDrone, val_evaluator_Aquarium, + val_evaluator_CottontailRabbits, val_evaluator_EgoHands, + val_evaluator_NorthAmericaMushrooms, val_evaluator_Packages, + val_evaluator_PascalVOC, val_evaluator_pistols, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_ShellfishOpenImages, + val_evaluator_thermalDogsAndPeople, val_evaluator_VehiclesOpenImages +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw35.py b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw35.py new file mode 100644 index 0000000000000000000000000000000000000000..2eaf09ed771978397b9d67048b371724418e50aa --- /dev/null +++ b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw35.py @@ -0,0 +1,794 @@ +_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone_large---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone_large = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_large = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 AerialMaritimeDrone_tiled---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) 
+_data_root = data_root + 'AerialMaritimeDrone/tiled/' +dataset_AerialMaritimeDrone_tiled = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_tiled = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 AmericanSignLanguageLetters---------------------# +class_name = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AmericanSignLanguageLetters/American Sign Language Letters.v1-v1.coco/' # noqa +dataset_AmericanSignLanguageLetters = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AmericanSignLanguageLetters = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 BCCD---------------------# +class_name = ('Platelets', 'RBC', 'WBC') +metainfo = dict(classes=class_name) +_data_root = data_root + 'BCCD/BCCD.v3-raw.coco/' +dataset_BCCD = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_BCCD = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 boggleBoards---------------------# +class_name = ('Q', 'a', 'an', 'b', 'c', 'd', 'e', 'er', 'f', 'g', 'h', 'he', + 'i', 'in', 'j', 'k', 'l', 'm', 'n', 'o', 'o ', 'p', 'q', 'qu', + 'r', 's', 't', 't\\', 'th', 'u', 'v', 'w', 'wild', 'x', 'y', 'z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'boggleBoards/416x416AutoOrient/export/' +dataset_boggleBoards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_boggleBoards = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------7 brackishUnderwater---------------------# +class_name = ('crab', 'fish', 'jellyfish', 'shrimp', 'small_fish', 'starfish') +metainfo = dict(classes=class_name) +_data_root = data_root + 'brackishUnderwater/960x540/' +dataset_brackishUnderwater 
= dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_brackishUnderwater = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 ChessPieces---------------------# +class_name = (' ', 'black bishop', 'black king', 'black knight', 'black pawn', + 'black queen', 'black rook', 'white bishop', 'white king', + 'white knight', 'white pawn', 'white queen', 'white rook') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' +dataset_ChessPieces = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ChessPieces = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------9 CottontailRabbits---------------------# +class_name = ('rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------10 dice---------------------# +class_name = ('1', '2', '3', '4', '5', '6') +metainfo = dict(classes=class_name) +_data_root = data_root + 'dice/mediumColor/export/' +dataset_dice = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_dice = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------11 DroneControl---------------------# +class_name = ('follow', 'follow_hand', 'land', 'land_hand', 'null', 'object', + 'takeoff', 'takeoff-hand') +metainfo = dict(classes=class_name) +_data_root = data_root + 'DroneControl/Drone Control.v3-raw.coco/' +dataset_DroneControl = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_DroneControl = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 EgoHands_generic---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' +caption_prompt = {'hand': {'suffix': ' of a person'}} +dataset_EgoHands_generic = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) 
+val_evaluator_EgoHands_generic = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 EgoHands_specific---------------------# +class_name = ('myleft', 'myright', 'yourleft', 'yourright') +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/specific/' +dataset_EgoHands_specific = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_specific = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------14 HardHatWorkers---------------------# +class_name = ('head', 'helmet', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'HardHatWorkers/raw/' +dataset_HardHatWorkers = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_HardHatWorkers = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------15 MaskWearing---------------------# +class_name = ('mask', 'no-mask') +metainfo = dict(classes=class_name) +_data_root = data_root + 'MaskWearing/raw/' +dataset_MaskWearing = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MaskWearing = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------16 MountainDewCommercial---------------------# +class_name = ('bottle', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'MountainDewCommercial/' +dataset_MountainDewCommercial = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MountainDewCommercial = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------17 NorthAmericaMushrooms---------------------# +class_name = ('flat mushroom', 'yellow mushroom') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------18 openPoetryVision---------------------# +class_name = ('American Typewriter', 'Andale Mono', 'Apple Chancery', 'Arial', + 'Avenir', 'Baskerville', 'Big Caslon', 'Bradley Hand', + 'Brush Script MT', 'Chalkboard', 'Comic Sans MS', 'Copperplate', + 'Courier', 'Didot', 
'Futura', 'Geneva', 'Georgia', 'Gill Sans', + 'Helvetica', 'Herculanum', 'Impact', 'Kefa', 'Lucida Grande', + 'Luminari', 'Marker Felt', 'Menlo', 'Monaco', 'Noteworthy', + 'Optima', 'PT Sans', 'PT Serif', 'Palatino', 'Papyrus', + 'Phosphate', 'Rockwell', 'SF Pro', 'SignPainter', 'Skia', + 'Snell Roundhand', 'Tahoma', 'Times New Roman', 'Trebuchet MS', + 'Verdana') +metainfo = dict(classes=class_name) +_data_root = data_root + 'openPoetryVision/512x512/' +dataset_openPoetryVision = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_openPoetryVision = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------19 OxfordPets_by_breed---------------------# +class_name = ('cat-Abyssinian', 'cat-Bengal', 'cat-Birman', 'cat-Bombay', + 'cat-British_Shorthair', 'cat-Egyptian_Mau', 'cat-Maine_Coon', + 'cat-Persian', 'cat-Ragdoll', 'cat-Russian_Blue', 'cat-Siamese', + 'cat-Sphynx', 'dog-american_bulldog', + 'dog-american_pit_bull_terrier', 'dog-basset_hound', + 'dog-beagle', 'dog-boxer', 'dog-chihuahua', + 'dog-english_cocker_spaniel', 'dog-english_setter', + 'dog-german_shorthaired', 'dog-great_pyrenees', 'dog-havanese', + 'dog-japanese_chin', 'dog-keeshond', 'dog-leonberger', + 'dog-miniature_pinscher', 'dog-newfoundland', 'dog-pomeranian', + 'dog-pug', 'dog-saint_bernard', 'dog-samoyed', + 'dog-scottish_terrier', 'dog-shiba_inu', + 'dog-staffordshire_bull_terrier', 'dog-wheaten_terrier', + 'dog-yorkshire_terrier') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-breed/' # noqa +dataset_OxfordPets_by_breed = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_breed = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------20 OxfordPets_by_species---------------------# +class_name = ('cat', 'dog') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-species/' # noqa +dataset_OxfordPets_by_species = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_species = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------21 PKLot---------------------# +class_name = ('space-empty', 'space-occupied') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PKLot/640/' # noqa +dataset_PKLot = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PKLot = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------22 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = 
data_root + 'Packages/Raw/' +caption_prompt = { + 'package': { + 'prefix': 'there is a ', + 'suffix': ' on the porch' + } +} +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------23 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------24 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------25 plantdoc---------------------# +class_name = ('Apple Scab Leaf', 'Apple leaf', 'Apple rust leaf', + 'Bell_pepper leaf', 'Bell_pepper leaf spot', 'Blueberry leaf', + 'Cherry leaf', 'Corn Gray leaf spot', 'Corn leaf blight', + 'Corn rust leaf', 'Peach leaf', 'Potato leaf', + 'Potato leaf early blight', 'Potato leaf late blight', + 'Raspberry leaf', 'Soyabean leaf', 'Soybean leaf', + 'Squash Powdery mildew leaf', 'Strawberry leaf', + 'Tomato Early blight leaf', 'Tomato Septoria leaf spot', + 'Tomato leaf', 'Tomato leaf bacterial spot', + 'Tomato leaf late blight', 'Tomato leaf mosaic virus', + 'Tomato leaf yellow virus', 'Tomato mold leaf', + 'Tomato two spotted spider mites leaf', 'grape leaf', + 'grape leaf black rot') +metainfo = dict(classes=class_name) +_data_root = data_root + 'plantdoc/416x416/' +dataset_plantdoc = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_plantdoc = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------26 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' +caption_prompt = { + 'pothole': { + 'name': 'holes', + 'prefix': 'there are some ', + 'suffix': ' on the road' + } +} +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + 
caption_prompt=caption_prompt, + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------27 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------28 selfdrivingCar---------------------# +class_name = ('biker', 'car', 'pedestrian', 'trafficLight', + 'trafficLight-Green', 'trafficLight-GreenLeft', + 'trafficLight-Red', 'trafficLight-RedLeft', + 'trafficLight-Yellow', 'trafficLight-YellowLeft', 'truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'selfdrivingCar/fixedLarge/export/' +dataset_selfdrivingCar = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_selfdrivingCar = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------29 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------30 ThermalCheetah---------------------# +class_name = ('cheetah', 'human') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ThermalCheetah/' +dataset_ThermalCheetah = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ThermalCheetah = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------31 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------32 UnoCards---------------------# +class_name = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', + 
'12', '13', '14') +metainfo = dict(classes=class_name) +_data_root = data_root + 'UnoCards/raw/' +dataset_UnoCards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_UnoCards = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------33 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------34 WildfireSmoke---------------------# +class_name = ('smoke', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'WildfireSmoke/' +dataset_WildfireSmoke = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_WildfireSmoke = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------35 websiteScreenshots---------------------# +class_name = ('button', 'field', 'heading', 'iframe', 'image', 'label', 'link', + 'text') +metainfo = dict(classes=class_name) +_data_root = data_root + 'websiteScreenshots/' +dataset_websiteScreenshots = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_websiteScreenshots = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# + +dataset_prefixes = [ + 'AerialMaritimeDrone_large', + 'AerialMaritimeDrone_tiled', + 'AmericanSignLanguageLetters', + 'Aquarium', + 'BCCD', + 'boggleBoards', + 'brackishUnderwater', + 'ChessPieces', + 'CottontailRabbits', + 'dice', + 'DroneControl', + 'EgoHands_generic', + 'EgoHands_specific', + 'HardHatWorkers', + 'MaskWearing', + 'MountainDewCommercial', + 'NorthAmericaMushrooms', + 'openPoetryVision', + 'OxfordPets_by_breed', + 'OxfordPets_by_species', + 'PKLot', + 'Packages', + 'PascalVOC', + 'pistols', + 'plantdoc', + 'pothole', + 'Raccoons', + 'selfdrivingCar', + 'ShellfishOpenImages', + 'ThermalCheetah', + 'thermalDogsAndPeople', + 'UnoCards', + 'VehiclesOpenImages', + 'WildfireSmoke', + 'websiteScreenshots', +] + +datasets = [ + dataset_AerialMaritimeDrone_large, dataset_AerialMaritimeDrone_tiled, + dataset_AmericanSignLanguageLetters, dataset_Aquarium, dataset_BCCD, + dataset_boggleBoards, dataset_brackishUnderwater, dataset_ChessPieces, + dataset_CottontailRabbits, dataset_dice, dataset_DroneControl, + dataset_EgoHands_generic, dataset_EgoHands_specific, + dataset_HardHatWorkers, 
dataset_MaskWearing, dataset_MountainDewCommercial, + dataset_NorthAmericaMushrooms, dataset_openPoetryVision, + dataset_OxfordPets_by_breed, dataset_OxfordPets_by_species, dataset_PKLot, + dataset_Packages, dataset_PascalVOC, dataset_pistols, dataset_plantdoc, + dataset_pothole, dataset_Raccoon, dataset_selfdrivingCar, + dataset_ShellfishOpenImages, dataset_ThermalCheetah, + dataset_thermalDogsAndPeople, dataset_UnoCards, dataset_VehiclesOpenImages, + dataset_WildfireSmoke, dataset_websiteScreenshots +] + +metrics = [ + val_evaluator_AerialMaritimeDrone_large, + val_evaluator_AerialMaritimeDrone_tiled, + val_evaluator_AmericanSignLanguageLetters, val_evaluator_Aquarium, + val_evaluator_BCCD, val_evaluator_boggleBoards, + val_evaluator_brackishUnderwater, val_evaluator_ChessPieces, + val_evaluator_CottontailRabbits, val_evaluator_dice, + val_evaluator_DroneControl, val_evaluator_EgoHands_generic, + val_evaluator_EgoHands_specific, val_evaluator_HardHatWorkers, + val_evaluator_MaskWearing, val_evaluator_MountainDewCommercial, + val_evaluator_NorthAmericaMushrooms, val_evaluator_openPoetryVision, + val_evaluator_OxfordPets_by_breed, val_evaluator_OxfordPets_by_species, + val_evaluator_PKLot, val_evaluator_Packages, val_evaluator_PascalVOC, + val_evaluator_pistols, val_evaluator_plantdoc, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_selfdrivingCar, + val_evaluator_ShellfishOpenImages, val_evaluator_ThermalCheetah, + val_evaluator_thermalDogsAndPeople, val_evaluator_UnoCards, + val_evaluator_VehiclesOpenImages, val_evaluator_WildfireSmoke, + val_evaluator_websiteScreenshots +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw13.py b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw13.py new file mode 100644 index 0000000000000000000000000000000000000000..c3479b62b781fa38282b26ab69763d1766301dc7 --- /dev/null +++ b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw13.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw13.py' + +model = dict(bbox_head=dict(early_fuse=True)) diff --git a/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw35.py b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw35.py new file mode 100644 index 0000000000000000000000000000000000000000..182afc66c93441da85d7e0116970e45a58c492d0 --- /dev/null +++ b/mmpose/configs/mmdet/glip/odinw/glip_atss_swin-t_bc_fpn_dyhead_pretrain_odinw35.py @@ -0,0 +1,3 @@ +_base_ = './glip_atss_swin-t_a_fpn_dyhead_pretrain_odinw35.py' + +model = dict(bbox_head=dict(early_fuse=True)) diff --git a/mmpose/configs/mmdet/glip/odinw/override_category.py b/mmpose/configs/mmdet/glip/odinw/override_category.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff05fc6e5e4d0989cf7fcf7af4dc902ee99f3a3 --- /dev/null +++ b/mmpose/configs/mmdet/glip/odinw/override_category.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse + +import mmengine + + +def parse_args(): + parser = argparse.ArgumentParser(description='Override Category') + parser.add_argument('data_root') + return parser.parse_args() + + +def main(): + args = parse_args() + + ChessPieces = [{ + 'id': 1, + 'name': ' ', + 'supercategory': 'pieces' + }, { + 'id': 2, + 'name': 'black bishop', + 'supercategory': 'pieces' + }, { + 'id': 3, + 'name': 'black king', + 'supercategory': 'pieces' + }, { + 'id': 4, + 'name': 'black knight', + 'supercategory': 'pieces' + }, { + 'id': 5, + 'name': 'black pawn', + 'supercategory': 'pieces' + }, { + 'id': 6, + 'name': 'black queen', + 'supercategory': 'pieces' + }, { + 'id': 7, + 'name': 'black rook', + 'supercategory': 'pieces' + }, { + 'id': 8, + 'name': 'white bishop', + 'supercategory': 'pieces' + }, { + 'id': 9, + 'name': 'white king', + 'supercategory': 'pieces' + }, { + 'id': 10, + 'name': 'white knight', + 'supercategory': 'pieces' + }, { + 'id': 11, + 'name': 'white pawn', + 'supercategory': 'pieces' + }, { + 'id': 12, + 'name': 'white queen', + 'supercategory': 'pieces' + }, { + 'id': 13, + 'name': 'white rook', + 'supercategory': 'pieces' + }] + + _data_root = args.data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = ChessPieces + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + CottontailRabbits = [{ + 'id': 1, + 'name': 'rabbit', + 'supercategory': 'Cottontail-Rabbit' + }] + + _data_root = args.data_root + 'CottontailRabbits/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = CottontailRabbits + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + NorthAmericaMushrooms = [{ + 'id': 1, + 'name': 'flat mushroom', + 'supercategory': 'mushroom' + }, { + 'id': 2, + 'name': 'yellow mushroom', + 'supercategory': 'mushroom' + }] + + _data_root = args.data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = NorthAmericaMushrooms + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + +if __name__ == '__main__': + main() diff --git a/mmpose/configs/mmdet/gn+ws/README.md b/mmpose/configs/mmdet/gn+ws/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ef8cfc812c40712db9006f7c25d0d3a1f1a8a12c --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/README.md @@ -0,0 +1,54 @@ +# GN + WS + +> [Weight Standardization](https://arxiv.org/abs/1903.10520) + + + +## Abstract + +Batch Normalization (BN) has become an out-of-box technique to improve deep network training. However, its effectiveness is limited for micro-batch training, i.e., each GPU typically has only 1-2 images for training, which is inevitable for many computer vision tasks, e.g., object detection and semantic segmentation, constrained by memory consumption. To address this issue, we propose Weight Standardization (WS) and Batch-Channel Normalization (BCN) to bring two success factors of BN into micro-batch training: 1) the smoothing effects on the loss landscape and 2) the ability to avoid harmful elimination singularities along the training trajectory. 
WS standardizes the weights in convolutional layers to smooth the loss landscape by reducing the Lipschitz constants of the loss and the gradients; BCN combines batch and channel normalizations and leverages estimated statistics of the activations in convolutional layers to keep networks away from elimination singularities. We validate WS and BCN on comprehensive computer vision tasks, including image classification, object detection, instance segmentation, video recognition and semantic segmentation. All experimental results consistently show that WS and BCN improve micro-batch training significantly. Moreover, using WS and BCN with micro-batch training is even able to match or outperform the performances of BN with large-batch training. + +
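+The reparameterization itself is a small change to the convolution's forward pass. Below is a minimal PyTorch sketch of the idea (the `WSConv2d` class name is illustrative only, not part of this repo); the configs in this folder instead use mmcv's `ConvWS` op, selected via `conv_cfg=dict(type='ConvWS')`.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class WSConv2d(nn.Conv2d):
+    """Conv2d with Weight Standardization (illustrative sketch only).
+
+    Each filter is normalized to zero mean and unit variance over its
+    fan-in (in_channels x kH x kW) before the convolution is applied.
+    """
+
+    def forward(self, x):
+        w = self.weight
+        mean = w.mean(dim=(1, 2, 3), keepdim=True)
+        std = w.std(dim=(1, 2, 3), keepdim=True) + 1e-5
+        w = (w - mean) / std
+        return F.conv2d(x, w, self.bias, self.stride, self.padding,
+                        self.dilation, self.groups)
+
+
+# WS is typically paired with GroupNorm, as in the GN+WS configs below.
+layer = nn.Sequential(WSConv2d(64, 64, 3, padding=1), nn.GroupNorm(32, 64))
+out = layer(torch.randn(2, 64, 56, 56))
+```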
+ +
+ +## Results and Models + +Faster R-CNN + +| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----------: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | GN+WS | 1x | 5.9 | 11.7 | 39.7 | - | [config](./faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130_210936.log.json) | +| R-101-FPN | pytorch | GN+WS | 1x | 8.9 | 9.0 | 41.7 | - | [config](./faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205_232146.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 1x | 7.0 | 10.3 | 40.7 | - | [config](./faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203_220113.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 1x | 10.8 | 7.6 | 42.1 | - | [config](./faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212_195302.log.json) | + +Mask R-CNN + +| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----------: | :-------: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | GN+WS | 2x | 7.3 | 10.5 | 40.6 | 36.6 | [config](./mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226_062128.log.json) | +| R-101-FPN | pytorch | GN+WS | 2x | 10.3 | 8.6 | 42.0 | 37.7 | [config](./mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212_213627.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 2x | 8.4 | 9.3 | 41.1 | 37.0 | [config](./mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216_201500.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 2x | 12.2 | 7.1 | 42.1 | 37.9 | [config](./mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319_104101.log.json) | +| R-50-FPN | pytorch | GN+WS | 20-23-24e | 7.3 | - | 41.1 | 37.1 | [config](./mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213_035123.log.json) | +| R-101-FPN | pytorch | GN+WS | 20-23-24e | 10.3 | - | 43.1 | 38.6 | [config](./mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213_130142.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 8.4 | - | 42.1 | 38.0 | [config](./mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226_093732.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 12.2 | - | 42.7 | 38.5 | [config](./mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316_013741.log.json) | + +Note: + +- GN+WS requires about 5% more memory than GN, and it is only 5% slower than GN. +- In the paper, a 20-23-24e lr schedule is used instead of 2x. +- The X-50-GN and X-101-GN pretrained models are also shared by the authors. + +## Citation + +```latex +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +``` diff --git a/mmpose/configs/mmdet/gn+ws/faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py b/mmpose/configs/mmdet/gn+ws/faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a4cb8281ac6d4b43a615ba1a05903770d8ee2f69 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py b/mmpose/configs/mmdet/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a044c99a2e84de71822edb62543570891141b25 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), + neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg))) diff --git a/mmpose/configs/mmdet/gn+ws/faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py b/mmpose/configs/mmdet/gn+ws/faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a317d2ac830d95788084eaa8d374838b34a365 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py b/mmpose/configs/mmdet/gn+ws/faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dd75a2c004b8cc04411d47d8b9db6ba0ec4ffcb0 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=50, + 
groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1815e3f85b9fd5d7204b08cd60a13980a382fd51 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 23], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5de37dee5e86e202c211464eaa08dd295dba44b2 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..287c652045d6230411043f2abab34be4f6106687 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 23], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ed8b1b73fe8695fc6bbb4054405192fca995cf81 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py @@ -0,0 +1,33 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), + neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git 
a/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce9193579b914f8dc0804cb73c3d8e41b153655 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 23], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bcfc371e774470ede7d171b4268db919385775ab --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py @@ -0,0 +1,19 @@ +_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py' +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..af9ea5ab476b8ea3247062261726bef6b6bc1b0c --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 23], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ab2b14042e9510ab14698e7a64c68d6ff60835e1 --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py @@ -0,0 +1,19 @@ +_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py' +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/mmpose/configs/mmdet/gn+ws/metafile.yml b/mmpose/configs/mmdet/gn+ws/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..89b91072924a31e53db1e95df30b47636a67b74b --- /dev/null +++ b/mmpose/configs/mmdet/gn+ws/metafile.yml @@ -0,0 +1,263 @@ +Collections: + - 
Name: Weight Standardization + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + - Weight Standardization + Paper: + URL: https://arxiv.org/abs/1903.10520 + Title: 'Weight Standardization' + README: configs/gn+ws/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn%2Bws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_fpn_gn_ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py + Metadata: + Training Memory (GB): 5.9 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth + + - Name: faster-rcnn_r101_fpn_gn-ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py + Metadata: + Training Memory (GB): 8.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth + + - Name: faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 97.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth + + - Name: faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 131.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth + + - Name: mask-rcnn_r50_fpn_gn_ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py + Metadata: + Training Memory (GB): 7.3 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth + + - Name: mask-rcnn_r101_fpn_gn-ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth + + - Name: mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py + Metadata: + Training Memory (GB): 8.4 + inference time (ms/im): + - value: 107.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth + + - Name: mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py + Metadata: + Training Memory (GB): 12.2 + inference time (ms/im): + - value: 140.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth + + - Name: mask-rcnn_r50_fpn_gn_ws-all_20_23_24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth + + - Name: mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py + Metadata: + Training Memory (GB): 10.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth + + - Name: mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py + Metadata: + Training Memory (GB): 8.4 + Epochs: 24 + Results: + - Task: Object Detection 
+ Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth + + - Name: mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth diff --git a/mmpose/configs/mmdet/gn/README.md b/mmpose/configs/mmdet/gn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1bc8192f24a56b11449944fc3d949302dfa781b6 --- /dev/null +++ b/mmpose/configs/mmdet/gn/README.md @@ -0,0 +1,41 @@ +# GN + +> [Group Normalization](https://arxiv.org/abs/1803.08494) + + + +## Abstract + +Batch Normalization (BN) is a milestone technique in the development of deep learning, enabling various networks to train. However, normalizing along the batch dimension introduces problems --- BN's error increases rapidly when the batch size becomes smaller, caused by inaccurate batch statistics estimation. This limits BN's usage for training larger models and transferring features to computer vision tasks including detection, segmentation, and video, which require small batches constrained by memory consumption. In this paper, we present Group Normalization (GN) as a simple alternative to BN. GN divides the channels into groups and computes within each group the mean and variance for normalization. GN's computation is independent of batch sizes, and its accuracy is stable in a wide range of batch sizes. On ResNet-50 trained in ImageNet, GN has 10.6% lower error than its BN counterpart when using a batch size of 2; when using typical batch sizes, GN is comparably good with BN and outperforms other normalization variants. Moreover, GN can be naturally transferred from pre-training to fine-tuning. GN can outperform its BN-based counterparts for object detection and segmentation in COCO, and for video classification in Kinetics, showing that GN can effectively replace the powerful BN in a variety of tasks. GN can be easily implemented by a few lines of code in modern libraries. + +
+ +
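As the abstract notes, GN takes only a few lines to implement. The sketch below is a plain-PyTorch illustration (not the mmdetection wrapper selected by `norm_cfg=dict(type='GN', num_groups=32, ...)` in the configs that follow); it reproduces `nn.GroupNorm` by hand to make explicit that the statistics are computed per sample within each channel group, independent of the batch size.

```python
import torch
import torch.nn as nn

x = torch.randn(2, 64, 56, 56)                     # (N, C, H, W) feature map
gn = nn.GroupNorm(num_groups=32, num_channels=64)  # C must be divisible by the group count

# By hand: fold each group's channels and spatial positions together,
# normalize within the group, then apply the per-channel affine parameters.
n, c, h, w = x.shape
g = 32
xg = x.view(n, g, -1)
mean = xg.mean(dim=-1, keepdim=True)
var = xg.var(dim=-1, unbiased=False, keepdim=True)
manual = ((xg - mean) / torch.sqrt(var + gn.eps)).view(n, c, h, w)
manual = manual * gn.weight.view(1, c, 1, 1) + gn.bias.view(1, c, 1, 1)

assert torch.allclose(gn(x), manual, atol=1e-4)    # batch size never enters the statistics
```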
+ +## Results and Models + +| Backbone | model | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN (d) | Mask R-CNN | 2x | 7.1 | 11.0 | 40.2 | 36.4 | [config](./mask-rcnn_r50_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206_050355.log.json) | +| R-50-FPN (d) | Mask R-CNN | 3x | 7.1 | - | 40.5 | 36.7 | [config](./mask-rcnn_r50_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214_063512.log.json) | +| R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 9.0 | 41.9 | 37.6 | [config](./mask-rcnn_r101_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205_234402.log.json) | +| R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | | 42.1 | 38.0 | [config](./mask-rcnn_r101_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609.log.json) | +| R-50-FPN (c) | Mask R-CNN | 2x | 7.1 | 10.9 | 40.0 | 36.1 | [config](./mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207_225832.log.json) | +| R-50-FPN (c) | Mask R-CNN | 3x | 7.1 | - | 40.1 | 36.2 | [config](./mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225_235135.log.json) | + +**Notes:** + +- (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk). +- The `3x` schedule is epoch \[28, 34, 36\]. 
+- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@inproceedings{wu2018group, + title={Group Normalization}, + author={Wu, Yuxin and He, Kaiming}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2018} +} +``` diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_2x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..54f57d8d0855d07c696907d8c7c0758e4c13a573 --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_gn'))) diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_3x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a94e063ecd2a5e2fd83eb78aa4d7ddd8f51e2b9e --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r101_fpn_gn-all_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = './mask-rcnn_r101_fpn_gn-all_2x_coco.py' + +# learning policy +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5515ec14a47a0dfa58acf6c46bc40d77ce39ac3d --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py @@ -0,0 +1,31 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f7a97e8e0482836b225e832be2e3de4ae99947 --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = './mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py' + +# learning policy +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..1313b22e4795239d5148fb8d665cdadb5fac8e4f --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py @@ -0,0 +1,36 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_gn')), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_3x_coco.py b/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e425de951bb0419d1d1596e45637be1d914a8034 --- /dev/null +++ b/mmpose/configs/mmdet/gn/mask-rcnn_r50_fpn_gn-all_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py' + +# learning policy +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/gn/metafile.yml b/mmpose/configs/mmdet/gn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..9781dc9393f17b89a8e4228ef905a06dfdbc7eb5 --- /dev/null +++ b/mmpose/configs/mmdet/gn/metafile.yml @@ -0,0 +1,162 @@ +Collections: + - Name: Group Normalization + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + Paper: + URL: https://arxiv.org/abs/1803.08494 + Title: 'Group Normalization' + README: configs/gn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py + Version: v2.0.0 + +Models: + - Name: mask-rcnn_r50_fpn_gn-all_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth + + - Name: mask-rcnn_r50_fpn_gn-all_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r50_fpn_gn-all_3x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + - Task: Instance 
Segmentation + Dataset: COCO + Metrics: + mask AP: 36.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth + + - Name: mask-rcnn_r101_fpn_gn-all_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r101_fpn_gn-all_2x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth + + - Name: mask-rcnn_r101_fpn_gn-all_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r101_fpn_gn-all_3x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth + + - Name: mask-rcnn_r50_fpn_gn-all_contrib_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth + + - Name: mask-rcnn_r50_fpn_gn-all_contrib_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth diff --git a/mmpose/configs/mmdet/grid_rcnn/README.md b/mmpose/configs/mmdet/grid_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3de810afc66c29df6ab9bd1728d0cb8b57316acf --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/README.md @@ -0,0 +1,47 @@ +# Grid R-CNN + +> [Grid R-CNN](https://arxiv.org/abs/1811.12030) + + + +## Abstract + +This paper proposes a novel object detection framework named Grid R-CNN, which adopts a grid guided localization mechanism for accurate object detection. Different from the traditional regression based methods, the Grid R-CNN captures the spatial information explicitly and enjoys the position sensitive property of fully convolutional architecture. 
Instead of using only two independent points, we design a multi-point supervision formulation to encode more clues in order to reduce the impact of inaccurate prediction of specific points. To take full advantage of the correlation of points in a grid, we propose a two-stage information fusion strategy to fuse feature maps of neighbor grid points. The grid guided localization approach can be easily extended to different state-of-the-art detection frameworks. Grid R-CNN leads to high quality object localization, and experiments demonstrate that it achieves a 4.1% AP gain at IoU=0.8 and a 10.0% AP gain at IoU=0.9 on the COCO benchmark compared to Faster R-CNN with a Res50 backbone and FPN architecture. + +Grid R-CNN is a well-performing object detection framework. It transforms the traditional box offset regression problem into a grid point estimation problem. With the guidance of the grid points, it can obtain high-quality localization results. However, the speed of Grid R-CNN is not satisfactory. In this technical report, we present Grid R-CNN Plus, a better and faster version of Grid R-CNN. We have made several updates that significantly speed up the framework and simultaneously improve the accuracy. On the COCO dataset, the Res50-FPN based Grid R-CNN Plus detector achieves an mAP of 40.4%, outperforming the baseline on the same model by 3.0 points with similar inference time. + +
+ +
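A toy illustration of grid-guided localization, heavily simplified relative to the actual `GridHead` below (which fuses neighbouring point heatmaps and maps predictions back to image coordinates): each of the nine grid points (`grid_points=9` in the config) is decoded from its heatmap argmax, and the box is taken as the tightest rectangle enclosing the decoded points.

```python
import torch


def decode_grid_points(heatmaps: torch.Tensor) -> torch.Tensor:
    """heatmaps: (num_points, H, W) score maps predicted for one RoI.
    Returns an (x1, y1, x2, y2) box in heatmap coordinates."""
    num_points, h, w = heatmaps.shape
    flat_idx = heatmaps.reshape(num_points, -1).argmax(dim=1)
    ys = torch.div(flat_idx, w, rounding_mode='floor').float()
    xs = (flat_idx % w).float()
    # the extreme grid points define the box
    return torch.stack([xs.min(), ys.min(), xs.max(), ys.max()])


heatmaps = torch.rand(9, 14, 14)   # 3x3 grid of points on a 14x14 RoI feature map
print(decode_grid_points(heatmaps))
```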
+ +## Results and Models + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 2x | 5.1 | 15.0 | 40.4 | [config](./grid-rcnn_r50_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130_221140.log.json) | +| R-101 | 2x | 7.0 | 12.6 | 41.5 | [config](./grid-rcnn_r101_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309_164224.log.json) | +| X-101-32x4d | 2x | 8.3 | 10.8 | 42.9 | [config](./grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130_215413.log.json) | +| X-101-64x4d | 2x | 11.3 | 7.7 | 43.0 | [config](./grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204_080641.log.json) | + +**Notes:** + +- All models are trained with 8 GPUs instead of 32 GPUs in the original paper. +- The warming up lasts for 1 epoch and `2x` here indicates 25 epochs. 
+ +## Citation + +```latex +@inproceedings{lu2019grid, + title={Grid r-cnn}, + author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} + +@article{lu2019grid, + title={Grid R-CNN Plus: Faster and Better}, + author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, + journal={arXiv preprint arXiv:1906.05688}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r101_fpn_gn-head_2x_coco.py b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r101_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..46d41ed4ed5d1d6345e98434221cc5b07c60767d --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r101_fpn_gn-head_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..358280630fa96e40ac7834cbda6b1ad3dc689c55 --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py' + +# training schedule +max_epochs = 12 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..228fca2323ceec2052a3835089d987a2643c53c1 --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py @@ -0,0 +1,160 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='GridRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='GridRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + with_reg=False, 
+ in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False), + grid_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + grid_head=dict( + type='GridHead', + grid_points=9, + num_convs=8, + in_channels=256, + point_feat_channels=64, + norm_cfg=dict(type='GN', num_groups=36), + loss_grid=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_radius=1, + pos_weight=-1, + max_num_grid=192, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.03, + nms=dict(type='nms', iou_threshold=0.3), + max_per_img=100))) +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# training schedule +max_epochs = 25 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 80, + by_epoch=False, + begin=0, + end=3665), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[17, 23], + gamma=0.1) +] + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dddf157beb6667887d0cd920cb2803e340d43183 --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py @@ -0,0 +1,13 @@ +_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco.py b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e4ff50f546ae660cf398c2cb1c6f67ca20848c0f --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco.py @@ -0,0 +1,13 @@ +_base_ = './grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/grid_rcnn/metafile.yml b/mmpose/configs/mmdet/grid_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..cee91e3b88e7bafa27e705713f2bc45d0dc872d0 --- /dev/null +++ b/mmpose/configs/mmdet/grid_rcnn/metafile.yml @@ -0,0 +1,101 @@ +Collections: + - Name: Grid R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - Dilated Convolution + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1906.05688 + Title: 'Grid R-CNN' + README: configs/grid_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/grid_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: grid-rcnn_r50_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth + + - Name: grid-rcnn_r101_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid-rcnn_r101_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 79.37 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth + + - Name: grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 
8.3 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth + + - Name: grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid-rcnn_x101-64x4d_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 11.3 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth diff --git a/mmpose/configs/mmdet/groie/README.md b/mmpose/configs/mmdet/groie/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9792df93c1e9093d298467ee3037991c09fd1dae --- /dev/null +++ b/mmpose/configs/mmdet/groie/README.md @@ -0,0 +1,72 @@ +# GRoIE + +> [A novel Region of Interest Extraction Layer for Instance Segmentation](https://arxiv.org/abs/2004.13665) + + + +## Abstract + +Given the wide diffusion of deep neural network architectures for computer vision tasks, several new applications are nowadays more and more feasible. Among them, a particular attention has been recently given to instance segmentation, by exploiting the results achievable by two-stage networks (such as Mask R-CNN or Faster R-CNN), derived from R-CNN. In these complex architectures, a crucial role is played by the Region of Interest (RoI) extraction layer, devoted to extracting a coherent subset of features from a single Feature Pyramid Network (FPN) layer attached on top of a backbone. +This paper is motivated by the need to overcome the limitations of existing RoI extractors which select only one (the best) layer from FPN. Our intuition is that all the layers of FPN retain useful information. Therefore, the proposed layer (called Generic RoI Extractor - GRoIE) introduces non-local building blocks and attention mechanisms to boost the performance. +A comprehensive ablation study at component level is conducted to find the best set of algorithms and parameters for the GRoIE layer. Moreover, GRoIE can be integrated seamlessly with every two-stage architecture for both object detection and instance segmentation tasks. Therefore, the improvements brought about by the use of GRoIE in different state-of-the-art architectures are also evaluated. The proposed layer leads up to gain a 1.1% AP improvement on bounding box detection and 1.7% AP improvement on instance segmentation. + +
+ +
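A stripped-down sketch of the idea, under simplifying assumptions; the class name `TinyGenericRoIExtractor` is hypothetical and this is not mmdetection's `GenericRoIExtractor`. It pools every RoI from all four FPN levels with torchvision's `roi_align`, applies a shared pre-processing convolution, and sum-aggregates the results; the configs below additionally attach an attention-based post-processing module via `post_cfg`.

```python
import torch
import torch.nn as nn
from torchvision.ops import roi_align


class TinyGenericRoIExtractor(nn.Module):
    """Pool each RoI from *every* FPN level and sum the pre-processed results."""

    def __init__(self, channels=256, out_size=7, strides=(4, 8, 16, 32)):
        super().__init__()
        self.out_size = out_size
        self.strides = strides
        self.pre = nn.Conv2d(channels, channels, kernel_size=5, padding=2)

    def forward(self, feats, rois):
        # feats: list of (N, C, H_l, W_l) FPN maps, one per stride
        # rois:  (R, 5) boxes as (batch_index, x1, y1, x2, y2) in image coordinates
        acc = 0
        for feat, stride in zip(feats, self.strides):
            pooled = roi_align(feat, rois, self.out_size,
                               spatial_scale=1.0 / stride, sampling_ratio=2)
            acc = acc + self.pre(pooled)
        return acc  # (R, C, out_size, out_size)


feats = [torch.randn(1, 256, s, s) for s in (100, 50, 25, 13)]
rois = torch.tensor([[0., 32., 32., 160., 160.]])
print(TinyGenericRoIExtractor()(feats, rois).shape)  # torch.Size([1, 256, 7, 7])
```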
+ +## Introduction + +By Leonardo Rossi, Akbar Karimi and Andrea Prati from +[IMPLab](http://implab.ce.unipr.it/). + +We provide configs to reproduce the results in the paper for +"*A novel Region of Interest Extraction Layer for Instance Segmentation*" +on COCO object detection. + +This paper is motivated by the need to overcome to the limitations of existing +RoI extractors which select only one (the best) layer from FPN. + +Our intuition is that all the layers of FPN retain useful information. + +Therefore, the proposed layer (called Generic RoI Extractor - **GRoIE**) +introduces non-local building blocks and attention mechanisms to boost the +performance. + +## Results and Models + +The results on COCO 2017 minival (5k images) are shown in the below table. + +### Application of GRoIE to different architectures + +| Backbone | Method | Lr schd | box AP | mask AP | Config | Download | +| :-------: | :-------------: | :-----: | :----: | :-----: | :------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster Original | 1x | 37.4 | | [config](../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN | + GRoIE | 1x | 38.3 | | [config](./faste-rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | +| R-50-FPN | Grid R-CNN | 1x | 39.1 | | [config](./grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059-4b75d86f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059.log.json) | +| R-50-FPN | + GRoIE | 1x | | | [config](./grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py) | | +| R-50-FPN | Mask R-CNN | 1x | 38.2 | 34.7 | [config](../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| R-50-FPN | + GRoIE | 1x | 39.0 | 36.0 | [config](./mask-rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | +| R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | +| R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask-rcnn_r50_fpn_syncbn-r4-gcb-c3-c5-groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | +| R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | +| R-101-FPN | + GRoIE | 1x | 42.6 | 38.7 | [config](./mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | + +## Citation + +If you use this work or benchmark in your research, please cite this project. + +```latex +@inproceedings{rossi2021novel, + title={A novel region of interest extraction layer for instance segmentation}, + author={Rossi, Leonardo and Karimi, Akbar and Prati, Andrea}, + booktitle={2020 25th International Conference on Pattern Recognition (ICPR)}, + pages={2203--2209}, + year={2021}, + organization={IEEE} +} +``` + +## Contact + +The implementation of GRoIE is currently maintained by +[Leonardo Rossi](https://github.com/hachreak/). 
diff --git a/mmpose/configs/mmdet/groie/faste-rcnn_r50_fpn_groie_1x_coco.py b/mmpose/configs/mmdet/groie/faste-rcnn_r50_fpn_groie_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0fbe8a32c3a81e9b312a02f79f3495171387d9f0 --- /dev/null +++ b/mmpose/configs/mmdet/groie/faste-rcnn_r50_fpn_groie_1x_coco.py @@ -0,0 +1,25 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/mmpose/configs/mmdet/groie/grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py b/mmpose/configs/mmdet/groie/grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dadccb79c2288f16eb4a1fa33269e4a8f5a55c9b --- /dev/null +++ b/mmpose/configs/mmdet/groie/grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../grid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + grid_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/mmpose/configs/mmdet/groie/mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco.py b/mmpose/configs/mmdet/groie/mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5699b4284a76fe633afd81acb0b047a81df6afd2 --- /dev/null +++ b/mmpose/configs/mmdet/groie/mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + 
type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_groie_1x_coco.py b/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_groie_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4c9521e2f5730b74efc51f2051f861bfe5f8192d --- /dev/null +++ b/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_syncbn-r4-gcb-c3-c5-groie_1x_coco.py b/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_syncbn-r4-gcb-c3-c5-groie_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..22e97b6959a0bd13ae4432c806c61ca3d899f9ea --- /dev/null +++ b/mmpose/configs/mmdet/groie/mask-rcnn_r50_fpn_syncbn-r4-gcb-c3-c5-groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/mmpose/configs/mmdet/groie/metafile.yml b/mmpose/configs/mmdet/groie/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..ce957004719cb542a51c48e7e07a3d94d6bdee18 --- /dev/null +++ b/mmpose/configs/mmdet/groie/metafile.yml @@ -0,0 +1,94 @@ +Collections: + - Name: GRoIE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - 
Generic RoI Extractor + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/2004.13665 + Title: 'A novel Region of Interest Extraction Layer for Instance Segmentation' + README: configs/groie/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/roi_extractors/groie.py#L15 + Version: v2.1.0 + +Models: + - Name: faster-rcnn_r50_fpn_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth + + - Name: grid-rcnn_r50_fpn_gn-head-groie_1x_coco + In Collection: GRoIE + Config: configs/groie/grid-rcnn_r50_fpn_gn-head-groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059-4b75d86f.pth + + - Name: mask-rcnn_r50_fpn_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask-rcnn_r50_fpn_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth + + - Name: mask-rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask-rcnn_r50_fpn_syncbn-r4-gcb-c3-c5-groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth + + - Name: mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask-rcnn_r101_fpn_syncbn-r4-gcb_c3-c5-groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth diff --git a/mmpose/configs/mmdet/grounding_dino/README.md b/mmpose/configs/mmdet/grounding_dino/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2a527828a467df069bbdbe624b55c1afcaa3521f --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/README.md @@ -0,0 +1,317 @@ +# Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection + +[Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499) + + + +## Abstract + +In this paper, we present an open-set object detector, called Grounding DINO, by marrying Transformer-based detector DINO with grounded pre-training, which can detect arbitrary objects with human inputs such as category 
names or referring expressions. The key solution of open-set object detection is introducing language to a closed-set detector for open-set concept generalization. To effectively fuse language and vision modalities, we conceptually divide a closed-set detector into three phases and propose a tight fusion solution, which includes a feature enhancer, a language-guided query selection, and a cross-modality decoder for cross-modality fusion. While previous works mainly evaluate open-set object detection on novel categories, we propose to also perform evaluations on referring expression comprehension for objects specified with attributes. Grounding DINO performs remarkably well on all three settings, including benchmarks on COCO, LVIS, ODinW, and RefCOCO/+/g. Grounding DINO achieves a 52.5 AP on the COCO detection zero-shot transfer benchmark, i.e., without any training data from COCO. It sets a new record on the ODinW zero-shot benchmark with a mean 26.1 AP. + +
+ +
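To help parse the abstract's three phases (feature enhancer, language-guided query selection, cross-modality decoder), here is a deliberately tiny, runnable schematic in plain PyTorch. Every module below is a stand-in (generic multi-head attention, a linear box head); the names, shapes and similarity-based classification are assumptions for illustration only and do not reproduce the real Grounding DINO blocks defined in the configs added below.

```python
import torch
import torch.nn as nn


class ToyGroundingDetector(nn.Module):
    def __init__(self, dim=256, num_queries=100):
        super().__init__()
        self.num_queries = num_queries
        # (1) feature enhancer: image <-> text cross-attention
        self.img_from_txt = nn.MultiheadAttention(dim, 8, batch_first=True)
        self.txt_from_img = nn.MultiheadAttention(dim, 8, batch_first=True)
        # (3) cross-modality decoder: queries attend to text, then to image
        self.q_from_txt = nn.MultiheadAttention(dim, 8, batch_first=True)
        self.q_from_img = nn.MultiheadAttention(dim, 8, batch_first=True)
        self.bbox_head = nn.Linear(dim, 4)

    def forward(self, img_tokens, txt_tokens):
        # (1) fuse the two modalities
        img, _ = self.img_from_txt(img_tokens, txt_tokens, txt_tokens)
        txt, _ = self.txt_from_img(txt_tokens, img_tokens, img_tokens)
        # (2) language-guided query selection: image tokens most similar to
        # the text become the initial decoder queries
        sim = img @ txt.transpose(1, 2)                  # (B, N_img, N_txt)
        k = min(self.num_queries, img.size(1))
        top = sim.max(-1).values.topk(k, dim=1).indices  # (B, k)
        queries = torch.gather(img, 1, top.unsqueeze(-1).expand(-1, -1, img.size(-1)))
        # (3) decode and predict boxes plus query-text similarity logits
        q, _ = self.q_from_txt(queries, txt, txt)
        q, _ = self.q_from_img(q, img, img)
        return self.bbox_head(q).sigmoid(), q @ txt.transpose(1, 2)
```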
+ +## Installation + +```shell +cd $MMDETROOT + +# source installation +pip install -r requirements/multimodal.txt + +# or mim installation +mim install mmdet[multimodal] +``` + +## NOTE + +Grounding DINO utilizes BERT as the language model, which requires access to https://huggingface.co/. If you encounter connection errors due to network access, you can download the required files on a computer with internet access and save them locally. Finally, modify the `lang_model_name` field in the config to the local path. Please refer to the following code: + +```python +from transformers import BertConfig, BertModel +from transformers import AutoTokenizer + +config = BertConfig.from_pretrained("bert-base-uncased") +model = BertModel.from_pretrained("bert-base-uncased", add_pooling_layer=False, config=config) +tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + +config.save_pretrained("your path/bert-base-uncased") +model.save_pretrained("your path/bert-base-uncased") +tokenizer.save_pretrained("your path/bert-base-uncased") +``` + +## Inference + +``` +cd $MMDETROOT + +wget https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth + +python demo/image_demo.py \ + demo/demo.jpg \ + configs/grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py \ + --weights groundingdino_swint_ogc_mmdet-822d7e9d.pth \ + --texts 'bench . car .' +``` + +
+ +
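Following the note above about offline BERT files, switching a config to the locally saved copy only requires overriding the language model path. A minimal sketch (the local path is an assumption; use whatever directory you passed to `save_pretrained`):

```python
_base_ = 'grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py'

# Local directory produced by the save_pretrained() snippet in the NOTE above.
lang_model_name = '/your/path/bert-base-uncased'

model = dict(language_model=dict(name=lang_model_name))
```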
+ +## COCO Results and Models + +| Model | Backbone | Style | COCO mAP | Official COCO mAP | Pre-Train Data | Config | Download | +| :----------------: | :------: | :-------: | :--------: | :---------------: | :----------------------------------------------: | :------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Grounding DINO-T | Swin-T | Zero-shot | 48.5 | 48.4 | O365,GoldG,Cap4M | [config](grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth) | +| Grounding DINO-T | Swin-T | Finetune | 58.1(+0.9) | 57.2 | O365,GoldG,Cap4M | [config](grounding_dino_swin-t_finetune_16xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco/grounding_dino_swin-t_finetune_16xb2_1x_coco_20230921_152544-5f234b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco/grounding_dino_swin-t_finetune_16xb2_1x_coco_20230921_152544.log.json) | +| Grounding DINO-B | Swin-B | Zero-shot | 56.9 | 56.7 | COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO | [config](grounding_dino_swin-b_pretrain_mixeddata.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth) | +| Grounding DINO-B | Swin-B | Finetune | 59.7 | | COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO | [config](grounding_dino_swin-b_finetune_16xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco/grounding_dino_swin-b_finetune_16xb2_1x_coco_20230921_153201-f219e0c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco/grounding_dino_swin-b_finetune_16xb2_1x_coco_20230921_153201.log.json) | +| Grounding DINO-R50 | R50 | Scratch | 48.9(+0.8) | 48.1 | | [config](grounding_dino_r50_scratch_8xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco/grounding_dino_r50_scratch_1x_coco-fe0002f2.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco/20230922_114218.json) | + +Note: + +1. The weights corresponding to the zero-shot model are adopted from the official weights and converted using the [script](../../tools/model_converters/groundingdino_to_mmdet.py). We have not retrained the model for the time being. +2. Finetune refers to fine-tuning on the COCO 2017 dataset. The R50 model is trained using 8 NVIDIA GeForce 3090 GPUs, while the remaining models are trained using 16 NVIDIA GeForce 3090 GPUs. The GPU memory usage is approximately 8.5GB. +3. Our performance is higher than the official model due to two reasons: we modified the initialization strategy and introduced a log scaler. 
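Regarding note 3, the "log scaler" corresponds to the `contrastive_cfg` of the bbox head in the configs added later in this diff. Shown side by side for comparison (values copied from those configs; the variable names here are just labels):

```python
# Converted official weights (zero-shot configs): library defaults.
zero_shot_contrastive_cfg = dict(max_text_len=256)

# Swin-T fine-tuned on COCO: fixed log scale, no bias.
finetune_contrastive_cfg = dict(max_text_len=256, log_scale=0.0, bias=False)

# R50 trained from scratch: learnable ('auto') log scale with bias, one of the
# changes note 3 credits for the higher-than-official result.
scratch_contrastive_cfg = dict(max_text_len=256, log_scale='auto', bias=True)
```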
+ +## LVIS Results + +| Model | MiniVal APr | MiniVal APc | MiniVal APf | MiniVal AP | Val1.0 APr | Val1.0 APc | Val1.0 APf | Val1.0 AP | Pre-Train Data | Config | Download | +| :--------------: | :---------: | :---------: | :---------: | :--------: | :--------: | :--------: | :--------: | :-------: | :----------------------------------------------: | :-----------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: | +| Grounding DINO-T | 18.8 | 24.2 | 34.7 | 28.8 | 10.1 | 15.3 | 29.9 | 20.1 | O365,GoldG,Cap4M | [config](lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth) | +| Grounding DINO-B | 27.9 | 33.4 | 37.2 | 34.7 | 19.0 | 24.1 | 32.9 | 26.7 | COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO | [config](lvis/grounding_dino_swin-b_pretrain_zeroshot_mini-lvis.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth) | + +Note: + +1. The above are zero-shot evaluation results. +2. The evaluation metric we used is LVIS FixAP. For specific details, please refer to [Evaluating Large-Vocabulary Object Detectors: The Devil is in the Details](https://arxiv.org/pdf/2102.01066.pdf). + +## ODinW (Object Detection in the Wild) Results + +Learning visual representations from natural language supervision has recently shown great promise in a number of pioneering works. In general, these language-augmented visual models demonstrate strong transferability to a variety of datasets and tasks. However, it remains challenging to evaluate the transferablity of these models due to the lack of easy-to-use evaluation toolkits and public benchmarks. To tackle this, we build ELEVATER 1 , the first benchmark and toolkit for evaluating (pre-trained) language-augmented visual models. ELEVATER is composed of three components. (i) Datasets. As downstream evaluation suites, it consists of 20 image classification datasets and 35 object detection datasets, each of which is augmented with external knowledge. (ii) Toolkit. An automatic hyper-parameter tuning toolkit is developed to facilitate model evaluation on downstream tasks. (iii) Metrics. A variety of evaluation metrics are used to measure sample-efficiency (zero-shot and few-shot) and parameter-efficiency (linear probing and full model fine-tuning). 
ELEVATER is platform for Computer Vision in the Wild (CVinW), and is publicly released at https://computer-vision-in-the-wild.github.io/ELEVATER/ + +### Results and models of ODinW13 + +| Method | GLIP-T(A) | Official | GLIP-T(B) | Official | GLIP-T(C) | Official | GroundingDINO-T | GroundingDINO-B | +| --------------------- | --------- | --------- | --------- | --------- | --------- | --------- | --------------- | --------------- | +| AerialMaritimeDrone | 0.123 | 0.122 | 0.110 | 0.110 | 0.130 | 0.130 | 0.173 | 0.281 | +| Aquarium | 0.175 | 0.174 | 0.173 | 0.169 | 0.191 | 0.190 | 0.195 | 0.445 | +| CottontailRabbits | 0.686 | 0.686 | 0.688 | 0.688 | 0.744 | 0.744 | 0.799 | 0.808 | +| EgoHands | 0.013 | 0.013 | 0.003 | 0.004 | 0.314 | 0.315 | 0.608 | 0.764 | +| NorthAmericaMushrooms | 0.502 | 0.502 | 0.367 | 0.367 | 0.297 | 0.296 | 0.507 | 0.675 | +| Packages | 0.589 | 0.589 | 0.083 | 0.083 | 0.699 | 0.699 | 0.687 | 0.670 | +| PascalVOC | 0.512 | 0.512 | 0.541 | 0.540 | 0.565 | 0.565 | 0.563 | 0.711 | +| pistols | 0.339 | 0.339 | 0.502 | 0.501 | 0.503 | 0.504 | 0.726 | 0.771 | +| pothole | 0.007 | 0.007 | 0.030 | 0.030 | 0.058 | 0.058 | 0.215 | 0.478 | +| Raccoon | 0.075 | 0.074 | 0.285 | 0.288 | 0.241 | 0.244 | 0.549 | 0.541 | +| ShellfishOpenImages | 0.253 | 0.253 | 0.337 | 0.338 | 0.300 | 0.302 | 0.393 | 0.650 | +| thermalDogsAndPeople | 0.372 | 0.372 | 0.475 | 0.475 | 0.510 | 0.510 | 0.657 | 0.633 | +| VehiclesOpenImages | 0.574 | 0.566 | 0.562 | 0.547 | 0.549 | 0.534 | 0.613 | 0.647 | +| Average | **0.325** | **0.324** | **0.320** | **0.318** | **0.392** | **0.392** | **0.514** | **0.621** | + +### Results and models of ODinW35 + +| Method | GLIP-T(A) | Official | GLIP-T(B) | Official | GLIP-T(C) | Official | GroundingDINO-T | GroundingDINO-B | +| --------------------------- | --------- | --------- | --------- | --------- | --------- | --------- | --------------- | --------------- | +| AerialMaritimeDrone_large | 0.123 | 0.122 | 0.110 | 0.110 | 0.130 | 0.130 | 0.173 | 0.281 | +| AerialMaritimeDrone_tiled | 0.174 | 0.174 | 0.172 | 0.172 | 0.172 | 0.172 | 0.206 | 0.364 | +| AmericanSignLanguageLetters | 0.001 | 0.001 | 0.003 | 0.003 | 0.009 | 0.009 | 0.002 | 0.096 | +| Aquarium | 0.175 | 0.175 | 0.173 | 0.171 | 0.192 | 0.182 | 0.195 | 0.445 | +| BCCD | 0.016 | 0.016 | 0.001 | 0.001 | 0.000 | 0.000 | 0.161 | 0.584 | +| boggleBoards | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.134 | +| brackishUnderwater | 0.016 | 0..013 | 0.021 | 0.027 | 0.020 | 0.022 | 0.021 | 0.454 | +| ChessPieces | 0.001 | 0.001 | 0.000 | 0.000 | 0.001 | 0.001 | 0.000 | 0.000 | +| CottontailRabbits | 0.710 | 0.709 | 0.683 | 0.683 | 0.752 | 0.752 | 0.806 | 0.797 | +| dice | 0.005 | 0.005 | 0.004 | 0.004 | 0.004 | 0.004 | 0.004 | 0.082 | +| DroneControl | 0.016 | 0.017 | 0.006 | 0.008 | 0.005 | 0.007 | 0.042 | 0.638 | +| EgoHands_generic | 0.009 | 0.010 | 0.005 | 0.006 | 0.510 | 0.508 | 0.608 | 0.764 | +| EgoHands_specific | 0.001 | 0.001 | 0.004 | 0.006 | 0.003 | 0.004 | 0.002 | 0.687 | +| HardHatWorkers | 0.029 | 0.029 | 0.023 | 0.023 | 0.033 | 0.033 | 0.046 | 0.439 | +| MaskWearing | 0.007 | 0.007 | 0.003 | 0.002 | 0.005 | 0.005 | 0.004 | 0.406 | +| MountainDewCommercial | 0.218 | 0.227 | 0.199 | 0.197 | 0.478 | 0.463 | 0.430 | 0.580 | +| NorthAmericaMushrooms | 0.502 | 0.502 | 0.450 | 0.450 | 0.497 | 0.497 | 0.471 | 0.501 | +| openPoetryVision | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 0.051 | +| OxfordPets_by_breed | 0.001 | 0.002 | 0.002 | 0.004 | 0.001 | 0.002 | 0.003 | 0.799 | +| 
OxfordPets_by_species | 0.016 | 0.011 | 0.012 | 0.009 | 0.013 | 0.009 | 0.011 | 0.872 | +| PKLot | 0.002 | 0.002 | 0.000 | 0.000 | 0.000 | 0.000 | 0.001 | 0.774 | +| Packages | 0.569 | 0.569 | 0.279 | 0.279 | 0.712 | 0.712 | 0.695 | 0.728 | +| PascalVOC | 0.512 | 0.512 | 0.541 | 0.540 | 0.565 | 0.565 | 0.563 | 0.711 | +| pistols | 0.339 | 0.339 | 0.502 | 0.501 | 0.503 | 0.504 | 0.726 | 0.771 | +| plantdoc | 0.002 | 0.002 | 0.007 | 0.007 | 0.009 | 0.009 | 0.005 | 0.376 | +| pothole | 0.007 | 0.010 | 0.024 | 0.025 | 0.085 | 0.101 | 0.215 | 0.478 | +| Raccoons | 0.075 | 0.074 | 0.285 | 0.288 | 0.241 | 0.244 | 0.549 | 0.541 | +| selfdrivingCar | 0.071 | 0.072 | 0.074 | 0.074 | 0.081 | 0.080 | 0.089 | 0.318 | +| ShellfishOpenImages | 0.253 | 0.253 | 0.337 | 0.338 | 0.300 | 0.302 | 0.393 | 0.650 | +| ThermalCheetah | 0.028 | 0.028 | 0.000 | 0.000 | 0.028 | 0.028 | 0.087 | 0.290 | +| thermalDogsAndPeople | 0.372 | 0.372 | 0.475 | 0.475 | 0.510 | 0.510 | 0.657 | 0.633 | +| UnoCards | 0.000 | 0.000 | 0.000 | 0.001 | 0.002 | 0.003 | 0.006 | 0.754 | +| VehiclesOpenImages | 0.574 | 0.566 | 0.562 | 0.547 | 0.549 | 0.534 | 0.613 | 0.647 | +| WildfireSmoke | 0.000 | 0.000 | 0.000 | 0.000 | 0.017 | 0.017 | 0.134 | 0.410 | +| websiteScreenshots | 0.003 | 0.004 | 0.003 | 0.005 | 0.005 | 0.006 | 0.012 | 0.175 | +| Average | **0.134** | **0.134** | **0.138** | **0.138** | **0.179** | **0.178** | **0.227** | **0.492** | + +## Flickr30k Results + +| Model | Pre-Train Data | Val R@1 | Val R@5 | Val R@10 | Tesst R@1 | Test R@5 | Test R@10 | Config | Download | +| :--------------: | :--------------: | ------- | ------- | -------- | --------- | -------- | --------- | :-------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Grounding DINO-T | O365,GoldG,Cap4M | 87.8 | 96.6 | 98.0 | 88.1 | 96.9 | 98.2 | [config](grounding_dino_swin-t_finetune_16xb2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco/grounding_dino_swin-t_finetune_16xb2_1x_coco_20230921_152544-5f234b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco/grounding_dino_swin-t_finetune_16xb2_1x_coco_20230921_152544.log.json) | + +Note: + +1. `@1,5,10` refers to precision at the top 1, 5, and 10 positions in a predicted ranked list. +2. The pretraining data used by Grounding DINO-T is `O365,GoldG,Cap4M`, and the corresponding evaluation configuration is (grounding_dino_swin-t_pretrain_zeroshot_refcoco)\[refcoco/grounding_dino_swin-t_pretrain_zeroshot_refcoco.py\]. + +Test Command + +```shell +cd mmdetection +bash tools/dist_test.sh configs/grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_zeroshot_flickr30k.py checkpoints/groundingdino_swint_ogc_mmdet-822d7e9d.pth 8 +``` + +## Referring Expression Comprehension Results + +| Method | Grounding DINO-T
(O365,GoldG,Cap4M) | Grounding DINO-B
(COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO) | +| --------------------------------------- | ----------------------------------------- | ------------------------------------------------------------------------- | +| RefCOCO val @1,5,10 | 50.77/89.45/94.86 | 84.61/97.88/99.10 | +| RefCOCO testA @1,5,10 | 57.45/91.29/95.62 | 88.65/98.89/99.63 | +| RefCOCO testB @1,5,10 | 44.97/86.54/92.88 | 80.51/96.64/98.51 | +| RefCOCO+ val @1,5,10 | 51.64/86.35/92.57 | 73.67/96.60/98.65 | +| RefCOCO+ testA @1,5,10 | 57.25/86.74/92.65 | 82.19/97.92/99.09 | +| RefCOCO+ testB @1,5,10 | 46.35/84.05/90.67 | 64.10/94.25/97.46 | +| RefCOCOg val @1,5,10 | 60.42/92.10/96.18 | 78.33/97.28/98.57 | +| RefCOCOg test @1,5,10 | 59.74/92.08/96.28 | 78.11/97.06/98.65 | +| gRefCOCO val Pr@(F1=1, IoU≥0.5),N-acc | 41.32/91.82 | 46.18/81.44 | +| gRefCOCO testA Pr@(F1=1, IoU≥0.5),N-acc | 27.23/90.24 | 38.60/76.06 | +| gRefCOCO testB Pr@(F1=1, IoU≥0.5),N-acc | 29.70/93.49 | 35.87/80.58 | + +Note: + +1. `@1,5,10` refers to precision at the top 1, 5, and 10 positions in a predicted ranked list. +2. `Pr@(F1=1, IoU≥0.5),N-acc` from the paper [GREC: Generalized Referring Expression Comprehension](https://arxiv.org/pdf/2308.16182.pdf) +3. The pretraining data used by Grounding DINO-T is `O365,GoldG,Cap4M`, and the corresponding evaluation configuration is (grounding_dino_swin-t_pretrain_zeroshot_refcoco)\[refcoco/grounding_dino_swin-t_pretrain_zeroshot_refcoco.py\]. +4. The pretraining data used by Grounding DINO-B is `COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO`, and the corresponding evaluation configuration is (grounding_dino_swin-t_pretrain_zeroshot_refcoco)\[refcoco/grounding_dino_swin-b_pretrain_zeroshot_refcoco.py\]. + +Test Command + +```shell +cd mmdetection +./tools/dist_test.sh configs/grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth 8 +./tools/dist_test.sh configs/grounding_dino/refcoco/grounding_dino_swin-b_pretrain_zeroshot_refexp.py https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth 8 +``` + +## Description Detection Dataset + +```shell +pip install ddd-dataset +``` + +| Method | mode | Grounding DINO-T
(O365,GoldG,Cap4M) | Grounding DINO-B
(COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO) | +| -------------------------------- | -------- | ----------------------------------------- | ------------------------------------------------------------------------- | +| FULL/short/middle/long/very long | concat | 17.2/18.0/18.7/14.8/16.3 | 20.2/20.4/21.1/18.8/19.8 | +| FULL/short/middle/long/very long | parallel | 22.3/28.2/24.8/19.1/13.9 | 25.0/26.4/27.2/23.5/19.7 | +| PRES/short/middle/long/very long | concat | 17.8/18.3/19.2/15.2/17.3 | 20.7/21.7/21.4/19.1/20.3 | +| PRES/short/middle/long/very long | parallel | 21.0/27.0/22.8/17.5/12.5 | 23.7/25.8/25.1/21.9/19.3 | +| ABS/short/middle/long/very long | concat | 15.4/17.1/16.4/13.6/14.9 | 18.6/16.1/19.7/18.1/19.1 | +| ABS/short/middle/long/very long | parallel | 26.0/32.0/33.0/23.6/15.5 | 28.8/28.1/35.8/28.2/20.2 | + +Note: + +1. Considering that the evaluation time for Inter-scenario is very long and the performance is low, it is temporarily not supported. The mentioned metrics are for Intra-scenario. +2. `concat` is the default inference mode for Grounding DINO, where it concatenates multiple sub-sentences with "." to form a single sentence for inference. On the other hand, "parallel" performs inference on each sub-sentence in a for-loop. + +## Custom Dataset + +To facilitate fine-tuning on custom datasets, we use a simple cat dataset as an example, as shown in the following steps. + +### 1. Dataset Preparation + +```shell +cd mmdetection +wget https://download.openmmlab.com/mmyolo/data/cat_dataset.zip +unzip cat_dataset.zip -d data/cat/ +``` + +cat dataset is a single-category dataset with 144 images, which has been converted to coco format. + +
+cat dataset +
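Before preparing the config, the converted annotations can optionally be sanity-checked from Python. This is a small optional check, assuming `pycocotools` is installed (mmdet already depends on it); the paths follow `data_root='data/cat/'` and `ann_file='annotations/trainval.json'` from the fine-tuning config.

```python
from pycocotools.coco import COCO

coco = COCO('data/cat/annotations/trainval.json')
print(len(coco.getImgIds()), 'annotated images')                 # training/validation split
print([c['name'] for c in coco.loadCats(coco.getCatIds())])      # expect ['cat']
```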
+ +### 2. Config Preparation + +Because the cat dataset is small and simple, we use 8 GPUs to train for 20 epochs, scale the learning rate accordingly, and do not train the language model, only the visual model. + +The details of the configuration can be found in [grounding_dino_swin-t_finetune_8xb2_20e_cat](grounding_dino_swin-t_finetune_8xb2_20e_cat.py) + +### 3. Visualization and Evaluation + +Since Grounding DINO is an open-set detection model, it can detect and be evaluated on the cat dataset even without being trained on it. + +The single image visualization is as follows: + +```shell +cd mmdetection +python demo/image_demo.py data/cat/images/IMG_20211205_120756.jpg configs/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py --weights https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth --texts cat. +``` + +
+cat dataset +
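The same visualization can also be scripted from Python. A rough sketch (assumption: your mmdet version's `DetInferencer` accepts a `texts` argument and forwards it to the grounding model, as `demo/image_demo.py` does; the output directory is arbitrary):

```python
from mmdet.apis import DetInferencer

# Same config, weights, image and text prompt as the shell command above.
inferencer = DetInferencer(
    model='configs/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py',
    weights='https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth')

inferencer('data/cat/images/IMG_20211205_120756.jpg', texts='cat.', out_dir='outputs/')
```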
+ +The test dataset evaluation on a single GPU is as follows: + +```shell +python tools/test.py configs/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth +``` + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.867 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.931 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.867 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.903 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.907 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.907 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.907 +``` + +### 4. Model Training and Visualization + +```shell +./tools/dist_train.sh configs/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py 8 --work-dir cat_work_dir +``` + +The model will be saved based on the best performance on the test set. The performance of the best model (at epoch 16) is as follows: + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.905 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.923 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.905 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.927 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.937 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.937 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.937 +``` + +We can see that after fine-tuning, the box AP on the cat dataset increases from 86.7 to 90.5. + +If we run the single-image inference visualization again, the result is as follows: + +```shell +cd mmdetection +python demo/image_demo.py data/cat/images/IMG_20211205_120756.jpg configs/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py --weights cat_work_dir/best_coco_bbox_mAP_epoch_16.pth --texts cat. +``` + +
+cat dataset +
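For reference, the recipe described in step 2 (20 epochs, reduced learning rate, backbone at 0.1x LR, frozen language model) boils down to the following overrides. This is a sketch excerpted from `grounding_dino_swin-t_finetune_8xb2_20e_cat.py`, added later in this diff, rather than a complete config:

```python
max_epoch = 20
train_cfg = dict(max_epochs=max_epoch, val_interval=1)

optim_wrapper = dict(
    optimizer=dict(lr=0.00005),             # lowered base LR for the small dataset
    paramwise_cfg=dict(custom_keys={
        'absolute_pos_embed': dict(decay_mult=0.),
        'backbone': dict(lr_mult=0.1),      # visual backbone at 10% of the base LR
        'language_model': dict(lr_mult=0),  # language model is not trained
    }))
```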
diff --git a/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_concat_dod.py b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_concat_dod.py new file mode 100644 index 0000000000000000000000000000000000000000..ac655b74aa664ef912b6b1f509e4eb9341ccd62a --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_concat_dod.py @@ -0,0 +1,14 @@ +_base_ = 'grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py' + +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_parallel_dod.py b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_parallel_dod.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1c8f2ac740c6c64a01a1a6a8f7dd57622bedf6 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-b_pretrain_zeroshot_parallel_dod.py @@ -0,0 +1,3 @@ +_base_ = 'grounding_dino_swin-b_pretrain_zeroshot_concat_dod.py' + +model = dict(test_cfg=dict(chunked_size=1)) diff --git a/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py new file mode 100644 index 0000000000000000000000000000000000000000..bb418011bf489c259f3696589aa56c5b8296256c --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py @@ -0,0 +1,78 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' + +data_root = 'data/d3/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', 'sent_ids')) +] + +# -------------------------------------------------# +val_dataset_full = dict( + type='DODDataset', + data_root=data_root, + ann_file='d3_json/d3_full_annotations.json', + data_prefix=dict(img='d3_images/', anno='d3_pkl'), + pipeline=test_pipeline, + test_mode=True, + backend_args=None, + return_classes=True) + +val_evaluator_full = dict( + type='DODCocoMetric', + ann_file=data_root + 'd3_json/d3_full_annotations.json') + +# -------------------------------------------------# +val_dataset_pres = dict( + type='DODDataset', + data_root=data_root, + ann_file='d3_json/d3_pres_annotations.json', + data_prefix=dict(img='d3_images/', anno='d3_pkl'), + pipeline=test_pipeline, + test_mode=True, + backend_args=None, + return_classes=True) +val_evaluator_pres = dict( + type='DODCocoMetric', + ann_file=data_root + 'd3_json/d3_pres_annotations.json') + +# -------------------------------------------------# +val_dataset_abs = dict( + type='DODDataset', + data_root=data_root, + ann_file='d3_json/d3_abs_annotations.json', + data_prefix=dict(img='d3_images/', anno='d3_pkl'), + pipeline=test_pipeline, + test_mode=True, + backend_args=None, + return_classes=True) +val_evaluator_abs = dict( + type='DODCocoMetric', + ann_file=data_root + 'd3_json/d3_abs_annotations.json') + +# 
-------------------------------------------------# +datasets = [val_dataset_full, val_dataset_pres, val_dataset_abs] +dataset_prefixes = ['FULL', 'PRES', 'ABS'] +metrics = [val_evaluator_full, val_evaluator_pres, val_evaluator_abs] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py new file mode 100644 index 0000000000000000000000000000000000000000..3d680091162e5ac96c15c76b58a18764e85d3233 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py @@ -0,0 +1,3 @@ +_base_ = 'grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py' + +model = dict(test_cfg=dict(chunked_size=1)) diff --git a/mmpose/configs/mmdet/grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_zeroshot_flickr30k.py b/mmpose/configs/mmdet/grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_zeroshot_flickr30k.py new file mode 100644 index 0000000000000000000000000000000000000000..c1996567588842f82c0af83e3a9ab84c81e7c25d --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_zeroshot_flickr30k.py @@ -0,0 +1,57 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' + +dataset_type = 'Flickr30kDataset' +data_root = 'data/flickr30k_entities/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive', 'phrase_ids', 'phrases')) +] + +dataset_Flickr30k_val = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_val.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +dataset_Flickr30k_test = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_test.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +val_evaluator_Flickr30k = dict(type='Flickr30kMetric') + +test_evaluator_Flickr30k = dict(type='Flickr30kMetric') + +# ----------Config---------- # +dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest'] +datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test] +metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..623a29b87adfd6734e980e814766e873b2b89d05 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco.py @@ -0,0 +1,208 @@ 
+_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GroundingDINO', + num_queries=900, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=False, + ), + language_model=dict( + type='BertModel', + name=lang_model_name, + pad_to_max=False, + use_sub_sentence_represent=True, + special_tokens_list=['[CLS]', '[SEP]', '.', '?'], + add_pooling_layer=False, + ), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + bias=True, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( + num_layers=6, + num_cp=6, + # visual layer config + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + # text layer config + text_layer_cfg=dict( + self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)), + # fusion layer config + fusion_layer_cfg=dict( + v_dim=256, + l_dim=256, + embed_dim=1024, + num_heads=4, + init_values=1e-4), + ), + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + # query self attention layer + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to text + cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to image + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, normalize=True, offset=0.0, temperature=20), + bbox_head=dict( + type='GroundingDINOHead', + num_classes=80, + sync_cls_avg_factor=True, + contrastive_cfg=dict(max_text_len=256, log_scale='auto', bias=True), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), # 2.0 in DeformDETR + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( # TODO: Move to model.train_cfg ? 
+ label_noise_scale=0.5, + box_noise_scale=1.0, # 0.4 for DN-DETR + group_cfg=dict(dynamic=True, num_groups=None, + num_dn_queries=100)), # TODO: half num_dn_queries + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='BinaryFocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='FixScaleResize', scale=(800, 1333), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities')) +] + +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), + pipeline=train_pipeline, + return_classes=True)) +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader + +# We did not adopt the official 24e optimizer strategy +# because the results indicate that the current strategy is superior. +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=0.0001, # 0.0002 for DeformDETR + weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) +# learning policy +max_epochs = 12 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3554ee245ffe4312fc7f2cdd83755b1a0731aab9 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = [ + './grounding_dino_swin-t_finetune_16xb2_1x_coco.py', +] + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth' # noqa +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_pretrain_mixeddata.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_pretrain_mixeddata.py new file mode 100644 index 0000000000000000000000000000000000000000..92f327fef8311f0f72d7f75149bfc163863e913c --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-b_pretrain_mixeddata.py @@ -0,0 +1,16 @@ +_base_ = [ + './grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py', +] + +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6403ee66d9e5782723117191176efbadec2a90 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco.py @@ -0,0 +1,204 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth' # noqa +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GroundingDINO', + num_queries=900, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=False, + ), + language_model=dict( + type='BertModel', + name=lang_model_name, + pad_to_max=False, + use_sub_sentence_represent=True, + special_tokens_list=['[CLS]', '[SEP]', '.', '?'], + add_pooling_layer=False, + ), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=True, + convert_weights=False), + neck=dict( + type='ChannelMapper', + in_channels=[192, 384, 768], + kernel_size=1, + out_channels=256, + act_cfg=None, + bias=True, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( + num_layers=6, + num_cp=6, + # visual layer config + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0), 
+ ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + # text layer config + text_layer_cfg=dict( + self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)), + # fusion layer config + fusion_layer_cfg=dict( + v_dim=256, + l_dim=256, + embed_dim=1024, + num_heads=4, + init_values=1e-4), + ), + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + # query self attention layer + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to text + cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to image + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, normalize=True, offset=0.0, temperature=20), + bbox_head=dict( + type='GroundingDINOHead', + num_classes=80, + sync_cls_avg_factor=True, + contrastive_cfg=dict(max_text_len=256, log_scale=0.0, bias=False), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), # 2.0 in DeformDETR + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + dn_cfg=dict( # TODO: Move to model.train_cfg ? + label_noise_scale=0.5, + box_noise_scale=1.0, # 0.4 for DN-DETR + group_cfg=dict(dynamic=True, num_groups=None, + num_dn_queries=100)), # TODO: half num_dn_queries + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='BinaryFocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='FixScaleResize', scale=(800, 1333), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities')) +] + +train_dataloader = dict( + dataset=dict( + filter_cfg=dict(filter_empty_gt=False), + pipeline=train_pipeline, + return_classes=True)) +val_dataloader = 
dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (16 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..c2265e86730f68ed69af246a5e0e87fa2cb5e570 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_finetune_8xb2_20e_cat.py @@ -0,0 +1,56 @@ +_base_ = 'grounding_dino_swin-t_finetune_16xb2_1x_coco.py' + +data_root = 'data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(220, 20, 60)]) + +model = dict(bbox_head=dict(num_classes=num_classes)) + +train_dataloader = dict( + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +max_epoch = 20 + +default_hooks = dict( + checkpoint=dict(interval=1, max_keep_ckpts=1, save_best='auto'), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epoch, val_interval=1) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=30), + dict( + type='MultiStepLR', + begin=0, + end=max_epoch, + by_epoch=True, + milestones=[15], + gamma=0.1) +] + +optim_wrapper = dict( + optimizer=dict(lr=0.00005), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + 'language_model': dict(lr_mult=0), + })) + +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py new file mode 100644 index 0000000000000000000000000000000000000000..7448764ef7ed4fb91bbca981e8006b412e74c414 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py @@ -0,0 +1,128 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GroundingDINO', + num_queries=900, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=False, + ), + language_model=dict( + type='BertModel', + name=lang_model_name, + pad_to_max=False, + 
use_sub_sentence_represent=True, + special_tokens_list=['[CLS]', '[SEP]', '.', '?'], + add_pooling_layer=True, + ), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=False), + neck=dict( + type='ChannelMapper', + in_channels=[192, 384, 768], + kernel_size=1, + out_channels=256, + act_cfg=None, + bias=True, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( + num_layers=6, + # visual layer config + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + # text layer config + text_layer_cfg=dict( + self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)), + # fusion layer config + fusion_layer_cfg=dict( + v_dim=256, + l_dim=256, + embed_dim=1024, + num_heads=4, + init_values=1e-4), + ), + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + # query self attention layer + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to text + cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to image + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, normalize=True, offset=0.0, temperature=20), + bbox_head=dict( + type='GroundingDINOHead', + num_classes=80, + sync_cls_avg_factor=True, + contrastive_cfg=dict(max_text_len=256), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), # 2.0 in DeformDETR + loss_bbox=dict(type='L1Loss', loss_weight=5.0)), + dn_cfg=dict( # TODO: Move to model.train_cfg ? 
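+ # DN-DETR/DINO-style denoising: noised copies of ground-truth labels and boxes are added as extra decoder queries during training to stabilize bipartite matching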
+ label_noise_scale=0.5, + box_noise_scale=1.0, # 0.4 for DN-DETR + group_cfg=dict(dynamic=True, num_groups=None, + num_dn_queries=100)), # TODO: half num_dn_queries + # training and testing settings + train_cfg=None, + test_cfg=dict(max_per_img=300)) + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..6084159044e8c0e8642a1226c6a9efd85c7d27d2 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_lvis.py @@ -0,0 +1,14 @@ +_base_ = './grounding_dino_swin-t_pretrain_zeroshot_lvis.py' + +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_mini-lvis.py b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..68467a7237ca893aa79eb5b0acc9d159f7082968 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-b_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,14 @@ +_base_ = './grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py' + +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..3d05f0ce1c0cb095c0c9f9a65bd7666cba57afe7 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py @@ -0,0 +1,24 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_od_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + 'annotations/lvis_od_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 
0000000000000000000000000000000000000000..0aac6cf33a92827c9c350175977bb1a595d2c0c8 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,25 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + + 'annotations/lvis_v1_minival_inserted_image_name.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/metafile.yml b/mmpose/configs/mmdet/grounding_dino/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..dcb5ebf82846d3cfbc2fa345cc89468ba269fd84 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/metafile.yml @@ -0,0 +1,67 @@ +Collections: + - Name: Grounding DINO + Metadata: + Training Data: Objects365, GoldG, CC3M and COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 3090 GPUs + Architecture: + - Swin Transformer + - BERT + Paper: + URL: https://arxiv.org/abs/2303.05499 + Title: 'Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection +' + README: configs/grounding_dino/README.md + Code: + URL: + Version: v3.0.0 + +Models: + - Name: grounding_dino_swin-t_pretrain_obj365_goldg_cap4m + In Collection: Grounding DINO + Config: configs/grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.5 + Weights: https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth + - Name: grounding_dino_swin-b_pretrain_mixeddata + In Collection: Grounding DINO + Config: configs/grounding_dino/grounding_dino_swin-b_pretrain_mixeddata.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 56.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth + - Name: grounding_dino_swin-t_finetune_16xb2_1x_coco + In Collection: Grounding DINO + Config: configs/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 58.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco/grounding_dino_swin-t_finetune_16xb2_1x_coco_20230921_152544-5f234b20.pth + - Name: grounding_dino_swin-b_finetune_16xb2_1x_coco + In Collection: Grounding DINO + Config: configs/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 59.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_swin-b_finetune_16xb2_1x_coco/grounding_dino_swin-b_finetune_16xb2_1x_coco_20230921_153201-f219e0c0.pth + - Name: grounding_dino_r50_scratch_8xb2_1x_coco + In Collection: Grounding DINO + Config: configs/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.9 + Weights: 
https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/grounding_dino_r50_scratch_8xb2_1x_coco/grounding_dino_r50_scratch_1x_coco-fe0002f2.pth diff --git a/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw13.py b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw13.py new file mode 100644 index 0000000000000000000000000000000000000000..65a6bc2a078a9ea5123c745aa72ba22466ea6e58 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw13.py @@ -0,0 +1,338 @@ +_base_ = '../grounding_dino_swin-b_pretrain_mixeddata.py' + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + test_mode=True, + pipeline=base_test_pipeline, + return_classes=True) +val_evaluator_AerialMaritimeDrone = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' + +caption_prompt = None +# caption_prompt = { +# 'penguin': { +# 'suffix': ', which is black and white' +# }, +# 'puffin': { +# 'suffix': ' with orange beaks' +# }, +# 'stingray': { +# 'suffix': ' which is flat and round' +# }, +# } +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 CottontailRabbits---------------------# +class_name = ('Cottontail-Rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' + +caption_prompt = None +# caption_prompt = {'Cottontail-Rabbit': {'name': 'rabbit'}} + +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 EgoHands---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' + +caption_prompt = None +# caption_prompt = {'hand': {'suffix': ' of a person'}} + +dataset_EgoHands = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + 
ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 NorthAmericaMushrooms---------------------# +class_name = ('CoW', 'chanterelle') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + +caption_prompt = None +# caption_prompt = { +# 'CoW': { +# 'name': 'flat mushroom' +# }, +# 'chanterelle': { +# 'name': 'yellow mushroom' +# } +# } + +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' + +caption_prompt = None +# caption_prompt = { +# 'package': { +# 'prefix': 'there is a ', +# 'suffix': ' on the porch' +# } +# } + +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------7 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------9 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' + +caption_prompt = None +# caption_prompt = { +# 'pothole': { +# 'prefix': 'there are some ', +# 'name': 'holes', +# 'suffix': ' on the road' +# } +# } + +dataset_pothole = dict( + 
type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------10 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------11 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# +dataset_prefixes = [ + 'AerialMaritimeDrone', 'Aquarium', 'CottontailRabbits', 'EgoHands', + 'NorthAmericaMushrooms', 'Packages', 'PascalVOC', 'pistols', 'pothole', + 'Raccoon', 'ShellfishOpenImages', 'thermalDogsAndPeople', + 'VehiclesOpenImages' +] +datasets = [ + dataset_AerialMaritimeDrone, dataset_Aquarium, dataset_CottontailRabbits, + dataset_EgoHands, dataset_NorthAmericaMushrooms, dataset_Packages, + dataset_PascalVOC, dataset_pistols, dataset_pothole, dataset_Raccoon, + dataset_ShellfishOpenImages, dataset_thermalDogsAndPeople, + dataset_VehiclesOpenImages +] +metrics = [ + val_evaluator_AerialMaritimeDrone, val_evaluator_Aquarium, + val_evaluator_CottontailRabbits, 
val_evaluator_EgoHands, + val_evaluator_NorthAmericaMushrooms, val_evaluator_Packages, + val_evaluator_PascalVOC, val_evaluator_pistols, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_ShellfishOpenImages, + val_evaluator_thermalDogsAndPeople, val_evaluator_VehiclesOpenImages +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw35.py b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw35.py new file mode 100644 index 0000000000000000000000000000000000000000..e73cd8e61ba20f4baff6f7c85477a8fae3735e44 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-b_pretrain_odinw35.py @@ -0,0 +1,796 @@ +_base_ = '../grounding_dino_swin-b_pretrain_mixeddata.py' + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone_large---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone_large = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_large = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 AerialMaritimeDrone_tiled---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/tiled/' +dataset_AerialMaritimeDrone_tiled = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_tiled = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 AmericanSignLanguageLetters---------------------# +class_name = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AmericanSignLanguageLetters/American Sign Language Letters.v1-v1.coco/' # noqa +dataset_AmericanSignLanguageLetters = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AmericanSignLanguageLetters = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 Aquarium---------------------# +class_name = 
('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 BCCD---------------------# +class_name = ('Platelets', 'RBC', 'WBC') +metainfo = dict(classes=class_name) +_data_root = data_root + 'BCCD/BCCD.v3-raw.coco/' +dataset_BCCD = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_BCCD = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 boggleBoards---------------------# +class_name = ('Q', 'a', 'an', 'b', 'c', 'd', 'e', 'er', 'f', 'g', 'h', 'he', + 'i', 'in', 'j', 'k', 'l', 'm', 'n', 'o', 'o ', 'p', 'q', 'qu', + 'r', 's', 't', 't\\', 'th', 'u', 'v', 'w', 'wild', 'x', 'y', 'z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'boggleBoards/416x416AutoOrient/export/' +dataset_boggleBoards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_boggleBoards = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------7 brackishUnderwater---------------------# +class_name = ('crab', 'fish', 'jellyfish', 'shrimp', 'small_fish', 'starfish') +metainfo = dict(classes=class_name) +_data_root = data_root + 'brackishUnderwater/960x540/' +dataset_brackishUnderwater = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_brackishUnderwater = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 ChessPieces---------------------# +class_name = (' ', 'black bishop', 'black king', 'black knight', 'black pawn', + 'black queen', 'black rook', 'white bishop', 'white king', + 'white knight', 'white pawn', 'white queen', 'white rook') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' +dataset_ChessPieces = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ChessPieces = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------9 CottontailRabbits---------------------# +class_name = ('rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' 
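+# NOTE: here the class is 'rabbit' and the annotations are new_annotations_without_background.json; the 13-dataset config above keeps the original 'Cottontail-Rabbit' class (its caption_prompt rename is commented out), so the rename appears to be baked into the annotation file for this suite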
+dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------10 dice---------------------# +class_name = ('1', '2', '3', '4', '5', '6') +metainfo = dict(classes=class_name) +_data_root = data_root + 'dice/mediumColor/export/' +dataset_dice = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_dice = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------11 DroneControl---------------------# +class_name = ('follow', 'follow_hand', 'land', 'land_hand', 'null', 'object', + 'takeoff', 'takeoff-hand') +metainfo = dict(classes=class_name) +_data_root = data_root + 'DroneControl/Drone Control.v3-raw.coco/' +dataset_DroneControl = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_DroneControl = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 EgoHands_generic---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' +caption_prompt = {'hand': {'suffix': ' of a person'}} +dataset_EgoHands_generic = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + # NOTE w. prompt 0.548; wo. 
prompt 0.764 + # caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_generic = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 EgoHands_specific---------------------# +class_name = ('myleft', 'myright', 'yourleft', 'yourright') +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/specific/' +dataset_EgoHands_specific = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_specific = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------14 HardHatWorkers---------------------# +class_name = ('head', 'helmet', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'HardHatWorkers/raw/' +dataset_HardHatWorkers = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_HardHatWorkers = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------15 MaskWearing---------------------# +class_name = ('mask', 'no-mask') +metainfo = dict(classes=class_name) +_data_root = data_root + 'MaskWearing/raw/' +dataset_MaskWearing = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MaskWearing = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------16 MountainDewCommercial---------------------# +class_name = ('bottle', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'MountainDewCommercial/' +dataset_MountainDewCommercial = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MountainDewCommercial = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------17 NorthAmericaMushrooms---------------------# +class_name = ('flat mushroom', 'yellow mushroom') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------18 openPoetryVision---------------------# +class_name = ('American Typewriter', 'Andale Mono', 'Apple Chancery', 'Arial', + 'Avenir', 'Baskerville', 'Big Caslon', 'Bradley Hand', + 
'Brush Script MT', 'Chalkboard', 'Comic Sans MS', 'Copperplate', + 'Courier', 'Didot', 'Futura', 'Geneva', 'Georgia', 'Gill Sans', + 'Helvetica', 'Herculanum', 'Impact', 'Kefa', 'Lucida Grande', + 'Luminari', 'Marker Felt', 'Menlo', 'Monaco', 'Noteworthy', + 'Optima', 'PT Sans', 'PT Serif', 'Palatino', 'Papyrus', + 'Phosphate', 'Rockwell', 'SF Pro', 'SignPainter', 'Skia', + 'Snell Roundhand', 'Tahoma', 'Times New Roman', 'Trebuchet MS', + 'Verdana') +metainfo = dict(classes=class_name) +_data_root = data_root + 'openPoetryVision/512x512/' +dataset_openPoetryVision = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_openPoetryVision = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------19 OxfordPets_by_breed---------------------# +class_name = ('cat-Abyssinian', 'cat-Bengal', 'cat-Birman', 'cat-Bombay', + 'cat-British_Shorthair', 'cat-Egyptian_Mau', 'cat-Maine_Coon', + 'cat-Persian', 'cat-Ragdoll', 'cat-Russian_Blue', 'cat-Siamese', + 'cat-Sphynx', 'dog-american_bulldog', + 'dog-american_pit_bull_terrier', 'dog-basset_hound', + 'dog-beagle', 'dog-boxer', 'dog-chihuahua', + 'dog-english_cocker_spaniel', 'dog-english_setter', + 'dog-german_shorthaired', 'dog-great_pyrenees', 'dog-havanese', + 'dog-japanese_chin', 'dog-keeshond', 'dog-leonberger', + 'dog-miniature_pinscher', 'dog-newfoundland', 'dog-pomeranian', + 'dog-pug', 'dog-saint_bernard', 'dog-samoyed', + 'dog-scottish_terrier', 'dog-shiba_inu', + 'dog-staffordshire_bull_terrier', 'dog-wheaten_terrier', + 'dog-yorkshire_terrier') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-breed/' # noqa +dataset_OxfordPets_by_breed = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_breed = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------20 OxfordPets_by_species---------------------# +class_name = ('cat', 'dog') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-species/' # noqa +dataset_OxfordPets_by_species = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_species = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------21 PKLot---------------------# +class_name = ('space-empty', 'space-occupied') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PKLot/640/' # noqa +dataset_PKLot = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PKLot = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------22 
Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' +caption_prompt = { + 'package': { + 'prefix': 'there is a ', + 'suffix': ' on the porch' + } +} +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, # NOTE w. prompt 0.728; wo. prompt 0.670 + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------23 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------24 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------25 plantdoc---------------------# +class_name = ('Apple Scab Leaf', 'Apple leaf', 'Apple rust leaf', + 'Bell_pepper leaf', 'Bell_pepper leaf spot', 'Blueberry leaf', + 'Cherry leaf', 'Corn Gray leaf spot', 'Corn leaf blight', + 'Corn rust leaf', 'Peach leaf', 'Potato leaf', + 'Potato leaf early blight', 'Potato leaf late blight', + 'Raspberry leaf', 'Soyabean leaf', 'Soybean leaf', + 'Squash Powdery mildew leaf', 'Strawberry leaf', + 'Tomato Early blight leaf', 'Tomato Septoria leaf spot', + 'Tomato leaf', 'Tomato leaf bacterial spot', + 'Tomato leaf late blight', 'Tomato leaf mosaic virus', + 'Tomato leaf yellow virus', 'Tomato mold leaf', + 'Tomato two spotted spider mites leaf', 'grape leaf', + 'grape leaf black rot') +metainfo = dict(classes=class_name) +_data_root = data_root + 'plantdoc/416x416/' +dataset_plantdoc = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_plantdoc = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------26 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' +caption_prompt = { + 'pothole': { + 'name': 'holes', + 'prefix': 'there are some ', + 'suffix': ' on the road' + } +} +dataset_pothole = dict( + 
type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + # NOTE w. prompt 0.221; wo. prompt 0.478 + # caption_prompt=caption_prompt, + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------27 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------28 selfdrivingCar---------------------# +class_name = ('biker', 'car', 'pedestrian', 'trafficLight', + 'trafficLight-Green', 'trafficLight-GreenLeft', + 'trafficLight-Red', 'trafficLight-RedLeft', + 'trafficLight-Yellow', 'trafficLight-YellowLeft', 'truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'selfdrivingCar/fixedLarge/export/' +dataset_selfdrivingCar = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_selfdrivingCar = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------29 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------30 ThermalCheetah---------------------# +class_name = ('cheetah', 'human') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ThermalCheetah/' +dataset_ThermalCheetah = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ThermalCheetah = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------31 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 
'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------32 UnoCards---------------------# +class_name = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', + '12', '13', '14') +metainfo = dict(classes=class_name) +_data_root = data_root + 'UnoCards/raw/' +dataset_UnoCards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_UnoCards = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------33 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------34 WildfireSmoke---------------------# +class_name = ('smoke', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'WildfireSmoke/' +dataset_WildfireSmoke = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_WildfireSmoke = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------35 websiteScreenshots---------------------# +class_name = ('button', 'field', 'heading', 'iframe', 'image', 'label', 'link', + 'text') +metainfo = dict(classes=class_name) +_data_root = data_root + 'websiteScreenshots/' +dataset_websiteScreenshots = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_websiteScreenshots = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# + +dataset_prefixes = [ + 'AerialMaritimeDrone_large', + 'AerialMaritimeDrone_tiled', + 'AmericanSignLanguageLetters', + 'Aquarium', + 'BCCD', + 'boggleBoards', + 'brackishUnderwater', + 'ChessPieces', + 'CottontailRabbits', + 'dice', + 'DroneControl', + 'EgoHands_generic', + 'EgoHands_specific', + 'HardHatWorkers', + 'MaskWearing', + 'MountainDewCommercial', + 'NorthAmericaMushrooms', + 'openPoetryVision', + 'OxfordPets_by_breed', + 'OxfordPets_by_species', + 'PKLot', + 'Packages', + 'PascalVOC', + 'pistols', + 'plantdoc', + 'pothole', + 'Raccoons', + 'selfdrivingCar', + 'ShellfishOpenImages', + 'ThermalCheetah', + 'thermalDogsAndPeople', + 'UnoCards', + 'VehiclesOpenImages', + 'WildfireSmoke', + 'websiteScreenshots', +] + +datasets = [ + dataset_AerialMaritimeDrone_large, dataset_AerialMaritimeDrone_tiled, + dataset_AmericanSignLanguageLetters, dataset_Aquarium, dataset_BCCD, + dataset_boggleBoards, 
dataset_brackishUnderwater, dataset_ChessPieces, + dataset_CottontailRabbits, dataset_dice, dataset_DroneControl, + dataset_EgoHands_generic, dataset_EgoHands_specific, + dataset_HardHatWorkers, dataset_MaskWearing, dataset_MountainDewCommercial, + dataset_NorthAmericaMushrooms, dataset_openPoetryVision, + dataset_OxfordPets_by_breed, dataset_OxfordPets_by_species, dataset_PKLot, + dataset_Packages, dataset_PascalVOC, dataset_pistols, dataset_plantdoc, + dataset_pothole, dataset_Raccoon, dataset_selfdrivingCar, + dataset_ShellfishOpenImages, dataset_ThermalCheetah, + dataset_thermalDogsAndPeople, dataset_UnoCards, dataset_VehiclesOpenImages, + dataset_WildfireSmoke, dataset_websiteScreenshots +] + +metrics = [ + val_evaluator_AerialMaritimeDrone_large, + val_evaluator_AerialMaritimeDrone_tiled, + val_evaluator_AmericanSignLanguageLetters, val_evaluator_Aquarium, + val_evaluator_BCCD, val_evaluator_boggleBoards, + val_evaluator_brackishUnderwater, val_evaluator_ChessPieces, + val_evaluator_CottontailRabbits, val_evaluator_dice, + val_evaluator_DroneControl, val_evaluator_EgoHands_generic, + val_evaluator_EgoHands_specific, val_evaluator_HardHatWorkers, + val_evaluator_MaskWearing, val_evaluator_MountainDewCommercial, + val_evaluator_NorthAmericaMushrooms, val_evaluator_openPoetryVision, + val_evaluator_OxfordPets_by_breed, val_evaluator_OxfordPets_by_species, + val_evaluator_PKLot, val_evaluator_Packages, val_evaluator_PascalVOC, + val_evaluator_pistols, val_evaluator_plantdoc, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_selfdrivingCar, + val_evaluator_ShellfishOpenImages, val_evaluator_ThermalCheetah, + val_evaluator_thermalDogsAndPeople, val_evaluator_UnoCards, + val_evaluator_VehiclesOpenImages, val_evaluator_WildfireSmoke, + val_evaluator_websiteScreenshots +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py new file mode 100644 index 0000000000000000000000000000000000000000..216b8059726b8fbe9dff3b2a43718bc563502aab --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py @@ -0,0 +1,338 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' # noqa + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + test_mode=True, + pipeline=base_test_pipeline, + return_classes=True) +val_evaluator_AerialMaritimeDrone = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# 
---------------------2 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' + +caption_prompt = None +# caption_prompt = { +# 'penguin': { +# 'suffix': ', which is black and white' +# }, +# 'puffin': { +# 'suffix': ' with orange beaks' +# }, +# 'stingray': { +# 'suffix': ' which is flat and round' +# }, +# } +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 CottontailRabbits---------------------# +class_name = ('Cottontail-Rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' + +caption_prompt = None +# caption_prompt = {'Cottontail-Rabbit': {'name': 'rabbit'}} + +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 EgoHands---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' + +caption_prompt = None +# caption_prompt = {'hand': {'suffix': ' of a person'}} + +dataset_EgoHands = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 NorthAmericaMushrooms---------------------# +class_name = ('CoW', 'chanterelle') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + +caption_prompt = None +# caption_prompt = { +# 'CoW': { +# 'name': 'flat mushroom' +# }, +# 'chanterelle': { +# 'name': 'yellow mushroom' +# } +# } + +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' + +caption_prompt = None +# caption_prompt = { +# 'package': { +# 'prefix': 'there is a ', +# 'suffix': ' on the porch' +# } +# } + +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + 
data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------7 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------9 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' + +caption_prompt = None +# caption_prompt = { +# 'pothole': { +# 'prefix': 'there are some ', +# 'name': 'holes', +# 'suffix': ' on the road' +# } +# } + +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------10 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------11 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# 
---------------------12 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# +dataset_prefixes = [ + 'AerialMaritimeDrone', 'Aquarium', 'CottontailRabbits', 'EgoHands', + 'NorthAmericaMushrooms', 'Packages', 'PascalVOC', 'pistols', 'pothole', + 'Raccoon', 'ShellfishOpenImages', 'thermalDogsAndPeople', + 'VehiclesOpenImages' +] +datasets = [ + dataset_AerialMaritimeDrone, dataset_Aquarium, dataset_CottontailRabbits, + dataset_EgoHands, dataset_NorthAmericaMushrooms, dataset_Packages, + dataset_PascalVOC, dataset_pistols, dataset_pothole, dataset_Raccoon, + dataset_ShellfishOpenImages, dataset_thermalDogsAndPeople, + dataset_VehiclesOpenImages +] +metrics = [ + val_evaluator_AerialMaritimeDrone, val_evaluator_Aquarium, + val_evaluator_CottontailRabbits, val_evaluator_EgoHands, + val_evaluator_NorthAmericaMushrooms, val_evaluator_Packages, + val_evaluator_PascalVOC, val_evaluator_pistols, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_ShellfishOpenImages, + val_evaluator_thermalDogsAndPeople, val_evaluator_VehiclesOpenImages +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py new file mode 100644 index 0000000000000000000000000000000000000000..3df0394a204061684cbb9bb66adb08d92a784efb --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py @@ -0,0 +1,796 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' # noqa + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone_large---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) 
+_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone_large = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_large = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 AerialMaritimeDrone_tiled---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/tiled/' +dataset_AerialMaritimeDrone_tiled = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_tiled = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 AmericanSignLanguageLetters---------------------# +class_name = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AmericanSignLanguageLetters/American Sign Language Letters.v1-v1.coco/' # noqa +dataset_AmericanSignLanguageLetters = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AmericanSignLanguageLetters = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 BCCD---------------------# +class_name = ('Platelets', 'RBC', 'WBC') +metainfo = dict(classes=class_name) +_data_root = data_root + 'BCCD/BCCD.v3-raw.coco/' +dataset_BCCD = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_BCCD = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 boggleBoards---------------------# +class_name = ('Q', 'a', 'an', 'b', 'c', 'd', 'e', 'er', 'f', 'g', 'h', 'he', + 'i', 'in', 'j', 'k', 'l', 'm', 'n', 'o', 'o ', 'p', 'q', 'qu', + 'r', 's', 't', 't\\', 'th', 'u', 'v', 'w', 'wild', 'x', 'y', 'z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'boggleBoards/416x416AutoOrient/export/' 
+dataset_boggleBoards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_boggleBoards = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------7 brackishUnderwater---------------------# +class_name = ('crab', 'fish', 'jellyfish', 'shrimp', 'small_fish', 'starfish') +metainfo = dict(classes=class_name) +_data_root = data_root + 'brackishUnderwater/960x540/' +dataset_brackishUnderwater = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_brackishUnderwater = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 ChessPieces---------------------# +class_name = (' ', 'black bishop', 'black king', 'black knight', 'black pawn', + 'black queen', 'black rook', 'white bishop', 'white king', + 'white knight', 'white pawn', 'white queen', 'white rook') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' +dataset_ChessPieces = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ChessPieces = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------9 CottontailRabbits---------------------# +class_name = ('rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------10 dice---------------------# +class_name = ('1', '2', '3', '4', '5', '6') +metainfo = dict(classes=class_name) +_data_root = data_root + 'dice/mediumColor/export/' +dataset_dice = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_dice = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------11 DroneControl---------------------# +class_name = ('follow', 'follow_hand', 'land', 'land_hand', 'null', 'object', + 'takeoff', 'takeoff-hand') +metainfo = dict(classes=class_name) +_data_root = data_root + 'DroneControl/Drone Control.v3-raw.coco/' +dataset_DroneControl = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) 
+val_evaluator_DroneControl = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 EgoHands_generic---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' +caption_prompt = {'hand': {'suffix': ' of a person'}} +dataset_EgoHands_generic = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + # NOTE w. prompt 0.526, wo. prompt 0.608 + # caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_generic = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 EgoHands_specific---------------------# +class_name = ('myleft', 'myright', 'yourleft', 'yourright') +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/specific/' +dataset_EgoHands_specific = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_specific = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------14 HardHatWorkers---------------------# +class_name = ('head', 'helmet', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'HardHatWorkers/raw/' +dataset_HardHatWorkers = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_HardHatWorkers = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------15 MaskWearing---------------------# +class_name = ('mask', 'no-mask') +metainfo = dict(classes=class_name) +_data_root = data_root + 'MaskWearing/raw/' +dataset_MaskWearing = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MaskWearing = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------16 MountainDewCommercial---------------------# +class_name = ('bottle', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'MountainDewCommercial/' +dataset_MountainDewCommercial = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MountainDewCommercial = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------17 NorthAmericaMushrooms---------------------# +class_name = ('flat mushroom', 'yellow mushroom') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa 
+dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------18 openPoetryVision---------------------# +class_name = ('American Typewriter', 'Andale Mono', 'Apple Chancery', 'Arial', + 'Avenir', 'Baskerville', 'Big Caslon', 'Bradley Hand', + 'Brush Script MT', 'Chalkboard', 'Comic Sans MS', 'Copperplate', + 'Courier', 'Didot', 'Futura', 'Geneva', 'Georgia', 'Gill Sans', + 'Helvetica', 'Herculanum', 'Impact', 'Kefa', 'Lucida Grande', + 'Luminari', 'Marker Felt', 'Menlo', 'Monaco', 'Noteworthy', + 'Optima', 'PT Sans', 'PT Serif', 'Palatino', 'Papyrus', + 'Phosphate', 'Rockwell', 'SF Pro', 'SignPainter', 'Skia', + 'Snell Roundhand', 'Tahoma', 'Times New Roman', 'Trebuchet MS', + 'Verdana') +metainfo = dict(classes=class_name) +_data_root = data_root + 'openPoetryVision/512x512/' +dataset_openPoetryVision = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_openPoetryVision = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------19 OxfordPets_by_breed---------------------# +class_name = ('cat-Abyssinian', 'cat-Bengal', 'cat-Birman', 'cat-Bombay', + 'cat-British_Shorthair', 'cat-Egyptian_Mau', 'cat-Maine_Coon', + 'cat-Persian', 'cat-Ragdoll', 'cat-Russian_Blue', 'cat-Siamese', + 'cat-Sphynx', 'dog-american_bulldog', + 'dog-american_pit_bull_terrier', 'dog-basset_hound', + 'dog-beagle', 'dog-boxer', 'dog-chihuahua', + 'dog-english_cocker_spaniel', 'dog-english_setter', + 'dog-german_shorthaired', 'dog-great_pyrenees', 'dog-havanese', + 'dog-japanese_chin', 'dog-keeshond', 'dog-leonberger', + 'dog-miniature_pinscher', 'dog-newfoundland', 'dog-pomeranian', + 'dog-pug', 'dog-saint_bernard', 'dog-samoyed', + 'dog-scottish_terrier', 'dog-shiba_inu', + 'dog-staffordshire_bull_terrier', 'dog-wheaten_terrier', + 'dog-yorkshire_terrier') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-breed/' # noqa +dataset_OxfordPets_by_breed = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_breed = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------20 OxfordPets_by_species---------------------# +class_name = ('cat', 'dog') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-species/' # noqa +dataset_OxfordPets_by_species = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_species = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + 
metric='bbox') + +# ---------------------21 PKLot---------------------# +class_name = ('space-empty', 'space-occupied') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PKLot/640/' # noqa +dataset_PKLot = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PKLot = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------22 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' +caption_prompt = { + 'package': { + 'prefix': 'there is a ', + 'suffix': ' on the porch' + } +} +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, # NOTE w. prompt 0.695; wo. prompt 0.687 + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------23 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------24 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------25 plantdoc---------------------# +class_name = ('Apple Scab Leaf', 'Apple leaf', 'Apple rust leaf', + 'Bell_pepper leaf', 'Bell_pepper leaf spot', 'Blueberry leaf', + 'Cherry leaf', 'Corn Gray leaf spot', 'Corn leaf blight', + 'Corn rust leaf', 'Peach leaf', 'Potato leaf', + 'Potato leaf early blight', 'Potato leaf late blight', + 'Raspberry leaf', 'Soyabean leaf', 'Soybean leaf', + 'Squash Powdery mildew leaf', 'Strawberry leaf', + 'Tomato Early blight leaf', 'Tomato Septoria leaf spot', + 'Tomato leaf', 'Tomato leaf bacterial spot', + 'Tomato leaf late blight', 'Tomato leaf mosaic virus', + 'Tomato leaf yellow virus', 'Tomato mold leaf', + 'Tomato two spotted spider mites leaf', 'grape leaf', + 'grape leaf black rot') +metainfo = dict(classes=class_name) +_data_root = data_root + 'plantdoc/416x416/' +dataset_plantdoc = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, 
+ ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_plantdoc = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------26 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' +caption_prompt = { + 'pothole': { + 'name': 'holes', + 'prefix': 'there are some ', + 'suffix': ' on the road' + } +} +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + # NOTE w. prompt 0.137; wo. prompt 0.215 + # caption_prompt=caption_prompt, + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------27 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------28 selfdrivingCar---------------------# +class_name = ('biker', 'car', 'pedestrian', 'trafficLight', + 'trafficLight-Green', 'trafficLight-GreenLeft', + 'trafficLight-Red', 'trafficLight-RedLeft', + 'trafficLight-Yellow', 'trafficLight-YellowLeft', 'truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'selfdrivingCar/fixedLarge/export/' +dataset_selfdrivingCar = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_selfdrivingCar = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------29 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------30 ThermalCheetah---------------------# +class_name = ('cheetah', 'human') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ThermalCheetah/' +dataset_ThermalCheetah = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ThermalCheetah = dict( + type='CocoMetric', + ann_file=_data_root + 
'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------31 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------32 UnoCards---------------------# +class_name = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', + '12', '13', '14') +metainfo = dict(classes=class_name) +_data_root = data_root + 'UnoCards/raw/' +dataset_UnoCards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_UnoCards = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------33 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------34 WildfireSmoke---------------------# +class_name = ('smoke', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'WildfireSmoke/' +dataset_WildfireSmoke = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_WildfireSmoke = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------35 websiteScreenshots---------------------# +class_name = ('button', 'field', 'heading', 'iframe', 'image', 'label', 'link', + 'text') +metainfo = dict(classes=class_name) +_data_root = data_root + 'websiteScreenshots/' +dataset_websiteScreenshots = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_websiteScreenshots = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# + +dataset_prefixes = [ + 'AerialMaritimeDrone_large', + 'AerialMaritimeDrone_tiled', + 'AmericanSignLanguageLetters', + 'Aquarium', + 'BCCD', + 'boggleBoards', + 'brackishUnderwater', + 'ChessPieces', + 'CottontailRabbits', + 'dice', + 'DroneControl', + 'EgoHands_generic', + 'EgoHands_specific', + 
'HardHatWorkers', + 'MaskWearing', + 'MountainDewCommercial', + 'NorthAmericaMushrooms', + 'openPoetryVision', + 'OxfordPets_by_breed', + 'OxfordPets_by_species', + 'PKLot', + 'Packages', + 'PascalVOC', + 'pistols', + 'plantdoc', + 'pothole', + 'Raccoons', + 'selfdrivingCar', + 'ShellfishOpenImages', + 'ThermalCheetah', + 'thermalDogsAndPeople', + 'UnoCards', + 'VehiclesOpenImages', + 'WildfireSmoke', + 'websiteScreenshots', +] + +datasets = [ + dataset_AerialMaritimeDrone_large, dataset_AerialMaritimeDrone_tiled, + dataset_AmericanSignLanguageLetters, dataset_Aquarium, dataset_BCCD, + dataset_boggleBoards, dataset_brackishUnderwater, dataset_ChessPieces, + dataset_CottontailRabbits, dataset_dice, dataset_DroneControl, + dataset_EgoHands_generic, dataset_EgoHands_specific, + dataset_HardHatWorkers, dataset_MaskWearing, dataset_MountainDewCommercial, + dataset_NorthAmericaMushrooms, dataset_openPoetryVision, + dataset_OxfordPets_by_breed, dataset_OxfordPets_by_species, dataset_PKLot, + dataset_Packages, dataset_PascalVOC, dataset_pistols, dataset_plantdoc, + dataset_pothole, dataset_Raccoon, dataset_selfdrivingCar, + dataset_ShellfishOpenImages, dataset_ThermalCheetah, + dataset_thermalDogsAndPeople, dataset_UnoCards, dataset_VehiclesOpenImages, + dataset_WildfireSmoke, dataset_websiteScreenshots +] + +metrics = [ + val_evaluator_AerialMaritimeDrone_large, + val_evaluator_AerialMaritimeDrone_tiled, + val_evaluator_AmericanSignLanguageLetters, val_evaluator_Aquarium, + val_evaluator_BCCD, val_evaluator_boggleBoards, + val_evaluator_brackishUnderwater, val_evaluator_ChessPieces, + val_evaluator_CottontailRabbits, val_evaluator_dice, + val_evaluator_DroneControl, val_evaluator_EgoHands_generic, + val_evaluator_EgoHands_specific, val_evaluator_HardHatWorkers, + val_evaluator_MaskWearing, val_evaluator_MountainDewCommercial, + val_evaluator_NorthAmericaMushrooms, val_evaluator_openPoetryVision, + val_evaluator_OxfordPets_by_breed, val_evaluator_OxfordPets_by_species, + val_evaluator_PKLot, val_evaluator_Packages, val_evaluator_PascalVOC, + val_evaluator_pistols, val_evaluator_plantdoc, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_selfdrivingCar, + val_evaluator_ShellfishOpenImages, val_evaluator_ThermalCheetah, + val_evaluator_thermalDogsAndPeople, val_evaluator_UnoCards, + val_evaluator_VehiclesOpenImages, val_evaluator_WildfireSmoke, + val_evaluator_websiteScreenshots +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/grounding_dino/odinw/override_category.py b/mmpose/configs/mmdet/grounding_dino/odinw/override_category.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff05fc6e5e4d0989cf7fcf7af4dc902ee99f3a3 --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/odinw/override_category.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
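+# This helper rewrites the `categories` field of three ODinW validation
+# annotation files (ChessPieces, CottontailRabbits, NorthAmericaMushrooms) so
+# that the category names match the text prompts used by the configs above,
+# and saves each result as `valid/new_annotations_without_background.json`.
+# Example invocation (assuming the ODinW data layout used by the configs):
+#     python override_category.py data/odinw/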
+import argparse + +import mmengine + + +def parse_args(): + parser = argparse.ArgumentParser(description='Override Category') + parser.add_argument('data_root') + return parser.parse_args() + + +def main(): + args = parse_args() + + ChessPieces = [{ + 'id': 1, + 'name': ' ', + 'supercategory': 'pieces' + }, { + 'id': 2, + 'name': 'black bishop', + 'supercategory': 'pieces' + }, { + 'id': 3, + 'name': 'black king', + 'supercategory': 'pieces' + }, { + 'id': 4, + 'name': 'black knight', + 'supercategory': 'pieces' + }, { + 'id': 5, + 'name': 'black pawn', + 'supercategory': 'pieces' + }, { + 'id': 6, + 'name': 'black queen', + 'supercategory': 'pieces' + }, { + 'id': 7, + 'name': 'black rook', + 'supercategory': 'pieces' + }, { + 'id': 8, + 'name': 'white bishop', + 'supercategory': 'pieces' + }, { + 'id': 9, + 'name': 'white king', + 'supercategory': 'pieces' + }, { + 'id': 10, + 'name': 'white knight', + 'supercategory': 'pieces' + }, { + 'id': 11, + 'name': 'white pawn', + 'supercategory': 'pieces' + }, { + 'id': 12, + 'name': 'white queen', + 'supercategory': 'pieces' + }, { + 'id': 13, + 'name': 'white rook', + 'supercategory': 'pieces' + }] + + _data_root = args.data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = ChessPieces + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + CottontailRabbits = [{ + 'id': 1, + 'name': 'rabbit', + 'supercategory': 'Cottontail-Rabbit' + }] + + _data_root = args.data_root + 'CottontailRabbits/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = CottontailRabbits + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + NorthAmericaMushrooms = [{ + 'id': 1, + 'name': 'flat mushroom', + 'supercategory': 'mushroom' + }, { + 'id': 2, + 'name': 'yellow mushroom', + 'supercategory': 'mushroom' + }] + + _data_root = args.data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = NorthAmericaMushrooms + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + +if __name__ == '__main__': + main() diff --git a/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-b_pretrain_zeroshot_refexp.py b/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-b_pretrain_zeroshot_refexp.py new file mode 100644 index 0000000000000000000000000000000000000000..dea0bad08c0ebf6455211fadb268b07868ab4ded --- /dev/null +++ b/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-b_pretrain_zeroshot_refexp.py @@ -0,0 +1,14 @@ +_base_ = './grounding_dino_swin-t_pretrain_zeroshot_refexp.py' + +model = dict( + type='GroundingDINO', + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.3, + patch_norm=True), + neck=dict(in_channels=[256, 512, 1024]), +) diff --git a/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py b/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py new file mode 100644 index 0000000000000000000000000000000000000000..4b5c46574a30bbb2253fc69f79edbcf0cb016505 --- /dev/null +++ 
b/mmpose/configs/mmdet/grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py @@ -0,0 +1,228 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py' + +# 30 is an empirical value, just set it to the maximum value +# without affecting the evaluation result +model = dict(test_cfg=dict(max_per_img=30)) + +data_root = 'data/coco/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/final_refexp_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_testA.json' +val_dataset_refcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_testB.json' +val_dataset_refcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testA.json' +val_dataset_refcoco_plus_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_plus_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testB.json' +val_dataset_refcoco_plus_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_plus_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcocog_test.json' +val_dataset_refcocog_test = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + 
data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcocog_test = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_val.json' +val_dataset_grefcoco_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_val = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testA.json' +val_dataset_grefcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_testA = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testB.json' +val_dataset_grefcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_testB = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +datasets = [ + val_dataset_all_val, val_dataset_refcoco_testA, val_dataset_refcoco_testB, + val_dataset_refcoco_plus_testA, val_dataset_refcoco_plus_testB, + val_dataset_refcocog_test, val_dataset_grefcoco_val, + val_dataset_grefcoco_testA, val_dataset_grefcoco_testB +] +dataset_prefixes = [ + 'val', 'refcoco_testA', 'refcoco_testB', 'refcoco+_testA', + 'refcoco+_testB', 'refcocog_test', 'grefcoco_val', 'grefcoco_testA', + 'grefcoco_testB' +] +metrics = [ + val_evaluator_all_val, val_evaluator_refcoco_testA, + val_evaluator_refcoco_testB, val_evaluator_refcoco_plus_testA, + val_evaluator_refcoco_plus_testB, val_evaluator_refcocog_test, + val_evaluator_grefcoco_val, val_evaluator_grefcoco_testA, + val_evaluator_grefcoco_testB +] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/guided_anchoring/README.md b/mmpose/configs/mmdet/guided_anchoring/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1a5e505d2888f4c521c29d9c8bc6079fac077590 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/README.md @@ -0,0 +1,59 @@ +# Guided Anchoring + +> [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278) + + + +## Abstract + +Region anchors are the cornerstone of modern object detection techniques. 
State-of-the-art detectors mostly rely on a dense anchoring scheme, where anchors are sampled uniformly over the spatial domain with a predefined set of scales and aspect ratios. In this paper, we revisit this foundational stage. Our study shows that it can be done much more effectively and efficiently. Specifically, we present an alternative scheme, named Guided Anchoring, which leverages semantic features to guide the anchoring. The proposed method jointly predicts the locations where the center of objects of interest are likely to exist as well as the scales and aspect ratios at different locations. On top of predicted anchor shapes, we mitigate the feature inconsistency with a feature adaption module. We also study the use of high-quality proposals to improve detection performance. The anchoring scheme can be seamlessly integrated into proposal methods and detectors. With Guided Anchoring, we achieve 9.1% higher recall on MS COCO with 90% fewer anchors than the RPN baseline. We also adopt Guided Anchoring in Fast R-CNN, Faster R-CNN and RetinaNet, respectively improving the detection mAP by 2.2%, 2.7% and 1.2%. + +
+ +
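+The abstract above describes two joint predictions (object-center locations and anchor shapes) followed by a feature adaption step. The snippet below is a minimal, self-contained sketch of that dataflow for readers of this README; it is **not** the `GARPNHead` implementation used by these configs, torchvision is assumed to be available, and the module name and channel sizes are illustrative (only `loc_filter_thr=0.01` mirrors the configs below).
+
+```python
+# Minimal sketch of the Guided Anchoring idea (illustrative, not mmdet code).
+import torch
+import torch.nn as nn
+from torchvision.ops import DeformConv2d
+
+
+class GuidedAnchorSketch(nn.Module):
+    """Location branch + shape branch + shape-conditioned feature adaption."""
+
+    def __init__(self, in_channels=256, loc_filter_thr=0.01):
+        super().__init__()
+        self.loc_filter_thr = loc_filter_thr
+        self.conv_loc = nn.Conv2d(in_channels, 1, 1)    # P(cell is an object center)
+        self.conv_shape = nn.Conv2d(in_channels, 2, 1)  # anchor (dw, dh) per cell
+        self.conv_offset = nn.Conv2d(2, 2 * 3 * 3, 1)   # shape -> 3x3 deform offsets
+        self.conv_adaption = DeformConv2d(in_channels, in_channels, 3, padding=1)
+
+    def forward(self, feat):
+        loc_pred = self.conv_loc(feat).sigmoid()        # N x 1 x H x W
+        shape_pred = self.conv_shape(feat)              # N x 2 x H x W
+        offset = self.conv_offset(shape_pred.detach())  # N x 18 x H x W
+        adapted = self.conv_adaption(feat, offset).relu()
+        keep = loc_pred > self.loc_filter_thr           # sparse anchor locations
+        return loc_pred, shape_pred, adapted, keep
+
+
+if __name__ == '__main__':
+    head = GuidedAnchorSketch()
+    loc, shape, adapted, keep = head(torch.rand(1, 256, 32, 32))
+    print(loc.shape, shape.shape, adapted.shape, int(keep.sum()))
+```
+
+In the configs in this folder, the same ingredients appear as `loss_loc`, `loss_shape`, `loc_filter_thr` and the approx/square anchor generators of `GARPNHead`; the sketch only shows the inference dataflow, not the training targets or losses.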
+ +## Results and Models + +The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR 1000 | Config | Download | +| :----: | :-------------: | :-----: | :-----: | :------: | :------------: | :-----: | :------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GA-RPN | R-50-FPN | caffe | 1x | 5.3 | 15.8 | 68.4 | [config](./ga-rpn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531_011819.log.json) | +| GA-RPN | R-101-FPN | caffe | 1x | 7.3 | 13.0 | 69.5 | [config](./ga-rpn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531_011812.log.json) | +| GA-RPN | X-101-32x4d-FPN | pytorch | 1x | 8.5 | 10.0 | 70.6 | [config](./ga-rpn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220_221326.log.json) | +| GA-RPN | X-101-64x4d-FPN | pytorch | 1x | 7.1 | 7.5 | 71.2 | [config](./ga-rpn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225_152704.log.json) | + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GA-Faster RCNN | R-50-FPN | caffe | 1x | 5.5 | | 39.6 | [config](./ga-faster-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718.log.json) | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | 7.5 | | 41.5 | 
[config](./ga-faster-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_20200505_115528.log.json) | +| GA-Faster RCNN | X-101-32x4d-FPN | pytorch | 1x | 8.7 | 9.7 | 43.0 | [config](./ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215_184547.log.json) | +| GA-Faster RCNN | X-101-64x4d-FPN | pytorch | 1x | 11.8 | 7.3 | 43.9 | [config](./ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215_104455.log.json) | +| GA-RetinaNet | R-50-FPN | caffe | 1x | 3.5 | 16.8 | 36.9 | [config](./ga-retinanet_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020_225450.log.json) | +| GA-RetinaNet | R-101-FPN | caffe | 1x | 5.5 | 12.9 | 39.0 | [config](./ga-retinanet_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531_012847.log.json) | +| GA-RetinaNet | X-101-32x4d-FPN | pytorch | 1x | 6.9 | 10.6 | 40.5 | [config](./ga-retinanet_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219_223025.log.json) | +| GA-RetinaNet | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 7.7 | 41.3 | [config](./ga-retinanet_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226_221123.log.json) | + +- In the Guided Anchoring paper, `score_thr` is set to 0.001 in Fast/Faster RCNN and 0.05 in RetinaNet for both baselines and Guided Anchoring. + +- Performance on COCO test-dev benchmark are shown as follows. 
+ +| Method | Backbone | Style | Lr schd | Aug Train | Score thr | AP | AP_50 | AP_75 | AP_small | AP_medium | AP_large | Download | +| :------------: | :-------: | :---: | :-----: | :-------: | :-------: | :-: | :---: | :---: | :------: | :-------: | :------: | :------: | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.001 | | | | | | | | +| GA-RetinaNet | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | +| GA-RetinaNet | R-101-FPN | caffe | 2x | T | 0.05 | | | | | | | | + +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper for [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278). + +```latex +@inproceedings{wang2019region, + title={Region Proposal by Guided Anchoring}, + author={Jiaqi Wang and Kai Chen and Shuo Yang and Chen Change Loy and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-fast-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-fast-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2d0579c53cb23d71d0bec57387f413cc39449e93 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-fast-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,66 @@ +_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(num=256))), + test_cfg=dict(rcnn=dict(score_thr=1e-3))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=300), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=None), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img', 'proposals']), + ]) +] +# TODO: support loading proposals +data = dict( + train=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl', + pipeline=train_pipeline), + val=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline), + test=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, 
norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f585dc355ac7dc10e75875f6b9f739fe669912bb --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ga-faster-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6cd44de557bfb20b4298099bd0972e3327b410cb --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,64 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5), + rpn_proposal=dict(nms_post=1000, max_per_img=300), + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3007fbec42016fa8c6b90ba5b0b4e772d0e865f7 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,64 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 
64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5), + rpn_proposal=dict(nms_post=1000, max_per_img=300), + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8a22a1ec01e66854c68968f65802dc117aa59953 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d6aaeaa7187deaa2c0da73a89bf14980a3405db --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9adbae55eea2311800ccbc8e01e3f41521c7040b --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ga-retinanet_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', 
+ checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_ms-2x.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_ms-2x.py new file mode 100644 index 0000000000000000000000000000000000000000..012e89b8338c69c4ffdf4182827a185233945288 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r101-caffe_fpn_ms-2x.py @@ -0,0 +1,34 @@ +_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 960)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning policy +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3.0, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b62aba62c64870977c7c8fe4021a361c8871b633 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,61 @@ +_base_ = '../retinanet/retinanet_r50-caffe_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='GARetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), + center_ratio=0.2, + ignore_ratio=0.5)) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..da39c7005b26d65cca0ae122bf078db2d8ad2786 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,61 @@ 
+_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='GARetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), + center_ratio=0.2, + ignore_ratio=0.5)) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..478a8e5e4a2192e23329564ac688ac40c93110dd --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cb7721d3a604277977b102d431076d6d58a7d457 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-retinanet_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b375c874ac8cabf5ad29aacc51e1065d14d83ee1 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = './ga-rpn_r50-caffe_fpn_1x_coco.py' +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + 
type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..aa58426effe8bedbe9ffb907153b98d51bef5ef2 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,57 @@ +_base_ = '../rpn/rpn_r50-caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5)), + test_cfg=dict(rpn=dict(nms_post=1000))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2973f272b740c8deec74f6c24798a2d80d917946 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_r50_fpn_1x_coco.py @@ -0,0 +1,57 @@ +_base_ = '../rpn/rpn_r50_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + 
ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5)), + test_cfg=dict(rpn=dict(nms_post=1000))) +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..276d45d8c21fa1eba130e834671bdddd794fa1f5 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f29fe9aa20054f3152e290df5ca75363dff6a4ce --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/ga-rpn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga-rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/guided_anchoring/metafile.yml b/mmpose/configs/mmdet/guided_anchoring/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..516b3e93fc2b10fb563de1b377144da103ef4523 --- /dev/null +++ b/mmpose/configs/mmdet/guided_anchoring/metafile.yml @@ -0,0 +1,246 @@ +Collections: + - Name: Guided Anchoring + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Guided Anchoring + - ResNet + Paper: + URL: https://arxiv.org/abs/1901.03278 + Title: 'Region Proposal by Guided Anchoring' + README: configs/guided_anchoring/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/ga_retina_head.py#L10 + Version: v2.0.0 + +Models: + - Name: ga-rpn_r50-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-rpn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.3 + inference time (ms/im): + - value: 63.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 68.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth + + - Name: ga-rpn_r101-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-rpn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.3 + inference time (ms/im): + - value: 76.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 69.5 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth + + - Name: ga-rpn_x101-32x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-rpn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.5 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 70.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth + + - Name: ga-rpn_x101-64x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-rpn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 133.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 70.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth + + - Name: ga-faster-rcnn_r50-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth + + - Name: ga-faster-rcnn_r101-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-faster-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth + + - Name: ga-faster-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.7 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth + + - Name: ga-faster-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 11.8 + inference time (ms/im): + - value: 136.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth + + - Name: ga-retinanet_r50-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: 
configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.5 + inference time (ms/im): + - value: 59.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth + + - Name: ga-retinanet_r101-caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth + + - Name: ga-retinanet_x101-32x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-retinanet_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.9 + inference time (ms/im): + - value: 94.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth + + - Name: ga-retinanet_x101-64x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga-retinanet_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth diff --git a/mmpose/configs/mmdet/hrnet/README.md b/mmpose/configs/mmdet/hrnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fc1ed0cc94e778ad56504b9fa8050ad8237c4c11 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/README.md @@ -0,0 +1,101 @@ +# HRNet + +> [Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1902.09212) + + + +## Abstract + +This is an official pytorch implementation of Deep High-Resolution Representation Learning for Human Pose Estimation. In this work, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the mutli-resolution subnetworks in parallel. 
We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection dataset and the MPII Human Pose dataset. + +High-resolution representation learning plays an essential role in many vision problems, e.g., pose estimation and semantic segmentation. The high-resolution network (HRNet), recently developed for human pose estimation, maintains high-resolution representations through the whole process by connecting high-to-low resolution convolutions in parallel and produces strong high-resolution representations by repeatedly conducting fusions across parallel convolutions. +In this paper, we conduct a further study on high-resolution representations by introducing a simple yet effective modification and apply it to a wide range of vision tasks. We augment the high-resolution representation by aggregating the (upsampled) representations from all the parallel convolutions rather than only the representation from the high-resolution convolution as done in HRNet. This simple modification leads to stronger representations, evidenced by superior results. We show top results in semantic segmentation on Cityscapes, LIP, and PASCAL Context, and facial landmark detection on AFLW, COFW, 300W, and WFLW. In addition, we build a multi-level representation from the high-resolution representation and apply it to the Faster R-CNN object detection framework and the extended frameworks. The proposed approach achieves superior results to existing single-model networks on COCO object detection. + +
+ +
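The parallel multi-resolution design described above maps directly onto the backbone definition shared by the configs in this folder: each `stageN` entry keeps the existing branches and adds one lower-resolution branch, and the `HRFPN` neck aggregates the (upsampled) outputs of all branches into FPN-style levels. As a minimal sketch, condensed from the HRNetV2p-W32 configs further down (the W18/W40 variants only change the `num_channels` widths and the pretrained checkpoint):

```python
# Sketch of the HRNetV2p-W32 backbone + HRFPN neck used by the configs below
# (see faster-rcnn_hrnetv2p-w32-1x_coco.py); a config fragment, not a full config.
backbone = dict(
    type='HRNet',
    extra=dict(
        # stage1: a single high-resolution branch
        stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
                    num_blocks=(4, ), num_channels=(64, )),
        # each later stage adds one more, lower-resolution branch in parallel
        stage2=dict(num_modules=1, num_branches=2, block='BASIC',
                    num_blocks=(4, 4), num_channels=(32, 64)),
        stage3=dict(num_modules=4, num_branches=3, block='BASIC',
                    num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
        stage4=dict(num_modules=3, num_branches=4, block='BASIC',
                    num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256))),
    init_cfg=dict(
        type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32'))
neck = dict(
    # HRFPN fuses the four parallel resolutions (32/64/128/256 channels) into 256-d levels
    type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256)
```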
+ +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 1x | 6.6 | 13.4 | 36.9 | [config](./faster-rcnn_hrnetv2p-w18-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130_211246.log.json) | +| HRNetV2p-W18 | pytorch | 2x | 6.6 | - | 38.9 | [config](./faster-rcnn_hrnetv2p-w18-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731.log.json) | +| HRNetV2p-W32 | pytorch | 1x | 9.0 | 12.4 | 40.2 | [config](./faster-rcnn_hrnetv2p-w32-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130_204442.log.json) | +| HRNetV2p-W32 | pytorch | 2x | 9.0 | - | 41.4 | [config](./faster-rcnn_hrnetv2p-w32_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927.log.json) | +| HRNetV2p-W40 | pytorch | 1x | 10.4 | 10.5 | 41.2 | [config](./faster-rcnn_hrnetv2p-w40-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210_125315.log.json) | +| HRNetV2p-W40 | pytorch | 2x | 10.4 | - | 42.1 | [config](./faster-rcnn_hrnetv2p-w40_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------: | 
:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 1x | 7.0 | 11.7 | 37.7 | 34.2 | [config](./mask-rcnn_hrnetv2p-w18-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205_232523.log.json) | +| HRNetV2p-W18 | pytorch | 2x | 7.0 | - | 39.8 | 36.0 | [config](./mask-rcnn_hrnetv2p-w18-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212_134222.log.json) | +| HRNetV2p-W32 | pytorch | 1x | 9.4 | 11.3 | 41.2 | 37.1 | [config](./mask-rcnn_hrnetv2p-w32-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207_055017.log.json) | +| HRNetV2p-W32 | pytorch | 2x | 9.4 | - | 42.5 | 37.8 | [config](./mask-rcnn_hrnetv2p-w32-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213_150518.log.json) | +| HRNetV2p-W40 | pytorch | 1x | 10.9 | | 42.1 | 37.5 | [config](./mask-rcnn_hrnetv2p-w40_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646.log.json) | +| HRNetV2p-W40 | pytorch | 2x | 10.9 | | 42.8 | 38.2 | [config](./mask-rcnn_hrnetv2p-w40-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 7.0 | 11.0 | 41.2 | [config](./cascade-rcnn_hrnetv2p-w18-20e_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210_105632.log.json) | +| HRNetV2p-W32 | pytorch | 20e | 9.4 | 11.0 | 43.3 | [config](./cascade-rcnn_hrnetv2p-w32-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208_160511.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 10.8 | | 43.8 | [config](./cascade-rcnn_hrnetv2p-w40-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 8.5 | 8.5 | 41.6 | 36.4 | [config](./cascade-mask-rcnn_hrnetv2p-w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210_093149.log.json) | +| HRNetV2p-W32 | pytorch | 20e | | 8.3 | 44.3 | 38.6 | [config](./cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 12.5 | | 45.1 | 39.3 | [config](./cascade-mask-rcnn_hrnetv2p-w40-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922.log.json) | + +### Hybrid Task Cascade (HTC) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------: | 
:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 10.8 | 4.7 | 42.8 | 37.9 | [config](./htc_hrnetv2p-w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210_182735.log.json) | +| HRNetV2p-W32 | pytorch | 20e | 13.1 | 4.9 | 45.4 | 39.9 | [config](./htc_hrnetv2p-w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207_193153.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 14.6 | | 46.4 | 40.8 | [config](./htc_hrnetv2p-w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411.log.json) | + +### FCOS + +| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | Y | N | 1x | 13.0 | 12.9 | 35.3 | [config](./fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710.log.json) | +| HRNetV2p-W18 | pytorch | Y | N | 2x | 13.0 | - | 38.2 | [config](./fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110.log.json) | +| HRNetV2p-W32 | pytorch | Y | N | 1x | 17.5 | 12.9 | 39.5 | [config](./fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730.log.json) | +| HRNetV2p-W32 | pytorch | Y | N | 2x | 17.5 | - | 40.8 | 
[config](./fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133.log.json) | +| HRNetV2p-W18 | pytorch | Y | Y | 2x | 13.0 | 12.9 | 38.3 | [config](./fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651.log.json) | +| HRNetV2p-W32 | pytorch | Y | Y | 2x | 17.5 | 12.4 | 41.9 | [config](./fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846.log.json) | +| HRNetV2p-W48 | pytorch | Y | Y | 2x | 20.3 | 10.8 | 42.7 | [config](./fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752.log.json) | + +**Note:** + +- The `28e` schedule in HTC indicates decreasing the lr at 24 and 27 epochs, with a total of 28 epochs. +- HRNetV2 ImageNet pretrained models are in [HRNets for Image Classification](https://github.com/HRNet/HRNet-Image-Classification). 
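For reference, the `28e` schedule mentioned in the note above is expressed in these configs as a 500-iteration linear warmup followed by step decay at epochs 24 and 27; a condensed sketch of the scheduler block used by `htc_hrnetv2p-w40_28e_coco.py` further down:

```python
# 28-epoch ("28e") schedule: linear warmup, then lr x0.1 at epochs 24 and 27.
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
    dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[24, 27],
        gamma=0.1)
]
```

The `2x` and `20e` schedules in the tables above follow the same pattern with milestones `[16, 22]` and `[16, 19]` respectively, as the per-config files show.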
+ +## Citation + +```latex +@inproceedings{SunXLW19, + title={Deep High-Resolution Representation Learning for Human Pose Estimation}, + author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, + booktitle={CVPR}, + year={2019} +} + +@article{SunZJCXLMWLW19, + title={High-Resolution Representations for Labeling Pixels and Regions}, + author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao + and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, + journal = {CoRR}, + volume = {abs/1904.04514}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w18_20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w18_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca0ebfe43b00886b22ffc426c5ac89a50f4fda6 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w18_20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1ffedc3916748c3c6b333023110e56895de7e4bd --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py @@ -0,0 +1,51 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) +# learning policy +max_epochs = 20 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w40-20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w40-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4a51a02412871905d947bcbb648b1a24e5033f56 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-mask-rcnn_hrnetv2p-w40-20e_coco.py @@ -0,0 +1,12 @@ +_base_ = './cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + 
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w18-20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w18-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8834c1d4ac7973a0e5ceb9f794786c0d706f343a --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w18-20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './cascade-rcnn_hrnetv2p-w32-20e_coco.py' +# model settings +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w32-20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w32-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..afeb75dbe13c5a8425924e280b250208aaec872f --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w32-20e_coco.py @@ -0,0 +1,51 @@ +_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) +# learning policy +max_epochs = 20 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w40-20e_coco.py b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w40-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..66f8882a0030ae82f7a74f67963bbd1da3422a48 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/cascade-rcnn_hrnetv2p-w40-20e_coco.py @@ -0,0 +1,12 @@ +_base_ = './cascade-rcnn_hrnetv2p-w32-20e_coco.py' +# model settings +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ee9a698699a6674c90011b4037843560459462db --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './faster-rcnn_hrnetv2p-w32-1x_coco.py' +# model settings +model = dict( + backbone=dict( + extra=dict( + 
stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-2x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0b72c68f8cbbc83d16313c6d3ab3faf0ac86926f --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w18-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './faster-rcnn_hrnetv2p-w18-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32-1x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a27ad06c5c169c84c6368f767b79b0a817d99fa1 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32-1x_coco.py @@ -0,0 +1,37 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32_2x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c9568ce65c142f86ec6181236464454106d7de99 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w32_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './faster-rcnn_hrnetv2p-w32-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40-1x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b36200230b76269a9644cc7852cec6ce62eac5c3 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40-1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './faster-rcnn_hrnetv2p-w32-1x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', 
in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40_2x_coco.py b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d1b45355db1de7c649136438b91fec5199e08141 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/faster-rcnn_hrnetv2p-w40_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './faster-rcnn_hrnetv2p-w40-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c20ca7767364e14e552b5b8af68a8124f6a1253e --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py @@ -0,0 +1,10 @@ +_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f5b67f6a12e294455829dddb89d05e281f2d7dc0 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c5332d65d129255117f459f45369d5e13ed6653c --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco.py @@ -0,0 +1,10 @@ +_base_ = './fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..159d96d712ae047efd7988bc53ae65006291478f --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py @@ -0,0 +1,43 @@ +_base_ = '../fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py' +model = dict( + data_preprocessor=dict( + mean=[103.53, 116.28, 123.675], + std=[57.375, 
57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256, + stride=2, + num_outs=5)) diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..73fd80e979d88840a57c68ca2fad6cb2e82a26bd --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4c977bf31ed2fb0ef062108cea97c1cd235b89d3 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py @@ -0,0 +1,35 @@ +_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' + +model = dict( + data_preprocessor=dict( + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco.py b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bb0ff6d6ce80e702f6e88b556a770345a23afca4 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco.py @@ -0,0 +1,11 @@ +_base_ = './fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', 
in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w18_20e_coco.py b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w18_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..55255d52a3541c99660dcddfba96da27c99f841d --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w18_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = './htc_hrnetv2p-w32_20e_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w32_20e_coco.py b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w32_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..545cb83eaca50f9d5de1fa6b3f3e569faab7d5f2 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w32_20e_coco.py @@ -0,0 +1,37 @@ +_base_ = '../htc/htc_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_20e_coco.py b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b09256a08ee16893bcc0dd6518714daece294e0d --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './htc_hrnetv2p-w32_20e_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_28e_coco.py b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_28e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1c13b58a1a0690d19239fef40915489ddaff408e --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/htc_hrnetv2p-w40_28e_coco.py @@ -0,0 +1,16 @@ +_base_ = './htc_hrnetv2p-w40_20e_coco.py' + +# learning policy +max_epochs = 28 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[24, 27], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/htc_x101-64x4d_fpn_16xb1-28e_coco.py b/mmpose/configs/mmdet/hrnet/htc_x101-64x4d_fpn_16xb1-28e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1304e5f963351667c28cb264ca5434bc81f744 --- /dev/null +++ 
b/mmpose/configs/mmdet/hrnet/htc_x101-64x4d_fpn_16xb1-28e_coco.py @@ -0,0 +1,16 @@ +_base_ = '../htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py' + +# learning policy +max_epochs = 28 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[24, 27], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-1x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5d5a463a66bed51d73a42eafffea654a18c111ce --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-1x_coco.py @@ -0,0 +1,10 @@ +_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-2x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8abc55924a3eb8e06f9e1e5eeed503890542f6f6 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w18-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './mask-rcnn_hrnetv2p-w18-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-1x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..208b037807dfa9cab1d33ac58ac785ff72e400c1 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-1x_coco.py @@ -0,0 +1,37 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-2x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d3741c820a6a0ca622ce6bbf80cb3e922107efb6 --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w32-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, 
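+        # 500-iteration linear warm-up (factor 0.001 -> 1), then MultiStepLR drops the lr by 10x at epochs 16 and 22 of the 24-epoch (2x) schedule.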
begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40-2x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..360420c56d42814ed6f4d84775f1a19dfa96574a --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './mask-rcnn_hrnetv2p-w40_1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40_1x_coco.py b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..36e2305a520fd8305f9fd1358f5cbcb01027e40d --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/mask-rcnn_hrnetv2p-w40_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './mask-rcnn_hrnetv2p-w18-1x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/mmpose/configs/mmdet/hrnet/metafile.yml b/mmpose/configs/mmdet/hrnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..54c624793291dc9a713c9a6fa6df50499136768c --- /dev/null +++ b/mmpose/configs/mmdet/hrnet/metafile.yml @@ -0,0 +1,971 @@ +Models: + - Name: faster-rcnn_hrnetv2p-w18-1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py + Metadata: + Training Memory (GB): 6.6 + inference time (ms/im): + - value: 74.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster-rcnn_hrnetv2p-w18-2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w18-2x_coco.py + Metadata: + Training Memory (GB): 6.6 + inference time (ms/im): + - value: 74.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth + 
Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster-rcnn_hrnetv2p-w32-1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w32-1x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference time (ms/im): + - value: 80.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster-rcnn_hrnetv2p-w32_2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w32_2x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference time (ms/im): + - value: 80.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster-rcnn_hrnetv2p-w40-1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w40-1x_coco.py + Metadata: + Training Memory (GB): 10.4 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster-rcnn_hrnetv2p-w40_2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster-rcnn_hrnetv2p-w40_2x_coco.py + Metadata: + Training Memory (GB): 10.4 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 
1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w18-1x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w18-1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w18-2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w18-2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w32-1x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w32-1x_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w32-2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w32-2x_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w40_1x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w40_1x_coco.py + Metadata: + Training Memory (GB): 10.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask-rcnn_hrnetv2p-w40-2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask-rcnn_hrnetv2p-w40-2x_coco.py + Metadata: + Training Memory (GB): 10.9 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-rcnn_hrnetv2p-w18-20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-rcnn_hrnetv2p-w18-20e_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 90.91 + 
hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-rcnn_hrnetv2p-w32-20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-rcnn_hrnetv2p-w32-20e_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-rcnn_hrnetv2p-w40-20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-rcnn_hrnetv2p-w40-20e_coco.py + Metadata: + Training Memory (GB): 10.8 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-mask-rcnn_hrnetv2p-w18_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-mask-rcnn_hrnetv2p-w18_20e_coco.py + Metadata: + Training Memory (GB): 8.5 + inference time (ms/im): + - value: 117.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual 
Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-mask-rcnn_hrnetv2p-w32_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py + Metadata: + inference time (ms/im): + - value: 120.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade-mask-rcnn_hrnetv2p-w40-20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade-mask-rcnn_hrnetv2p-w40-20e_coco.py + Metadata: + Training Memory (GB): 12.5 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p-w18_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p-w18_20e_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 212.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p-w32_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p-w32_20e_coco.py + Metadata: + Training Memory (GB): 13.1 + inference time (ms/im): + - value: 204.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO 
+ Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p-w40_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p-w40_20e_coco.py + Metadata: + Training Memory (GB): 14.6 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-1x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w18-gn-head_4xb4-2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w32-gn-head_4xb4-2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w18-gn-head_ms-640-800-4xb4-2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 80.65 + hardware: V100 + 
backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p-w40-gn-head_ms-640-800-4xb4-2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 20.3 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 diff --git a/mmpose/configs/mmdet/htc/README.md b/mmpose/configs/mmdet/htc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a6b77ce4754f5f88e6effcd47dcdbbe4cd739757 --- /dev/null +++ b/mmpose/configs/mmdet/htc/README.md @@ -0,0 +1,67 @@ +# HTC + +> [Hybrid Task Cascade for Instance Segmentation](https://arxiv.org/abs/1901.07518) + + + +## Abstract + +Cascade is a classic yet powerful architecture that has boosted performance on various tasks. However, how to introduce cascade to instance segmentation remains an open question. A simple combination of Cascade R-CNN and Mask R-CNN only brings limited gain. In exploring a more effective approach, we find that the key to a successful instance segmentation cascade is to fully leverage the reciprocal relationship between detection and segmentation. In this work, we propose a new framework, Hybrid Task Cascade (HTC), which differs in two important aspects: (1) instead of performing cascaded refinement on these two tasks separately, it interweaves them for a joint multi-stage processing; (2) it adopts a fully convolutional branch to provide spatial context, which can help distinguishing hard foreground from cluttered background. Overall, this framework can learn more discriminative features progressively while integrating complementary features together in each stage. Without bells and whistles, a single HTC obtains 38.4 and 1.5 improvement over a strong Cascade Mask R-CNN baseline on MSCOCO dataset. Moreover, our overall system achieves 48.6 mask AP on the test-challenge split, ranking 1st in the COCO 2018 Challenge Object Detection Task. + +
+ +## Introduction + +HTC requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 8.2 | 5.8 | 42.3 | 37.4 | [config](./htc_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317_070435.log.json) | +| R-50-FPN | pytorch | 20e | 8.2 | - | 43.3 | 38.3 | [config](./htc_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319_070313.log.json) | +| R-101-FPN | pytorch | 20e | 10.2 | 5.5 | 44.8 | 39.6 | [config](./htc_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317_153107.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 11.4 | 5.0 | 46.1 | 40.5 | [config](./htc_x101-32x4d_fpn_16xb1-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318_034519.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 14.5 | 4.4 | 47.0 | 41.4 | [config](./htc_x101-64x4d_fpn_16xb1-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318_081711.log.json) | + +- In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. +- We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. + If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. + +We also provide a powerful HTC with DCN and multi-scale training model. No testing augmentation is used. 
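+For the 8-GPU note above, a minimal override could look like the following sketch. The file name is hypothetical, and the `optim_wrapper` key assumes the MMEngine-style schedule files that the base configs inherit, where the default SGD lr is 0.02 for 16 GPUs x 1 image/GPU:
+
+```python
+# htc_x101-64x4d_fpn_8xb1-20e_coco.py -- hypothetical local config, not part of this repository
+_base_ = './htc_x101-64x4d_fpn_16xb1-20e_coco.py'
+
+# Halve the learning rate to match the halved total batch size (8 GPUs x 1 image/GPU).
+optim_wrapper = dict(optimizer=dict(lr=0.01))
+```
+
+The DCN and multi-scale HTC model mentioned above is listed in the table below.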
+ +| Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :---: | :-------------: | :-----: | :----: | :-----: | :----------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.4 | 43.8 | [config](./htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312_203410.log.json) | + +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). + +```latex +@inproceedings{chen2019hybrid, + title={Hybrid task cascade for instance segmentation}, + author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Chen Change Loy and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/htc/htc-without-semantic_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/htc/htc-without-semantic_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..791f4eb25b53e122cd4876a71e84a4a9d2f67e26 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc-without-semantic_r50_fpn_1x_coco.py @@ -0,0 +1,223 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='HybridTaskCascade', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='HybridTaskCascadeRoIHead', + interleaved=True, + mask_info_flow=True, + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + 
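+            # A single RoIAlign extractor (7x7 output) shared by the three cascade stages, pooling from the FPN levels with strides 4, 8, 16 and 32.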
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=[ + dict( + type='HTCMaskHead', + with_conv_res=False, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + 
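+            # The three rcnn stages above raise the positive IoU threshold from 0.5 to 0.6 to 0.7, following the cascade training scheme.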
]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/mmpose/configs/mmdet/htc/htc_r101_fpn_20e_coco.py b/mmpose/configs/mmdet/htc/htc_r101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..28091aad31029109c29941404f2c3cc47f9c1092 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './htc_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/htc/htc_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/htc/htc_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3573f1f698095585f4a1de692d0e45a21429822e --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_r50_fpn_1x_coco.py @@ -0,0 +1,33 @@ +_base_ = './htc-without-semantic_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict(pad_seg=True), + roi_head=dict( + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + seg_scale_factor=1 / 8, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2)))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict( + dataset=dict( + data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'), + pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/htc/htc_r50_fpn_20e_coco.py b/mmpose/configs/mmdet/htc/htc_r50_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9f510fa6eec210381707f4d1b01264e72e0d0f76 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_r50_fpn_20e_coco.py @@ -0,0 +1,16 @@ +_base_ = './htc_r50_fpn_1x_coco.py' + +# learning policy +max_epochs = 20 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py b/mmpose/configs/mmdet/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..396d3a0e2b72acc1d9601706ec4629720a46a738 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py @@ -0,0 +1,32 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) + +train_dataloader = dict(batch_size=1, num_workers=1) + +# learning policy +max_epochs = 20 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, 
by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/htc/htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py b/mmpose/configs/mmdet/htc/htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..26d68e7e2cda2a711e4d16899ae85b100afc60a0 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py @@ -0,0 +1,20 @@ +_base_ = './htc_x101-64x4d_fpn_16xb1-20e_coco.py' + +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict( + type='RandomResize', + scale=[(1600, 400), (1600, 1400)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py b/mmpose/configs/mmdet/htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a600ddb0ebd2287cdaa0d00a6008db636d79be76 --- /dev/null +++ b/mmpose/configs/mmdet/htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py @@ -0,0 +1,7 @@ +_base_ = './htc_x101-32x4d_fpn_16xb1-20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + groups=64, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/htc/metafile.yml b/mmpose/configs/mmdet/htc/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..2f0f74d2d06a0f6053fa7f0b9bb73024f8dcaac5 --- /dev/null +++ b/mmpose/configs/mmdet/htc/metafile.yml @@ -0,0 +1,165 @@ +Collections: + - Name: HTC + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - HTC + - RPN + - ResNet + - ResNeXt + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1901.07518 + Title: 'Hybrid Task Cascade for Instance Segmentation' + README: configs/htc/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/htc.py#L6 + Version: v2.0.0 + +Models: + - Name: htc_r50_fpn_1x_coco + In Collection: HTC + Config: configs/htc/htc_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.2 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth + + - Name: htc_r50_fpn_20e_coco + In Collection: HTC + Config: configs/htc/htc_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 8.2 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth + + - Name: htc_r101_fpn_20e_coco + In Collection: HTC + Config: configs/htc/htc_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 10.2 + inference time (ms/im): + - value: 181.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth + + - Name: htc_x101-32x4d_fpn_16xb1-20e_coco + In Collection: HTC + Config: configs/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Training Memory (GB): 11.4 + inference time (ms/im): + - value: 200 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth + + - Name: htc_x101-64x4d_fpn_16xb1-20e_coco + In Collection: HTC + Config: configs/htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Training Memory (GB): 14.5 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth + + - Name: htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco + In Collection: HTC + Config: configs/htc/htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth diff --git a/mmpose/configs/mmdet/instaboost/README.md b/mmpose/configs/mmdet/instaboost/README.md new file mode 100644 index 0000000000000000000000000000000000000000..34132341833308e2d5d3dcb65bd5d8ba0b4e23bd --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/README.md @@ -0,0 +1,58 @@ +# Instaboost + +> [Instaboost: Boosting instance segmentation via probability map guided copy-pasting](https://arxiv.org/abs/1908.07801) + + + +## Abstract + +Instance segmentation requires a large number of training samples to achieve satisfactory performance and benefits from proper data augmentation. To enlarge the training set and increase the diversity, previous methods have investigated using data annotation from other domain (e.g. bbox, point) in a weakly supervised mechanism. 
In this paper, we present a simple, efficient and effective method to augment the training set using the existing instance mask annotations. Exploiting the pixel redundancy of the background, we are able to improve the performance of Mask R-CNN by 1.7 mAP on the COCO dataset and 3.3 mAP on the Pascal VOC dataset simply by introducing random jittering to objects. Furthermore, we propose a location-probability-map-based approach to explore the feasible locations where objects can be placed, based on local appearance similarity. With the guidance of such a map, we boost the performance of R101-Mask R-CNN on instance segmentation from 35.7 mAP to 37.9 mAP without modifying the backbone or network structure. Our method is simple to implement and does not increase the computational complexity. It can be integrated into the training pipeline of any instance segmentation model without affecting the training and inference efficiency. + +
+ +
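The core augmentation described above can be illustrated with a toy sketch. The helper below (`jitter_paste`, `max_shift` and the random generator are illustrative names, not part of `instaboostfast`) cuts one instance out with its binary mask and re-pastes it at a randomly jittered location; the real method additionally inpaints the vacated background and samples the paste location from the appearance-based probability map described in the abstract.

```python
import numpy as np


def jitter_paste(image, mask, max_shift=15, rng=None):
    """Toy InstaBoost-style jitter: re-paste a masked instance at a shifted location.

    image: (H, W, 3) uint8 array, mask: (H, W) boolean array of one instance.
    """
    rng = np.random.default_rng() if rng is None else rng
    dy, dx = rng.integers(-max_shift, max_shift + 1, size=2)
    shifted_mask = np.roll(mask, shift=(dy, dx), axis=(0, 1))
    shifted_inst = np.roll(image * mask[..., None], shift=(dy, dx), axis=(0, 1))
    out = image.copy()
    out[shifted_mask] = shifted_inst[shifted_mask]  # paste the jittered instance
    return out, shifted_mask  # augmented image and the instance's new mask
```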
+ +## Introduction + +Configs in this directory are the implementation of the ICCV 2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and are provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801).
+ +## Usage + +### Requirements + +You need to install `instaboostfast` before using it. + +```shell +pip install instaboostfast +``` + +The code and more details can be found [here](https://github.com/GothicAi/Instaboost).
+ +### Integration with MMDetection + +InstaBoost has already been integrated into the data pipeline, so all you need to do is add or change the **InstaBoost** configuration after **LoadImageFromFile**. We have provided examples like [this](mask_rcnn_r50_fpn_instaboost_4x#L121). You can refer to [`InstaBoostConfig`](https://github.com/GothicAi/InstaBoost-pypi#instaboostconfig) for more details.
+ +## Results and Models + +- All models were trained on `coco_2017_train` and tested on `coco_2017_val` for convenience of evaluation and comparison. In the paper, the results are obtained from `test-dev`. +- To balance accuracy and training time when using InstaBoost, models released on this page are all trained for 48 epochs. Other training and testing configs strictly follow the original framework. +- For results and models in MMDetection V1.x, please refer to [Instaboost](https://github.com/GothicAi/Instaboost).
+ +| Network | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-----------: | :-------------: | :-----: | :------: | :------------: | :----: | :-----: | :---: | :---: |
+| Mask R-CNN | R-50-FPN | 4x | 4.4 | 17.5 | 40.6 | 36.6 | [config](./mask-rcnn_r50_fpn_instaboost-4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223635.log.json) |
+| Mask R-CNN | R-101-FPN | 4x | 6.4 | | 42.5 | 38.0 | [config](./mask-rcnn_r101_fpn_instaboost-4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738.log.json) |
+| Mask R-CNN | X-101-64x4d-FPN | 4x | 10.7 | | 44.7 | 39.7 | [config](./mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947.log.json) |
+| Cascade 
R-CNN | R-101-FPN | 4x | 6.0 | 12.0 | 43.7 | 38.0 | [config](./cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223646.log.json) | + +## Citation + +```latex +@inproceedings{fang2019instaboost, + title={Instaboost: Boosting instance segmentation via probability map guided copy-pasting}, + author={Fang, Hao-Shu and Sun, Jianhua and Wang, Runzhong and Gou, Minghao and Li, Yong-Lu and Lu, Cewu}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={682--691}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r101_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r101_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..53e33b890cad86fcc64e6ea6eefe39138241c8e7 --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r101_fpn_instaboost-4x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f7736cf5756676944c543b7e8412997ac81c2745 --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py @@ -0,0 +1,40 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='InstaBoost', + action_candidate=('normal', 'horizontal', 'skip'), + action_prob=(1, 0, 0), + scale=(0.8, 1.2), + dx=15, + dy=15, + theta=(-1, 1), + color_prob=0.5, + hflag=False, + aug_ratio=0.5), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +max_epochs = 48 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[32, 44], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) + +# only keep latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) diff --git a/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c7938d9e00e3a9c030b788ca83b1a6ddee208aed --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/cascade-mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + 
style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/instaboost/mask-rcnn_r101_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/mask-rcnn_r101_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..55bfa9fefa4db9d6d69fb3c4a285d04592168398 --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/mask-rcnn_r101_fpn_instaboost-4x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0a8c9be81f03f98f97975aca47922575555e3844 --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py @@ -0,0 +1,40 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='InstaBoost', + action_candidate=('normal', 'horizontal', 'skip'), + action_prob=(1, 0, 0), + scale=(0.8, 1.2), + dx=15, + dy=15, + theta=(-1, 1), + color_prob=0.5, + hflag=False, + aug_ratio=0.5), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +max_epochs = 48 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[32, 44], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) + +# only keep latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) diff --git a/mmpose/configs/mmdet/instaboost/mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py b/mmpose/configs/mmdet/instaboost/mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba2ada6011dd77ea2dcac2133bef8d92e522381 --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/instaboost/metafile.yml b/mmpose/configs/mmdet/instaboost/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..228f31b7301e6a5f9d2206e10be07bc7ea3b70be --- /dev/null +++ b/mmpose/configs/mmdet/instaboost/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: InstaBoost + Metadata: + Training Data: COCO + Training Techniques: + - InstaBoost + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Paper: + URL: https://arxiv.org/abs/1908.07801 + Title: 'Instaboost: Boosting instance segmentation via probability map guided copy-pasting' + README: configs/instaboost/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/datasets/pipelines/instaboost.py#L7 + Version: v2.0.0 + +Models: + - Name: mask-rcnn_r50_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth + + - Name: mask-rcnn_r101_fpn_instaboost-4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask-rcnn_r101_fpn_instaboost-4x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth + + - Name: mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth + + - Name: cascade-mask-rcnn_r50_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 83.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth diff --git a/mmpose/configs/mmdet/lad/README.md b/mmpose/configs/mmdet/lad/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3c3b6b4bb4d9a86d87c7843dabb23b4e5d0abc66 --- /dev/null +++ b/mmpose/configs/mmdet/lad/README.md @@ -0,0 +1,45 @@ +# LAD + +> [Improving Object Detection by Label Assignment Distillation](https://arxiv.org/abs/2108.10520) + + + +## Abstract + +Label assignment in object detection aims to assign targets, foreground or background, to sampled regions in an image. Unlike labeling for image classification, this problem is not well defined due to the object's bounding box. In this paper, we investigate the problem from a perspective of distillation, hence we call Label Assignment Distillation (LAD). Our initial motivation is very simple, we use a teacher network to generate labels for the student. 
This can be achieved in two ways: either using the teacher's prediction as the direct targets (soft label), or through the hard labels dynamically assigned by the teacher (LAD). Our experiments reveal that: (i) LAD is more effective than soft-label, but they are complementary. (ii) Using LAD, a smaller teacher can also improve a larger student significantly, while soft-label can't. We then introduce Co-learning LAD, in which two networks simultaneously learn from scratch and the role of teacher and student are dynamically interchanged. Using PAA-ResNet50 as a teacher, our LAD techniques can improve detectors PAA-ResNet101 and PAA-ResNeXt101 to 46AP and 47.5AP on the COCO test-dev set. With a stronger teacher PAA-SwinB, we improve the students PAA-ResNet50 to 43.7AP by only 1x schedule training and standard setting, and PAA-ResNet101 to 47.9AP, significantly surpassing the current methods. + +
+ +
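Read plainly, the abstract says the teacher is used only to produce the label assignment, while the student is optimized with ordinary detection losses on that assignment. A conceptual sketch of one training step under that reading is shown below; `assigner`, `detection_loss` and the two model callables are placeholders for illustration, not mmdet APIs (the configs below wire the actual logic up through `LADHead`).

```python
import torch


def lad_step(teacher, student, images, gt_boxes, gt_labels, assigner, detection_loss):
    """One conceptual Label Assignment Distillation step.

    The frozen teacher decides which anchors are positives/negatives; the student
    is then trained with standard detection losses on that teacher-made assignment.
    """
    with torch.no_grad():
        t_cls, t_reg = teacher(images)  # teacher forward pass, no gradients
        assignment = assigner(t_cls, t_reg, gt_boxes, gt_labels)
    s_cls, s_reg = student(images)  # student forward pass
    loss = detection_loss(s_cls, s_reg, assignment)  # e.g. focal + GIoU, as in the configs below
    loss.backward()
    return loss
```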
+ +## Results and Models + +We provide config files to reproduce the object detection results in the +WACV 2022 paper for Improving Object Detection by Label Assignment +Distillation. + +### PAA with LAD + +| Teacher | Student | Training schedule | AP (val) | Config | Download | +| :-----: | :-----: | :---------------: | :------: | :----------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| -- | R-50 | 1x | 40.4 | [config](../paa/paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | +| -- | R-101 | 1x | 42.6 | [config](../paa/paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | +| R-101 | R-50 | 1x | 41.4 | [config](./lad_r50-paa-r101_fpn_2xb8_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246-74c76ff0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246.log.json) | +| R-50 | R-101 | 1x | 43.2 | [config](./lad_r101-paa-r50_fpn_2xb8_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357-9407ac54.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357.log.json) | + +## Note + +- Meaning of Config name: lad_r50(student model)\_paa(based on paa)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py +- Results may fluctuate by about 0.2 mAP. +- 2 GPUs are used, 8 samples per GPU. + +## Citation + +```latex +@inproceedings{nguyen2021improving, + title={Improving Object Detection by Label Assignment Distillation}, + author={Chuong H. Nguyen and Thuy C. Nguyen and Tuan N. Tang and Nam L. H. 
Phan}, + booktitle = {WACV}, + year={2022} +} +``` diff --git a/mmpose/configs/mmdet/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py b/mmpose/configs/mmdet/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py new file mode 100644 index 0000000000000000000000000000000000000000..d61d08638a073f3dad71d7499221e3ef62ff90f3 --- /dev/null +++ b/mmpose/configs/mmdet/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py @@ -0,0 +1,127 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa + +model = dict( + type='LAD', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + # student + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +train_dataloader = dict(batch_size=8, num_workers=4) +optim_wrapper = dict(type='AmpOptimWrapper', 
optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py b/mmpose/configs/mmdet/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py new file mode 100644 index 0000000000000000000000000000000000000000..f7eaf2bfba1c41b42836e94ffe2714978dffd20a --- /dev/null +++ b/mmpose/configs/mmdet/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py @@ -0,0 +1,126 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa + +model = dict( + type='LAD', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + # student + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +train_dataloader = dict(batch_size=8, num_workers=4) +optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01)) diff --git 
a/mmpose/configs/mmdet/lad/metafile.yml b/mmpose/configs/mmdet/lad/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..230132e63c06c77e16902450c282cf9a25150751 --- /dev/null +++ b/mmpose/configs/mmdet/lad/metafile.yml @@ -0,0 +1,45 @@ +Collections: + - Name: Label Assignment Distillation + Metadata: + Training Data: COCO + Training Techniques: + - Label Assignment Distillation + - SGD with Momentum + - Weight Decay + Training Resources: 2x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2108.10520 + Title: 'Improving Object Detection by Label Assignment Distillation' + README: configs/lad/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.19.0/mmdet/models/detectors/lad.py#L10 + Version: v2.19.0 + +Models: + - Name: lad_r101-paa-r50_fpn_2xb8_coco_1x + In Collection: Label Assignment Distillation + Config: configs/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py + Metadata: + Training Memory (GB): 12.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357-9407ac54.pth + - Name: lad_r50-paa-r101_fpn_2xb8_coco_1x + In Collection: Label Assignment Distillation + Config: configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py + Metadata: + Training Memory (GB): 8.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246-74c76ff0.pth diff --git a/mmpose/configs/mmdet/ld/README.md b/mmpose/configs/mmdet/ld/README.md new file mode 100644 index 0000000000000000000000000000000000000000..65e16c79d9ce4072f46c1473f0f208a533a3a300 --- /dev/null +++ b/mmpose/configs/mmdet/ld/README.md @@ -0,0 +1,43 @@ +# LD + +> [Localization Distillation for Dense Object Detection](https://arxiv.org/abs/2102.12252) + + + +## Abstract + +Knowledge distillation (KD) has witnessed its powerful capability in learning compact models in object detection. Previous KD methods for object detection mostly focus on imitating deep features within the imitation regions instead of mimicking classification logits due to its inefficiency in distilling localization information. In this paper, by reformulating the knowledge distillation process on localization, we present a novel localization distillation (LD) method which can efficiently transfer the localization knowledge from the teacher to the student. Moreover, we also heuristically introduce the concept of valuable localization region that can aid to selectively distill the semantic and localization knowledge for a certain region. Combining these two new components, for the first time, we show that logit mimicking can outperform feature imitation and localization knowledge distillation is more important and efficient than semantic knowledge for distilling object detectors. Our distillation scheme is simple as well as effective and can be easily applied to different dense object detectors. Experiments show that our LD can boost the AP score of GFocal-ResNet-50 with a single-scale 1× training schedule from 40.1 to 42.1 on the COCO benchmark without any sacrifice on the inference speed. + +
+ +
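In the configs below, the distillation target is the discretized box-edge distribution of a GFocal-style head (`loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10)`). A minimal sketch of such a loss, assuming logits of shape `(num_pos, 4, reg_max + 1)`, might look like the following; it mirrors the spirit of the mmdet loss rather than reproducing its exact implementation.

```python
import torch.nn.functional as F


def localization_distillation_loss(student_logits, teacher_logits, T=10.0):
    """Temperature-softened KL divergence between teacher and student edge distributions.

    Both tensors hold logits over the discretized offsets of the four box edges.
    Scaling by T**2 keeps gradient magnitudes comparable across temperatures,
    as is standard in knowledge distillation.
    """
    log_p_student = F.log_softmax(student_logits / T, dim=-1)
    p_teacher = F.softmax(teacher_logits / T, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (T * T)
```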
+ +## Results and Models + +### GFocalV1 with LD + +| Teacher | Student | Training schedule | Mini-batch size | AP (val) | Config | Download | +| :-------: | :-----: | :---------------: | :-------------: | :------: | :-----------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| -- | R-18 | 1x | 6 | 35.8 | | | +| R-101 | R-18 | 1x | 6 | 36.5 | [config](./ld_r18-gflv1-r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206.log.json) | +| -- | R-34 | 1x | 6 | 38.9 | | | +| R-101 | R-34 | 1x | 6 | 39.9 | [config](./ld_r34-gflv1-r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007.log.json) | +| -- | R-50 | 1x | 6 | 40.1 | | | +| R-101 | R-50 | 1x | 6 | 41.0 | [config](./ld_r50-gflv1-r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355.log.json) | +| -- | R-101 | 2x | 6 | 44.6 | | | +| R-101-DCN | R-101 | 2x | 6 | 45.5 | [config](./ld_r101-gflv1-r101-dcn_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920.log.json) | + +## Note + +- Meaning of Config name: ld_r18(student model)\_gflv1(based on gflv1)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py + +## Citation + +```latex +@Inproceedings{zheng2022LD, + title={Localization Distillation for Dense Object Detection}, + author= {Zheng, Zhaohui and Ye, Rongguang and Wang, Ping and Ren, Dongwei and Zuo, Wangmeng and Hou, Qibin and Cheng, Mingming}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/mmpose/configs/mmdet/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py b/mmpose/configs/mmdet/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e928bdc2325825d836bd939f163d71e972c238 --- /dev/null +++ b/mmpose/configs/mmdet/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py @@ -0,0 +1,49 @@ +_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py'] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa +model = dict( + teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py', + teacher_ckpt=teacher_ckpt, + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', 
requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5)) + +max_epochs = 24 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) + +# multi-scale training +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/ld/ld_r18-gflv1-r101_fpn_1x_coco.py b/mmpose/configs/mmdet/ld/ld_r18-gflv1-r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f18bb1d3620f3caecdc870ea8a3346424729225c --- /dev/null +++ b/mmpose/configs/mmdet/ld/ld_r18-gflv1-r101_fpn_1x_coco.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa +model = dict( + type='KnowledgeDistillationSingleStageDetector', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py', + teacher_ckpt=teacher_ckpt, + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict( + type='FPN', + in_channels=[64, 128, 256, 512], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LDHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), + loss_ld=dict( + type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), + reg_max=16, + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/ld/ld_r34-gflv1-r101_fpn_1x_coco.py b/mmpose/configs/mmdet/ld/ld_r34-gflv1-r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2198adc82cfc98fca139e120ea0487989ac8bae7 --- /dev/null +++ 
b/mmpose/configs/mmdet/ld/ld_r34-gflv1-r101_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py'] +model = dict( + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')), + neck=dict( + type='FPN', + in_channels=[64, 128, 256, 512], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5)) diff --git a/mmpose/configs/mmdet/ld/ld_r50-gflv1-r101_fpn_1x_coco.py b/mmpose/configs/mmdet/ld/ld_r50-gflv1-r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..89ab5796969b88080f96f3afcc24183b0c11c730 --- /dev/null +++ b/mmpose/configs/mmdet/ld/ld_r50-gflv1-r101_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py'] +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5)) diff --git a/mmpose/configs/mmdet/ld/metafile.yml b/mmpose/configs/mmdet/ld/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..a807d1b816e78734839cc1482c9c3d4afe59d6ac --- /dev/null +++ b/mmpose/configs/mmdet/ld/metafile.yml @@ -0,0 +1,69 @@ +Collections: + - Name: Localization Distillation + Metadata: + Training Data: COCO + Training Techniques: + - Localization Distillation + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2102.12252 + Title: 'Localization Distillation for Dense Object Detection' + README: configs/ld/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.11.0/mmdet/models/dense_heads/ld_head.py#L11 + Version: v2.11.0 + +Models: + - Name: ld_r18-gflv1-r101_fpn_1x_coco + In Collection: Localization Distillation + Config: configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 1.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth + - Name: ld_r34-gflv1-r101_fpn_1x_coco + In Collection: Localization Distillation + Config: configs/ld/ld_r34-gflv1-r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 2.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth + - Name: ld_r50-gflv1-r101_fpn_1x_coco + In Collection: Localization Distillation + Config: configs/ld/ld_r50-gflv1-r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth + - Name: ld_r101-gflv1-r101-dcn_fpn_2x_coco + In Collection: Localization Distillation + Config: 
configs/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth diff --git a/mmpose/configs/mmdet/legacy_1.x/README.md b/mmpose/configs/mmdet/legacy_1.x/README.md new file mode 100644 index 0000000000000000000000000000000000000000..443a0a71b46f4d2eda45571d6b7e108af6528d02 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/README.md @@ -0,0 +1,54 @@ +# Legacy Configs in MMDetection V1.x + + + +Configs in this directory implement the legacy configs used by MMDetection V1.x and its model zoos.
+ +To help users convert their models from V1.x to MMDetection V2.0, we provide V1.x configs for running inference with the converted V1.x models. +Due to the BC-breaking changes introduced in MMDetection V2.0 compared with MMDetection V1.x, running inference with the same model weights in these two versions will produce different results. The difference is within 1% absolute AP, as shown in the following table.
+ +## Usage + +To upgrade the model version, users need to do the following steps.
+ +### 1. Convert model weights + +There are three main differences in the model weights between the V1.x and V2.0 codebases.
+ +1. Since the class order in every detector's classification branch is reordered, all the legacy model weights need to go through the conversion process. +2. The regression and segmentation heads no longer contain the background channel. Weights in these background channels should be removed to be compatible with the current codebase. +3. For two-stage detectors, their weights need to be upgraded since MMDetection V2.0 refactors all the two-stage detectors with `RoIHead`.
+ +Users can apply the same modifications to self-implemented detectors. We provide a script `tools/model_converters/upgrade_model_version.py` to convert the model weights in the V1.x model zoo.
+ +```bash +python tools/model_converters/upgrade_model_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} --num-classes ${NUM_CLASSES} + +``` + +- OLD_MODEL_PATH: the path of the model weights in the 1.x version. +- NEW_MODEL_PATH: the path to save the converted model weights for the 2.0 version. +- NUM_CLASSES: number of classes of the original model weights, usually 81 for the COCO dataset and 21 for the VOC dataset. + The number of classes in the V2.0 models should equal that of the V1.x models minus 1 (the background class is removed).
+ +### 2. Use configs with legacy settings + +After converting the model weights, check out the v1.2 release to find the corresponding config file that uses the legacy settings. +The V1.x models usually need these three legacy modules: `LegacyAnchorGenerator`, `LegacyDeltaXYWHBBoxCoder`, and `RoIAlign(align=False)`. +For models using ResNet-Caffe backbones, you also need to change the pretrained checkpoint name and the corresponding `img_norm_cfg`. +An example is [`retinanet_r50-caffe_fpn_1x_coco_v1.py`](retinanet_r50-caffe_fpn_1x_coco_v1.py). +Then use the config to test the model weights. For most models, the obtained results should be close to those in V1.x. +We provide configs of some common structures in this directory.
+ +## Performance + +The performance changes after converting the models in this directory are listed in the following table. 
+ +| Method | Style | Lr schd | V1.x box AP | V1.x mask AP | V2.0 box AP | V2.0 mask AP | Config | Download | +| :-------------------------: | :-----: | :-----: | :---------: | :----------: | :---------: | :----------: | :-------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN R-50-FPN | pytorch | 1x | 37.3 | 34.2 | 36.8 | 33.9 | [config](./mask-rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth) | +| RetinaNet R-50-FPN | caffe | 1x | 35.8 | - | 35.4 | - | [config](./retinanet_r50-caffe_fpn_1x_coco_v1.py) | | +| RetinaNet R-50-FPN | pytorch | 1x | 35.6 | - | 35.2 | - | [config](./retinanet_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r50_fpn_1x_20181125-7b0c2548.pth) | +| Cascade Mask R-CNN R-50-FPN | pytorch | 1x | 41.2 | 35.7 | 40.8 | 35.6 | [config](./cascade-mask-rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_fpn_1x_20181123-88b170c9.pth) | +| SSD300-VGG16 | caffe | 120e | 25.7 | - | 25.4 | - | [config](./ssd300_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_coco_vgg16_caffe_120e_20181221-84d7110b.pth) | diff --git a/mmpose/configs/mmdet/legacy_1.x/cascade-mask-rcnn_r50_fpn_1x_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/cascade-mask-rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..f948a7a9c10f618438e8ff54bdf3333335577e90 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/cascade-mask-rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,78 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])), + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2])), + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1])), + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + 
type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067])), + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)))) diff --git a/mmpose/configs/mmdet/legacy_1.x/faster-rcnn_r50_fpn_1x_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/faster-rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..66bf9713793c4a0a951273d037253f930fbb31a6 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/faster-rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='FasterRCNN', + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + rpn_head=dict( + type='RPNHead', + anchor_generator=dict( + type='LegacyAnchorGenerator', + center_offset=0.5, + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(max_per_img=2000), + rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/mmpose/configs/mmdet/legacy_1.x/mask-rcnn_r50_fpn_1x_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/mask-rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..690802598493e64821aaf98111161e36b169e475 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/mask-rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + rpn_head=dict( + anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)), + bbox_head=dict( + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(max_per_img=2000), + rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/mmpose/configs/mmdet/legacy_1.x/retinanet_r50-caffe_fpn_1x_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/retinanet_r50-caffe_fpn_1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..49abc31a002f56147cacf1b7707140a14b784a99 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/retinanet_r50-caffe_fpn_1x_coco_v1.py @@ -0,0 +1,16 @@ +_base_ = './retinanet_r50_fpn_1x_coco_v1.py' 
+model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + # use caffe img_norm + mean=[102.9801, 115.9465, 122.7717], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..6198b9717957374ce734ca74de5f54dda44123b9 --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + bbox_head=dict( + type='RetinaHead', + anchor_generator=dict( + type='LegacyAnchorGenerator', + center_offset=0.5, + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) diff --git a/mmpose/configs/mmdet/legacy_1.x/ssd300_coco_v1.py b/mmpose/configs/mmdet/legacy_1.x/ssd300_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ffc633a9b4773d7116bed7cbf8bcab7fb3110d --- /dev/null +++ b/mmpose/configs/mmdet/legacy_1.x/ssd300_coco_v1.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +input_size = 300 +model = dict( + bbox_head=dict( + type='SSDHead', + anchor_generator=dict( + type='LegacySSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.15, 0.9), + strides=[8, 16, 32, 64, 100, 300], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]))) diff --git a/mmpose/configs/mmdet/libra_rcnn/README.md b/mmpose/configs/mmdet/libra_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ee8015ba12286a9bf940bf2b690441505e39ec0e --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/README.md @@ -0,0 +1,53 @@ +# Libra R-CNN + +> [Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/abs/1904.02701) + + + +## Abstract + +Compared with model architectures, the training process, which is also crucial to the success of detectors, has received relatively less attention in object detection. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple but effective framework towards balanced learning for object detection. It integrates three novel components: IoU-balanced sampling, balanced feature pyramid, and balanced L1 loss, respectively for reducing the imbalance at sample, feature, and objective level. Benefitted from the overall balanced design, Libra R-CNN significantly improves the detection performance. 
Without bells and whistles, it achieves 2.5 points and 2.0 points higher Average Precision (AP) than FPN Faster R-CNN and RetinaNet respectively on MSCOCO. + +Instance recognition is rapidly advanced along with the developments of various deep convolutional neural networks. Compared to the architectures of networks, the training process, which is also crucial to the success of detectors, has received relatively less attention. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple yet effective framework towards balanced learning for instance recognition. It integrates IoU-balanced sampling, balanced feature pyramid, and objective re-weighting, respectively for reducing the imbalance at sample, feature, and objective level. Extensive experiments conducted on MS COCO, LVIS and Pascal VOC datasets prove the effectiveness of the overall balanced design. + +
+ +
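Of the three components, the objective-level one is the easiest to state compactly: it is the balanced L1 loss configured as `BalancedL1Loss` (with `alpha=0.5`, `gamma=1.5`) in the configs below. The function here is an illustrative re-derivation from the paper's formulation, not the mmdet implementation.

```python
import math

import torch


def balanced_l1_loss(pred, target, alpha=0.5, gamma=1.5, beta=1.0):
    """Balanced L1 loss (Libra R-CNN): boost the gradient contribution of inliers
    (|pred - target| < beta) while capping the gradient of outliers at gamma."""
    diff = torch.abs(pred - target)
    b = math.exp(gamma / alpha) - 1  # chosen so the two branches join smoothly at diff == beta
    loss = torch.where(
        diff < beta,
        alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta,
    )
    return loss.mean()
```

`beta=1.0` matches the R-CNN heads in this directory, while the RetinaNet variant below uses `beta=0.11`.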
+ +## Results and Models + +The results on COCO 2017val are shown in the table below. (Results on test-dev are usually slightly higher than those on val.)
+ +| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :---: | :---: |
+| Faster R-CNN | R-50-FPN | pytorch | 1x | 4.6 | 19.0 | 38.3 | [config](./libra-faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
+| Fast R-CNN | R-50-FPN | pytorch | 1x | | | | | |
+| Faster R-CNN | R-101-FPN | pytorch | 1x | 6.5 | 14.4 | 40.1 | [config](./libra-faster-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203_001405.log.json) |
+| Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 10.8 | 8.5 | 42.7 | [config](./libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315_231625.log.json) |
+| RetinaNet | R-50-FPN | pytorch | 1x | 4.2 | 17.7 | 37.6 | [config](./libra-retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205_112757.log.json) |
+ +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf). + +The extended version of [Libra R-CNN](https://arxiv.org/pdf/2108.10175.pdf) has been accepted by IJCV. 
+ +```latex +@inproceedings{pang2019libra, + title={Libra R-CNN: Towards Balanced Learning for Object Detection}, + author={Pang, Jiangmiao and Chen, Kai and Shi, Jianping and Feng, Huajun and Ouyang, Wanli and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} + +@article{pang2021towards, + title={Towards Balanced Learning for Instance Recognition}, + author={Pang, Jiangmiao and Chen, Kai and Li, Qi and Xu, Zhihai and Feng, Huajun and Shi, Jianping and Ouyang, Wanli and Lin, Dahua}, + journal={International Journal of Computer Vision}, + volume={129}, + number={5}, + pages={1376--1393}, + year={2021}, + publisher={Springer} +} +``` diff --git a/mmpose/configs/mmdet/libra_rcnn/libra-fast-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/libra_rcnn/libra-fast-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2efe440ce361d5bc5855c76001a5ff6b661a568a --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/libra-fast-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,52 @@ +_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=2, + refine_type='non_local') + ], + roi_head=dict( + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=1.0, + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + sampler=dict( + _delete_=True, + type='CombinedSampler', + num=512, + pos_fraction=0.25, + add_gt_as_proposals=True, + pos_sampler=dict(type='InstanceBalancedPosSampler'), + neg_sampler=dict( + type='IoUBalancedNegSampler', + floor_thr=-1, + floor_fraction=0, + num_bins=3))))) + +# MMEngine support the following two ways, users can choose +# according to convenience +# _base_.train_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl' # noqa +train_dataloader = dict( + dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_train2017.pkl')) + +# _base_.val_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl' # noqa +# test_dataloader = _base_.val_dataloader +val_dataloader = dict( + dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_val2017.pkl')) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..985df64cb437e233f76235ee9be4b788ec8f701c --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ee507d26338b49eca004ee195fd2b1954c32d9 --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + 
dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=2, + refine_type='non_local') + ], + roi_head=dict( + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=1.0, + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1), + rcnn=dict( + sampler=dict( + _delete_=True, + type='CombinedSampler', + num=512, + pos_fraction=0.25, + add_gt_as_proposals=True, + pos_sampler=dict(type='InstanceBalancedPosSampler'), + neg_sampler=dict( + type='IoUBalancedNegSampler', + floor_thr=-1, + floor_fraction=0, + num_bins=3))))) diff --git a/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..158e238ed14d9c56b7d02d17f0061b08d4116282 --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/libra_rcnn/libra-retinanet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/libra_rcnn/libra-retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..be2742098fb8f1e46bbb16c9d3e2e20c2e3083aa --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/libra-retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,26 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=1, + refine_type='non_local') + ], + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=0.11, + loss_weight=1.0))) diff --git a/mmpose/configs/mmdet/libra_rcnn/metafile.yml b/mmpose/configs/mmdet/libra_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f01bd02bb7a55dd899bc64a56346357f2951f6d5 --- /dev/null +++ b/mmpose/configs/mmdet/libra_rcnn/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: Libra R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - IoU-Balanced Sampling + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Balanced Feature Pyramid + Paper: + URL: https://arxiv.org/abs/1904.02701 + Title: 'Libra R-CNN: Towards Balanced Learning for Object Detection' + README: configs/libra_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/bfp.py#L10 + Version: v2.0.0 + +Models: + - Name: libra-faster-rcnn_r50_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.6 + inference time (ms/im): + - value: 52.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth + + - Name: libra-faster-rcnn_r101_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra-faster-rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + inference time (ms/im): + - value: 69.44 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth + + - Name: libra-faster-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 117.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth + + - Name: libra-retinanet_r50_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra-retinanet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 56.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth diff --git a/mmpose/configs/mmdet/lvis/README.md b/mmpose/configs/mmdet/lvis/README.md new file mode 100644 index 0000000000000000000000000000000000000000..57aeda438b3cb55e7c3c0d22cddc27a41e6fa3ae --- /dev/null +++ b/mmpose/configs/mmdet/lvis/README.md @@ -0,0 +1,56 @@ +# LVIS + +> [LVIS: A Dataset for Large Vocabulary Instance Segmentation](https://arxiv.org/abs/1908.03195) + + + +## Abstract + +Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. In this work, we introduce LVIS (pronounced \`el-vis'): a new dataset for Large Vocabulary Instance Segmentation. We plan to collect ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge. + +
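The long tail noted above is why every config in this folder oversamples rare categories (class-balanced sampling with an oversample threshold of `1e-3`, as listed in the common settings below). A rough sketch of the per-image repeat factor computation, assuming image-level category sets; the real logic lives in the dataset wrapper, so treat this as illustration only:

```python
import math
from collections import Counter

def repeat_factors(image_cats, oversample_thr=1e-3):
    """Per-image repeat factors for class-balanced oversampling (illustrative only).

    image_cats: one set of category ids per image.
    """
    num_images = len(image_cats)
    img_per_cat = Counter(c for cats in image_cats for c in cats)
    # Categories rarer than the threshold get a repeat factor > 1.
    cat_repeat = {
        c: max(1.0, math.sqrt(oversample_thr / (n / num_images)))
        for c, n in img_per_cat.items()
    }
    # An image is repeated as often as its rarest category demands.
    return [max((cat_repeat[c] for c in cats), default=1.0) for cats in image_cats]

print(repeat_factors([{1}, {1, 2}, {2}, {3}], oversample_thr=0.5))
```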
+ +
+ +
+## Common Settings + +- Please follow the [install guide](../../docs/get_started.md#install-mmdetection) to install the open-mmlab forked cocoapi first. + +- Run the following script to install our forked lvis-api. + + ```shell + pip install git+https://github.com/lvis-dataset/lvis-api.git + ``` + +- All experiments use the oversampling strategy described [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with an oversample threshold of `1e-3`. + +- LVIS v0.5 is about half the size of COCO, so a `2x` schedule on LVIS runs roughly the same number of iterations as a `1x` schedule on COCO. + +## Results and Models of LVIS v0.5 + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 2x | - | - | 26.1 | 25.9 | [config](./mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_20200531_160435.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 27.1 | 27.0 | [config](./mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_20200601_134748.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 26.7 | 26.9 | [config](./mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_20200531_221749.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 26.4 | 26.0 | [config](./mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_20200601_194651.log.json) | + +## Results and Models of LVIS v1 + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------: | 
:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 9.1 | - | 22.5 | 21.7 | [config](./mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_061305.log.json) | +| R-101-FPN | pytorch | 1x | 10.8 | - | 24.6 | 23.6 | [config](./mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_070959.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 11.8 | - | 26.7 | 25.5 | [config](./mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_071317.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 14.6 | - | 27.2 | 25.8 | [config](./mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200830_060206.log.json) | + +## Citation + +```latex +@inproceedings{gupta2019lvis, + title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation}, + author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross}, + booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py b/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..3994d75a81aaa5368bd42c591fa770b05b665e25 --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py b/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py new file mode 100644 index 0000000000000000000000000000000000000000..ed8b3639a0046e14d5c11a98f9d7dc38eb4badec --- /dev/null +++ 
b/mmpose/configs/mmdet/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py b/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd3683e3005dd09ada78827825da516bfd4c66e --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py b/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py new file mode 100644 index 0000000000000000000000000000000000000000..b36b6c17fef7da3646654e494fa715302b1b050e --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v0.5_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..9da3ab6db04ec6ee772202270a47179171a9d13c --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py new file mode 100644 index 0000000000000000000000000000000000000000..9a097c94c7e2d7c7b583027ce6000aba8205d490 --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py new file mode 100644 index 
0000000000000000000000000000000000000000..b0819b3ec60d710205a643305edd2a27db977d9b --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py new file mode 100644 index 0000000000000000000000000000000000000000..9d2720089181f066bcaa04b73903836b64b97bb9 --- /dev/null +++ b/mmpose/configs/mmdet/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/lvis/metafile.yml b/mmpose/configs/mmdet/lvis/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f8def96c7e5404bba0b40f4f00ce9efabfe0a891 --- /dev/null +++ b/mmpose/configs/mmdet/lvis/metafile.yml @@ -0,0 +1,128 @@ +Models: + - Name: mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v0.5 + Metrics: + box AP: 26.1 + - Task: Instance Segmentation + Dataset: LVIS v0.5 + Metrics: + mask AP: 25.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth + + - Name: mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v0.5 + Metrics: + box AP: 27.1 + - Task: Instance Segmentation + Dataset: LVIS v0.5 + Metrics: + mask AP: 27.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth + + - Name: mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v0.5 + Metrics: + box AP: 26.7 + - Task: Instance Segmentation + Dataset: LVIS v0.5 + Metrics: + mask AP: 26.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth + + - Name: mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v0.5 + Metrics: + box AP: 26.4 + - Task: Instance Segmentation + Dataset: LVIS v0.5 + Metrics: + mask AP: 
26.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth + + - Name: mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 22.5 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 21.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth + + - Name: mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 24.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 23.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth + + - Name: mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 26.7 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth + + - Name: mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1 + In Collection: Mask R-CNN + Config: configs/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.2 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth diff --git a/mmpose/configs/mmdet/mask2former/README.md b/mmpose/configs/mmdet/mask2former/README.md new file mode 100644 index 0000000000000000000000000000000000000000..94b0821e7a2f3a467f48f8f7581e6c10d1571404 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/README.md @@ -0,0 +1,76 @@ +# Mask2Former + +> [Masked-attention Mask Transformer for Universal Image Segmentation](http://arxiv.org/abs/2112.01527) + + + +## Abstract + +Image segmentation is about grouping pixels with different semantics, e.g., category or instance membership, where each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. 
Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K). + +
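The masked-attention step described above can be pictured as ordinary cross-attention whose logits are suppressed outside each query's currently predicted mask. A toy NumPy sketch (shapes and the `-1e9` fill value are illustrative, not the mmdet implementation):

```python
import numpy as np

def masked_cross_attention(queries, keys, values, mask):
    """Toy masked attention: each query attends only inside its predicted region.

    queries: (Q, C); keys, values: (N, C); mask: (Q, N) bool, True = attend.
    """
    logits = queries @ keys.T / np.sqrt(queries.shape[-1])
    logits = np.where(mask, logits, -1e9)  # suppress positions outside the mask
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    # Note: an all-False row degenerates to uniform attention here, loosely mirroring
    # the fallback used when a predicted mask is empty.
    return weights @ values

rng = np.random.default_rng(0)
Q, N, C = 4, 16, 8
out = masked_cross_attention(rng.normal(size=(Q, C)), rng.normal(size=(N, C)),
                             rng.normal(size=(N, C)), rng.random((Q, N)) > 0.5)
print(out.shape)  # (4, 8)
```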
+ +
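Before the data-preparation notes below, here is a minimal inference sketch for the checkpoints listed in the tables that follow. It assumes mmdet 3.x is installed; the paths are illustrative and should point at this config directory and a locally downloaded weight file:

```python
from mmdet.apis import init_detector, inference_detector

# Illustrative paths: adjust to wherever the config and downloaded checkpoint live.
config_file = 'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'
checkpoint_file = 'mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535-54df384a.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # a DetDataSample

# Instance predictions; panoptic checkpoints additionally expose `pred_panoptic_seg`.
print(result.pred_instances.bboxes.shape, result.pred_instances.scores[:5])
```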
+ +## Introduction + +Mask2Former requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +| | | ├── instances_train2017.json +| | | ├── instances_val2017.json +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +### Panoptic segmentation + +| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | PQ | box mAP | mask mAP | Config | Download | +| -------- | ------- | ------------ | ------- | -------- | -------------- | ---- | ------- | -------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| R-50 | pytorch | ImageNet-1K | 50e | 13.9 | - | 52.0 | 44.5 | 41.8 | [config](./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic/mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535-54df384a.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic/mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535.log.json) | +| R-101 | pytorch | ImageNet-1K | 50e | 16.1 | - | 52.4 | 45.3 | 42.4 | [config](./mask2former_r101_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic/mask2former_r101_8xb2-lsj-50e_coco-panoptic_20220329_225104-c74d4d71.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104.log.json) | +| Swin-T | - | ImageNet-1K | 50e | 15.9 | - | 53.4 | 46.3 | 43.4 | [config](./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic_20220326_224553-3ec9e0ae.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553.log.json) | +| Swin-S | - | ImageNet-1K | 50e | 19.1 | - | 54.5 | 47.8 | 44.5 | [config](./mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic_20220329_225200-4a16ded7.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200.log.json) | +| Swin-B | - | ImageNet-1K | 50e | 26.0 | - | 55.1 | 48.2 | 44.9 | [config](./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic_20220331_002244-8a651d82.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244.log.json) | +| Swin-B | - | ImageNet-21K | 50e | 25.8 | - | 56.3 | 50.0 | 46.3 | [config](./mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic_20220329_230021-05ec7315.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021.log.json) | +| Swin-L | - | ImageNet-21K | 100e | 21.1 | - | 57.6 | 52.2 | 48.5 | [config](./mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic_20220407_104949-82f8d28d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949.log.json) | + +### Instance segmentation + +| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | box mAP | mask mAP | Config | Download | +| -------- | ------- | ----------- | ------- | -------- | -------------- | ------- | -------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| R-50 | pytorch | ImageNet-1K | 50e | 13.7 | - | 45.7 | 42.9 | [config](./mask2former_r50_8xb2-lsj-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r50_8xb2-lsj-50e_coco/mask2former_r50_8xb2-lsj-50e_coco_20220506_191028-41b088b6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028.log.json) | +| R-101 | pytorch | ImageNet-1K | 50e | 15.5 | - | 46.7 | 44.0 | [config](./mask2former_r101_8xb2-lsj-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r101_8xb2-lsj-50e_coco/mask2former_r101_8xb2-lsj-50e_coco_20220426_100250-ecf181e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250.log.json) | +| 
Swin-T | - | ImageNet-1K | 50e | 15.3 | - | 47.7 | 44.7 | [config](./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco_20220508_091649-01b0f990.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649.log.json) | +| Swin-S | - | ImageNet-1K | 50e | 18.8 | - | 49.3 | 46.1 | [config](./mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco_20220504_001756-c9d0c4f2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756.log.json) | + +### Note + +1. The performance is unstable. The `Mask2Former-R50-coco-panoptic` may fluctuate about 0.2 PQ. The models other than `Mask2Former-R50-coco-panoptic` were trained with mmdet 2.x and have been converted for mmdet 3.x. +2. We have trained the instance segmentation models many times (see more details in [PR 7571](https://github.com/open-mmlab/mmdetection/pull/7571)). The results of the trained models are relatively stable (+- 0.2), and have a certain gap (about 0.2 AP) in comparison with the results in the [paper](http://arxiv.org/abs/2112.01527). However, the performance of the model trained with the official code is unstable and may also be slightly lower than the reported results as mentioned in the [issue](https://github.com/facebookresearch/Mask2Former/issues/46). + +## Citation + +```latex +@article{cheng2021mask2former, + title={Masked-attention Mask Transformer for Universal Image Segmentation}, + author={Bowen Cheng and Ishan Misra and Alexander G. 
Schwing and Alexander Kirillov and Rohit Girdhar}, + journal={arXiv}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..66685a2fca9c0e165ba0024e242d5eabf5d565c9 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,7 @@ +_base_ = './mask2former_r50_8xb2-lsj-50e_coco-panoptic.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py b/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f4c29906d9fc6ce47ce928fb73dcb1bb6c6f7ba9 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py @@ -0,0 +1,7 @@ +_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py'] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..c53e981bf0d5081c3735676be922f64298a8fc80 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,251 @@ +_base_ = [ + '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' +] +image_size = (1024, 1024) +batch_augments = [ + dict( + type='BatchFixedSizePad', + size=image_size, + img_pad_value=0, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + seg_pad_value=255) +] +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + seg_pad_value=255, + batch_augments=batch_augments) + +num_things_classes = 80 +num_stuff_classes = 53 +num_classes = num_things_classes + num_stuff_classes +model = dict( + type='Mask2Former', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + panoptic_head=dict( + type='Mask2FormerHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( # DeformableDetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DeformableDetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + dropout=0.0, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)))), + positional_encoding=dict(num_feats=128, 
normalize=True)), + enforce_decoder_input_project=False, + positional_encoding=dict(num_feats=128, normalize=True), + transformer_decoder=dict( # Mask2FormerTransformerDecoder + return_intermediate=True, + num_layers=9, + layer_cfg=dict( # Mask2FormerTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + cross_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True))), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0)), + panoptic_fusion_head=dict( + type='MaskFormerFusionHead', + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_panoptic=None, + init_cfg=None), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='ClassificationCost', weight=2.0), + dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dict(type='DiceCost', weight=5.0, pred_act=True, eps=1.0) + ]), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=True, + # max_per_image is for instance segmentation. + max_per_image=100, + iou_thr=0.8, + # In Mask2Former's panoptic postprocessing, + # it will filter mask area where score is less than 0.5 . 
+ filter_low_score=True), + init_cfg=None) + +# dataset settings +data_root = 'data/coco/' +train_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args={{_base_.backend_args}}), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True, + backend_args={{_base_.backend_args}}), + dict(type='RandomFlip', prob=0.5), + # large scale jittering + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=image_size, + crop_type='absolute', + recompute_bbox=True, + allow_negative_crop=True), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +val_evaluator = [ + dict( + type='CocoPanopticMetric', + ann_file=data_root + 'annotations/panoptic_val2017.json', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + backend_args={{_base_.backend_args}}), + dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + backend_args={{_base_.backend_args}}) +] +test_evaluator = val_evaluator + +# optimizer +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi, + }, + norm_decay_mult=0.0), + clip_grad=dict(max_norm=0.01, norm_type=2)) + +# learning policy +max_iters = 368750 +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=max_iters, + by_epoch=False, + milestones=[327778, 355092], + gamma=0.1) + +# Before 365001th iteration, we do evaluation every 5000 iterations. +# After 365000th iteration, we do evaluation every 368750 iterations, +# which means that we do evaluation at the end of training. +interval = 5000 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +train_cfg = dict( + type='IterBasedTrainLoop', + max_iters=max_iters, + val_interval=interval, + dynamic_intervals=dynamic_intervals) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + by_epoch=False, + save_last=True, + max_keep_ckpts=3, + interval=interval)) +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py b/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..24a17f58c54a2e8694a8bf960d10ebc918acdddc --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py @@ -0,0 +1,100 @@ +_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] + +num_things_classes = 80 +num_stuff_classes = 0 +num_classes = num_things_classes + num_stuff_classes +image_size = (1024, 1024) +batch_augments = [ + dict( + type='BatchFixedSizePad', + size=image_size, + img_pad_value=0, + pad_mask=True, + mask_pad_value=0, + pad_seg=False) +] +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + pad_mask=True, + mask_pad_value=0, + pad_seg=False, + batch_augments=batch_augments) +model = dict( + data_preprocessor=data_preprocessor, + panoptic_head=dict( + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])), + panoptic_fusion_head=dict( + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes), + test_cfg=dict(panoptic_on=False)) + +# dataset settings +train_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', prob=0.5), + # large scale jittering + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + resize_type='Resize', + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=image_size, + crop_type='absolute', + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', + to_float32=True, + backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline)) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric=['bbox', 'segm'], + format_only=False, + backend_args={{_base_.backend_args}}) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..b275f23175e8d8294b8bb76e9708dd014ef7030b --- /dev/null +++ 
b/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,5 @@ +_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa + +model = dict( + backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..bd59400b4aed1aac97795e474633d5581705b899 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,42 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=depths, + num_heads=[4, 8, 16, 32], + window_size=12, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict(in_channels=[128, 256, 512, 1024])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..e203ffc96c40098e4cf0788fc47b4438ebffbb41 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py @@ -0,0 +1,25 @@ +_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa + +model = dict( + backbone=dict( + embed_dims=192, + num_heads=[6, 12, 24, 48], + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536])) + +train_dataloader = dict(batch_size=1, num_workers=1) + +# learning policy +max_iters = 737500 +param_scheduler = dict(end=max_iters, milestones=[655556, 710184]) + +# Before 735001th iteration, we do evaluation every 5000 iterations. 
+# After 735000th iteration, we do evaluation every 737500 iterations, +# which means that we do evaluation at the end of training.' +interval = 5000 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +train_cfg = dict( + max_iters=max_iters, + val_interval=interval, + dynamic_intervals=dynamic_intervals) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d081db58a74dd02b3b715c3777f077d42de7ca --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..69d5e8c6f96434973e3e9f3498155e385af815be --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 
'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..1c00d7a697f07ad618a0b4735432a0a74d4992a9 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,58 @@ +_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa + +depths = [2, 2, 6, 2] +model = dict( + type='Mask2Former', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), + init_cfg=None) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) + +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py b/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb9c21858ebe065691a8a963bf5dec85542fb57 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py @@ -0,0 +1,56 @@ +_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +depths = [2, 2, 6, 2] +model = dict( + type='Mask2Former', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + 
qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), + init_cfg=None) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former/metafile.yml b/mmpose/configs/mmdet/mask2former/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..3321239213f7345084b63b77cf02b0525a534585 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former/metafile.yml @@ -0,0 +1,223 @@ +Collections: + - Name: Mask2Former + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - Mask2Former + Paper: + URL: https://arxiv.org/pdf/2112.01527 + Title: 'Masked-attention Mask Transformer for Universal Image Segmentation' + README: configs/mask2former/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/detectors/mask2former.py#L7 + Version: v2.23.0 + +Models: +- Name: mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 19.1 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.5 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 54.5 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic_20220329_225200-4a16ded7.pth +- Name: mask2former_r101_8xb2-lsj-50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py + Metadata: + Training Memory (GB): 15.5 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r101_8xb2-lsj-50e_coco/mask2former_r101_8xb2-lsj-50e_coco_20220426_100250-ecf181e2.pth +- Name: mask2former_r101_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: 
configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 16.1 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.4 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 52.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic/mask2former_r101_8xb2-lsj-50e_coco-panoptic_20220329_225104-c74d4d71.pth +- Name: mask2former_r50_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 13.9 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.8 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 52.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic/mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535-54df384a.pth +- Name: mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 15.9 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.4 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 53.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic_20220326_224553-3ec9e0ae.pth +- Name: mask2former_r50_8xb2-lsj-50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py + Metadata: + Training Memory (GB): 13.7 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_r50_8xb2-lsj-50e_coco/mask2former_r50_8xb2-lsj-50e_coco_20220506_191028-41b088b6.pth +- Name: mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py + Metadata: + Training Memory (GB): 21.1 + Iterations: 737500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 48.5 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 57.6 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic_20220407_104949-82f8d28d.pth +- Name: mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 25.8 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 46.3 + - Task: Panoptic Segmentation + Dataset: 
COCO + Metrics: + PQ: 56.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic_20220329_230021-05ec7315.pth +- Name: mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py + Metadata: + Training Memory (GB): 26.0 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.9 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 55.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic_20220331_002244-8a651d82.pth +- Name: mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py + Metadata: + Training Memory (GB): 15.3 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco_20220508_091649-01b0f990.pth +- Name: mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py + Metadata: + Training Memory (GB): 18.8 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco_20220504_001756-c9d0c4f2.pth diff --git a/mmpose/configs/mmdet/mask2former_vis/README.md b/mmpose/configs/mmdet/mask2former_vis/README.md new file mode 100644 index 0000000000000000000000000000000000000000..699657290896f1d2ccb36ffe60ec6471f68043fd --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/README.md @@ -0,0 +1,81 @@ +# Mask2Former for Video Instance Segmentation + +## Abstract + + + +We find Mask2Former also achieves state-of-the-art performance on video instance segmentation without modifying the architecture, the loss or even the training pipeline. In this report, we show universal image segmentation architectures trivially generalize to video segmentation by directly predicting 3D segmentation volumes. Specifically, Mask2Former sets a new state-of-the-art of 60.4 AP on YouTubeVIS-2019 and 52.6 AP on YouTubeVIS-2021. We believe Mask2Former is also capable of handling video semantic and panoptic segmentation, given its versatility in image segmentation. We hope this will make state-of-theart video segmentation research more accessible and bring more attention to designing universal image and video segmentation architectures. + + + +
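Returning briefly to the `metafile.yml` added a few hunks above: it follows the OpenMMLab model-index layout, with a `Collections` entry and a `Models` list whose items carry per-task `Results`. A small sketch of reading it with PyYAML (using the path this patch creates; PyYAML is assumed to be installed):

```python
import yaml

# Load the Mask2Former metafile and print each model's COCO box AP, if any.
with open('mmpose/configs/mmdet/mask2former/metafile.yml') as f:
    meta = yaml.safe_load(f)

for model in meta['Models']:
    box_ap = next((r['Metrics'].get('box AP') for r in model['Results']
                   if r['Task'] == 'Object Detection'), None)
    print(f"{model['Name']}: box AP = {box_ap}")
```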
+ +
+ +## Citation + + + +```latex +@inproceedings{cheng2021mask2former, + title={Masked-attention Mask Transformer for Universal Image Segmentation}, + author={Bowen Cheng and Ishan Misra and Alexander G. Schwing and Alexander Kirillov and Rohit Girdhar}, + journal={CVPR}, + year={2022} +} +``` + +## Results and models of Mask2Former on YouTube-VIS 2021 validation dataset + +Note: Codalab has closed the evaluation portal of `YouTube-VIS 2019`, so we do not provide the results of `YouTube-VIS 2019` at present. If you want to evaluate the results of `YouTube-VIS 2021`, at present, you can submit the result to the evaluation portal of `YouTube-VIS 2022`. The value of `AP_S` is the result of `YouTube-VIS 2021`. + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AP | Config | Download | +| :----------------------: | :------: | :-----: | :-----: | :------: | :------------: | :--: | :---------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask2Former | R-50 | pytorch | 8e | 6.0 | - | 41.3 | [config](mask2former_r50_8xb2-8e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021/mask2former_r50_8xb2-8e_youtubevis2021_20230426_131833-5d215283.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021/mask2former_r50_8xb2-8e_youtubevis2021_20230426_131833.json) | +| Mask2Former | R-101 | pytorch | 8e | 7.5 | - | 42.3 | [config](mask2former_r101_8xb2-8e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021/mask2former_r101_8xb2-8e_youtubevis2021_20220823_092747-8077d115.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/mask2former/mask2former_r101_8xb2-8e_youtubevis2021_20220823_092747.json) | +| Mask2Former(200 queries) | Swin-L | pytorch | 8e | 18.5 | - | 52.3 | [config](mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021_20220907_124752-48252603.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/mask2former/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021_20220907_124752.json) | + +## Get started + +### 1. Development Environment Setup + +Tracking Development Environment Setup can refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Prepare + +Tracking Dataset Prepare can refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md). + +### 3. Training + +Due to the influence of parameters such as learning rate in default configuration file, we recommend using 8 GPUs for training in order to reproduce accuracy. You can use the following command to start the training. + +```shell +# Training Mask2Former on YouTube-VIS-2021 dataset with following command. +# The number after config file represents the number of GPUs used. Here we use 8 GPUs. 
+bash tools/dist_train.sh configs/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py 8 +``` + +If you want to know about more detailed usage of `train.py/dist_train.sh/slurm_train.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 4. Testing and evaluation + +If you want to get the results of the [YouTube-VOS](https://youtube-vos.org/dataset/vis/) val/test set, please use the following command to generate result files that can be used for submission. It will be stored in `./youtube_vis_results.submission_file.zip`, you can modify the saved path in `test_evaluator` of the config. + +```shell +# The number after config file represents the number of GPUs used. +bash tools/dist_test_tracking.sh configs/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py --checkpoint ${CHECKPOINT_PATH} +``` + +If you want to know about more detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5.Inference + +Use a single GPU to predict a video and save it as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py --checkpoint {CHECKPOINT_PATH} --out vis.mp4 +``` + +If you want to know about more detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md). diff --git a/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2019.py b/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2019.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba4aea8eac72f347940fb12ac964e9bf67c2e0e --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2019.py @@ -0,0 +1,12 @@ +_base_ = './mask2former_r50_8xb2-8e_youtubevis2019.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'mask2former/mask2former_r101_8xb2-lsj-50e_coco/' + 'mask2former_r101_8xb2-lsj-50e_coco_20220426_100250-ecf181e2.pth')) diff --git a/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021.py b/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..95f9ceeb38833aeef342e12178703db6901fe5f6 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021.py @@ -0,0 +1,12 @@ +_base_ = './mask2former_r50_8xb2-8e_youtubevis2021.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'mask2former/mask2former_r101_8xb2-lsj-50e_coco/' + 'mask2former_r101_8xb2-lsj-50e_coco_20220426_100250-ecf181e2.pth')) diff --git a/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2019.py b/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2019.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc03bf97a2ed2b90e097bbd9637a42bf4d64c35 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2019.py @@ -0,0 +1,174 @@ +_base_ = ['../_base_/datasets/youtube_vis.py', '../_base_/default_runtime.py'] + 
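The two R-101 variants above are so short because everything else comes from the R-50 base file through `_base_`. As a rough mental model of the merge (an approximation of what `mmengine.Config` does, covering only the override and `_delete_=True` behaviours used in these configs):

```python
# Approximate the `_base_` merge: child values override base values, dicts are
# merged recursively, and a dict carrying `_delete_=True` replaces the
# inherited subtree instead of being merged into it.
def merge_cfg(base, child):
    out = dict(base)
    for key, val in child.items():
        if isinstance(val, dict):
            val = dict(val)                       # avoid mutating the input
            delete = val.pop('_delete_', False)
            prev = out.get(key)
            start = {} if delete or not isinstance(prev, dict) else prev
            out[key] = merge_cfg(start, val)
        else:
            out[key] = val
    return out

base = dict(backbone=dict(type='ResNet', depth=50,
                          init_cfg=dict(type='Pretrained',
                                        checkpoint='torchvision://resnet50')))
child = dict(backbone=dict(depth=101,
                           init_cfg=dict(type='Pretrained',
                                         checkpoint='torchvision://resnet101')))
print(merge_cfg(base, child)['backbone'])
# {'type': 'ResNet', 'depth': 101, 'init_cfg': {'type': 'Pretrained',
#  'checkpoint': 'torchvision://resnet101'}}
```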
+num_classes = 40 +num_frames = 2 +model = dict( + type='Mask2FormerVideo', + data_preprocessor=dict( + type='TrackDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + track_head=dict( + type='Mask2FormerTrackHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + num_frames=num_frames, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( # DeformableDetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DeformableDetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=128, + dropout=0.0, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)))), + positional_encoding=dict(num_feats=128, normalize=True)), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding3D', num_feats=128, normalize=True), + transformer_decoder=dict( # Mask2FormerTransformerDecoder + return_intermediate=True, + num_layers=9, + layer_cfg=dict( # Mask2FormerTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + cross_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True))), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='ClassificationCost', weight=2.0), + dict( + type='CrossEntropyLossCost', + weight=5.0, + use_sigmoid=True), + dict(type='DiceCost', weight=5.0, pred_act=True, eps=1.0) + ]), + sampler=dict(type='MaskPseudoSampler'))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'mask2former/mask2former_r50_8xb2-lsj-50e_coco/' + 'mask2former_r50_8xb2-lsj-50e_coco_20220506_191028-41b088b6.pth')) + +# optimizer +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi, + }, + norm_decay_mult=0.0), 
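The `HungarianAssigner` settings above define the bipartite matching used to pair queries with ground-truth instances during training: each pair gets a weighted sum of a classification cost, a mask cross-entropy cost and a Dice cost (weights 2.0/5.0/5.0, mirroring the loss weights), and the lowest-cost one-to-one assignment is kept. A toy sketch with made-up costs (SciPy assumed available; the real costs come from the predicted logits and masks):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

num_queries, num_gts = 4, 2
rng = np.random.default_rng(0)
cls_cost = rng.random((num_queries, num_gts))    # stand-in for ClassificationCost
ce_cost = rng.random((num_queries, num_gts))     # stand-in for CrossEntropyLossCost
dice_cost = rng.random((num_queries, num_gts))   # stand-in for DiceCost

# Weighted sum with the weights from `match_costs`, then Hungarian matching.
cost = 2.0 * cls_cost + 5.0 * ce_cost + 5.0 * dice_cost
rows, cols = linear_sum_assignment(cost)
print(list(zip(rows.tolist(), cols.tolist())))   # matched (query, ground-truth) pairs
```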
+ clip_grad=dict(max_norm=0.01, norm_type=2)) + +# learning policy +max_iters = 6000 +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=max_iters, + by_epoch=False, + milestones=[ + 4000, + ], + gamma=0.1) +# runtime settings +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=max_iters, val_interval=6001) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', by_epoch=False, save_last=True, interval=2000), + visualization=dict(type='TrackVisualizationHook', draw=False)) +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) + +# evaluator +val_evaluator = dict( + type='YouTubeVISMetric', + metric='youtube_vis_ap', + outfile_prefix='./youtube_vis_results', + format_only=True) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py b/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..158fe52d20fccf162cb66202fbc9069ba0f4cb68 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py @@ -0,0 +1,37 @@ +_base_ = './mask2former_r50_8xb2-8e_youtubevis2019.py' + +dataset_type = 'YouTubeVISDataset' +data_root = 'data/youtube_vis_2021/' +dataset_version = data_root[-5:-1] # 2019 or 2021 + +train_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_train.json')) + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_valid.json')) +test_dataloader = val_dataloader + +# learning policy +max_iters = 8000 +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=max_iters, + by_epoch=False, + milestones=[ + 5500, + ], + gamma=0.1) +# runtime settings +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=max_iters, val_interval=8001) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', by_epoch=False, save_last=True, interval=500)) diff --git a/mmpose/configs/mmdet/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py b/mmpose/configs/mmdet/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..94dcccf408dfb989ea264536a617a48ecc13171c --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py @@ -0,0 +1,64 @@ +_base_ = ['./mask2former_r50_8xb2-8e_youtubevis2021.py'] +depths = [2, 2, 18, 2] +model = dict( + type='Mask2FormerVideo', + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=depths, + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + frozen_stages=-1, + init_cfg=None), + track_head=dict( + type='Mask2FormerTrackHead', + in_channels=[192, 384, 768, 1536], + num_queries=200), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v3.0/mask2former/' + 
'mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic/' + 'mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic_' + '20220407_104949-82f8d28d.pth')) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/mmpose/configs/mmdet/mask2former_vis/metafile.yml b/mmpose/configs/mmdet/mask2former_vis/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f5f4bd7c5775820f283a7544bf5978fe0aa1abc5 --- /dev/null +++ b/mmpose/configs/mmdet/mask2former_vis/metafile.yml @@ -0,0 +1,53 @@ +Collections: + - Name: Mask2Former + Metadata: + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - Mask2Former + Paper: + URL: https://arxiv.org/pdf/2112.10764.pdf + Title: Mask2Former for Video Instance Segmentation + README: configs/mask2former/README.md + +Models: + - Name: mask2former_r50_8xb2-8e_youtubevis2021 + In Collection: Mask2Former + Config: configs/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 6.0 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021/mask2former_r50_8xb2-8e_youtubevis2021_20230426_131833-5d215283.pth + + - Name: mask2former_r101_8xb2-8e_youtubevis2021 + In Collection: Mask2Former + Config: configs/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 7.5 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_r101_8xb2-8e_youtubevis2021/mask2former_r101_8xb2-8e_youtubevis2021_20220823_092747-8077d115.pth + + - Name: mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py + In Collection: Mask2Former + Config: configs/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 18.5 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 52.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mask2former_vis/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021_20220907_124752-48252603.pth diff --git 
a/mmpose/configs/mmdet/mask_rcnn/README.md b/mmpose/configs/mmdet/mask_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..afc5c3c92c683947ca01ad05456b0d7ff77be5e9 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/README.md @@ -0,0 +1,59 @@ +# Mask R-CNN + +> [Mask R-CNN](https://arxiv.org/abs/1703.06870) + + + +## Abstract + +We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. + +
+ +
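For readers new to the architecture, the "branch for predicting an object mask in parallel" mentioned in the abstract is a small FCN over per-RoI features. The sketch below is schematic only (it is not this repository's mask head implementation), using the common setting of 14x14 RoIAlign features in and 28x28 per-class masks out:

```python
import torch
import torch.nn as nn

class ToyMaskHead(nn.Module):
    """Schematic mask branch: a few 3x3 convs, one 2x upsample, per-class logits."""

    def __init__(self, in_channels=256, num_classes=80):
        super().__init__()
        layers, c = [], in_channels
        for _ in range(4):                                   # hidden 3x3 convs
            layers += [nn.Conv2d(c, 256, 3, padding=1), nn.ReLU(inplace=True)]
            c = 256
        self.convs = nn.Sequential(*layers)
        self.upsample = nn.ConvTranspose2d(256, 256, 2, stride=2)   # 14x14 -> 28x28
        self.logits = nn.Conv2d(256, num_classes, 1)                # one mask per class

    def forward(self, roi_feats):                            # (num_rois, C, 14, 14)
        x = torch.relu(self.upsample(self.convs(roi_feats)))
        return self.logits(x)                                # (num_rois, num_classes, 28, 28)

print(ToyMaskHead()(torch.zeros(2, 256, 14, 14)).shape)      # torch.Size([2, 80, 28, 28])
```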
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.3 | | 38.0 | 34.4 | [config](./mask-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_20200504_231812.log.json) | +| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 38.2 | 34.7 | [config](./mask-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 | [config](./mask-rcnn_r50_fpn_amp-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 39.2 | 35.4 | [config](./mask-rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_20200505_003907.log.json) | +| R-101-FPN | caffe | 1x | | | 40.4 | 36.4 | [config](./mask-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758.log.json) | +| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 40.0 | 36.1 | [config](./mask-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 40.8 | 36.6 | [config](./mask-rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_20200505_071027.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 
7.6 | 11.3 | 41.9 | 37.5 | [config](./mask-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 42.2 | 37.8 | [config](./mask-rcnn_x101-32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_20200506_004702.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.7 | 8.0 | 42.8 | 38.4 | [config](./mask-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201_124310.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 42.7 | 38.1 | [config](./mask-rcnn_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208.log.json) | +| X-101-32x8d-FPN | pytorch | 1x | 10.6 | - | 42.8 | 38.3 | [config](./mask-rcnn_x101-32x8d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841.log.json) | + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
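One way to do that fine-tuning is to write a tiny config that inherits one of the recipes below and points `load_from` at the released checkpoint. An illustrative example (not part of this patch), reusing the R-50-FPN 3x weights from the table that follows and assuming a downstream dataset with three classes:

```python
# my_downstream_mask_rcnn.py -- illustrative only. Inherits the 3x multi-scale
# R-50 recipe, changes the class count, and starts from the released COCO
# checkpoint rather than ImageNet-only backbone weights.
_base_ = './mask-rcnn_r50_fpn_ms-poly-3x_coco.py'

model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=3),      # downstream classes
        mask_head=dict(num_classes=3)))

load_from = ('https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/'
             'mask_rcnn_r50_fpn_mstrain-poly_3x_coco/'
             'mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth')
```

Dataset and dataloader settings would be overridden the same way in the downstream config.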
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](./mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py) | caffe | 2x | 4.3 | | 40.3 | 36.5 | [config](./mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_20200504_231822.log.json) | +| [R-50-FPN](./mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py) | caffe | 3x | 4.3 | | 40.8 | 37.0 | [config](./mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_20200504_163245.log.json) | +| [R-50-FPN](./mask-rcnn_r50_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 4.1 | | 40.9 | 37.1 | [config](./mask-rcnn_r50_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154.log.json) | +| [R-101-FPN](./mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py) | caffe | 3x | 5.9 | | 42.9 | 38.5 | [config](./mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339.log.json) | +| [R-101-FPN](./mask-rcnn_r101_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 6.1 | | 42.7 | 38.5 | [config](./mask-rcnn_r101_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244.log.json) | +| [x101-32x4d-FPN](./mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 7.3 | | 43.6 | 39.0 | [config](./mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410.log.json) | +| [X-101-32x8d-FPN](./mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py) | pytorch | 1x | 10.4 | | 43.4 | 39.0 | [config](./mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346.log.json) | +| [X-101-32x8d-FPN](./mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 10.3 | | 44.3 | 39.5 | [config](./mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042.log.json) | +| [X-101-64x4d-FPN](./mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py) | pytorch | 3x | 10.4 | | 44.5 | 39.7 | [config](./mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447.log.json) | + +## Citation + +```latex +@article{He_2017, + title={Mask R-CNN}, + journal={2017 IEEE International Conference on Computer Vision (ICCV)}, + publisher={IEEE}, + author={He, Kaiming and Gkioxari, Georgia and Dollar, Piotr and Girshick, Ross}, + year={2017}, + month={Oct} +} +``` diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..09808e4bcada43b1e935d5393894c7ba3401fc3d --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e723aea81ff82dfa842d7468e166f42ee9291669 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py @@ -0,0 +1,19 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + depth=101, + norm_cfg=dict(requires_grad=False), + norm_eval=True, + 
style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..af91ff0b8349b0e9e658b69cf4c5dd138b7b8a5a --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a5599e7c4942b523d6500e2c7c8ad4638cab45c6 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..452351050238a4d4411b2bf6fc916e2d69804766 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..384f6dcd3ca33cd91755b48dd525d747a358ee02 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r101_fpn_ms-poly-3x_coco.py @@ -0,0 +1,10 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5b9219c9c1da8ca68cf7ada0881419b371a26a87 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9919f11c3fc7b68528bf6f690e39185d703aff43 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50-caffe-c4.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py 
b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4124f138d874def6810cea6c884a02eaacdf5f71 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7702ae14a9cc54686df6a3eadec5bc8cfeb8e0a8 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-1x_coco.py @@ -0,0 +1,28 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..94d94dd3613e0599f51f113ccf12e568a5b29f8f --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-1x_coco.py @@ -0,0 +1,31 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dbf87bb8346dd351c8f16700df7b9640bcfa984a --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './mask-rcnn_r50-caffe_fpn_ms-poly-1x_coco.py' + +train_cfg = dict(max_epochs=24) +# learning rate +param_scheduler = [ + dict( + type='LinearLR', 
start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..45260e2e39b53c0107e257ef2d05a14f5d5c0323 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py @@ -0,0 +1,15 @@ +_base_ = './mask-rcnn_r50-caffe_fpn_ms-poly-1x_coco.py' + +train_cfg = dict(max_epochs=36) +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_poly-1x_coco_v1.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_poly-1x_coco_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..3baf00140ecfa57ea54b68b85ac826e14490daa4 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50-caffe_fpn_poly-1x_coco_v1.py @@ -0,0 +1,31 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' + +model = dict( + # use caffe img_norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False), + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + bbox_roi_extractor=dict( + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + mask_roi_extractor=dict( + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x-wandb_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x-wandb_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..28b125ccb94869aff2bb283e6533fd693c79a76e --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x-wandb_coco.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')] +visualizer = dict(vis_backends=vis_backends) + +# MMEngine support the following two ways, users can choose +# according to convenience +# default_hooks = dict(checkpoint=dict(interval=4)) +_base_.default_hooks.checkpoint.interval = 4 + +# train_cfg = dict(val_interval=2) +_base_.train_cfg.val_interval = 2 diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0fc6b91aa895e044b3fc62a3cdedbc12a052e91b --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py 
b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..87cb8b4bb7d2fbfcfe667e7bd6cfc08e01e28c1a --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7371b3646fdda7bdc1fcfcd44cf8a20df27c40b5 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../common/lsj-100e_coco-instance.py' +] +image_size = (1024, 1024) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] + +model = dict(data_preprocessor=dict(batch_augments=batch_augments)) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_amp-1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_amp-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a139c48b2091a3a40943ce7ec8301b06cea01d4f --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_amp-1x_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask-rcnn_r50_fpn_1x_coco.py' + +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
+optim_wrapper = dict(type='AmpOptimWrapper') diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..417adc3cebb3acbcc987b3f0453a78204dde1ea9 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_ms-poly-3x_coco.py @@ -0,0 +1,4 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_poly-1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_poly-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..826180ce0a831a1ee6206bd52ffa516df766136c --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_r50_fpn_poly-1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..921ade81e30afb60a3a6f03d2f2aecef85767da8 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..db8157f80fac23f6216afbeefed6cb80398f7e0d --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_r101_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..83e5451f38cb01d3d30712f22633fed6234d06c9 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + 
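The LSJ recipe a few hunks above combines mixed precision (`AmpOptimWrapper`) with a linearly scaled learning rate: assuming the usual MMDetection convention of `lr=0.02` for a total batch of 16 images, 8 GPUs x 8 images gives `0.02 * 4`, and `auto_scale_lr.base_batch_size=64` records that reference batch so the rule can be re-applied when the effective batch size changes (this typically only takes effect when automatic LR scaling is explicitly enabled, e.g. via the training script's `--auto-scale-lr` option). A quick check of the arithmetic:

```python
# Linear scaling rule: lr grows proportionally with the total batch size.
def scaled_lr(base_lr, base_batch, gpus, samples_per_gpu):
    return base_lr * (gpus * samples_per_gpu) / base_batch

print(scaled_lr(0.02, 16, gpus=8, samples_per_gpu=8))   # 0.08 == 0.02 * 4, as in the config
print(scaled_lr(0.08, 64, gpus=4, samples_per_gpu=8))   # 0.04 if you train on half the GPUs
```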
init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9b1b6fe8fcb152d9ad22bc403da6e62e936f77 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = './mask-rcnn_r101_fpn_1x_coco.py' + +model = dict( + # ResNeXt-101-32x8d model trained with Caffe2 at FB, + # so the mean and std need to be changed. + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False), + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6ee204d90001edd3e8e08e4a59ba25dd1ec4195c --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco.py @@ -0,0 +1,40 @@ +_base_ = './mask-rcnn_r101_fpn_1x_coco.py' + +model = dict( + # ResNeXt-101-32x8d model trained with Caffe2 at FB, + # so the mean and std need to be changed. + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False), + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..999a30c39fc083f26fe0cd9e2ec13bb4f6063268 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py @@ -0,0 +1,25 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + # ResNeXt-101-32x8d model trained with Caffe2 at FB, + # so the mean and std need to be changed. 
+ data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False), + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2cbb658c1b053d6674694c1a09101e965d5724ba --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_x101-32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f21a55b00db77a3cf2386a738a3b8fb39bf2fa44 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask-rcnn_x101-32x4d_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..09b49d47740b70c4a192d94a95b994d0a303f2d1 --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/mask_rcnn/metafile.yml b/mmpose/configs/mmdet/mask_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..ddf85c872bc8681a849c59c917a4b5ca0151d21a --- /dev/null +++ b/mmpose/configs/mmdet/mask_rcnn/metafile.yml @@ -0,0 +1,443 @@ +Collections: + - Name: Mask R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1703.06870v3 + Title: "Mask R-CNN" + README: configs/mask_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: mask-rcnn_r50-caffe_fpn_1x_coco + In Collection: Mask R-CNN + Config: 
configs/mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth + + - Name: mask-rcnn_r50_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth + + - Name: mask-rcnn_r50_fpn_fp16_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50_fpn_amp-1x_coco.py + Metadata: + Training Memory (GB): 3.6 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth + + - Name: mask-rcnn_r50_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth + + - Name: mask-rcnn_r101-caffe_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth + + - Name: mask-rcnn_r101_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth + + - Name: mask-rcnn_r101_fpn_2x_coco + 
In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth + + - Name: mask-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth + + - Name: mask-rcnn_x101-32x4d_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth + + - Name: mask-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.7 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth + + - Name: mask-rcnn_x101-64x4d_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.7 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth + + - Name: mask-rcnn_x101-32x8d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-32x8d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth + + - Name: mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-2x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth + + - Name: mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth + + - Name: mask-rcnn_r50_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r50_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth + + - Name: mask-rcnn_r101_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r101_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 6.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth + + - Name: mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_r101-caffe_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth + + - Name: mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-32x4d_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth + + - Name: mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco + In Collection: Mask R-CNN + 
Config: configs/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-1x_coco.py + Metadata: + Training Memory (GB): 10.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth + + - Name: mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-32x8d_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 10.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth + + - Name: mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask-rcnn_x101-64x4d_fpn_ms-poly_3x_coco.py + Metadata: + Epochs: 36 + Training Memory (GB): 10.4 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth diff --git a/mmpose/configs/mmdet/maskformer/README.md b/mmpose/configs/mmdet/maskformer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca5ce320e1eb42f9cc12b4192fecb038fff71113 --- /dev/null +++ b/mmpose/configs/mmdet/maskformer/README.md @@ -0,0 +1,58 @@ +# MaskFormer + +> [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) + + + +## Abstract + +Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models. + +
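The mask-classification idea described in the abstract, where each learned query predicts one binary mask plus one class distribution, reduces semantic segmentation to a weighted sum over queries. Below is a minimal, illustrative sketch of that decoding step (function and tensor names are assumptions, not the exact MMDetection code path); the 80 thing + 53 stuff = 133 classes match the COCO panoptic setup used by the MaskFormer configs added in this folder.

```python
import torch
import torch.nn.functional as F


def semantic_inference(mask_cls: torch.Tensor, mask_pred: torch.Tensor) -> torch.Tensor:
    """Combine per-query class scores with per-query binary masks (sketch).

    mask_cls:  (num_queries, num_classes + 1) class logits; the last index is "no object".
    mask_pred: (num_queries, H, W) mask logits.
    Returns a (num_classes, H, W) map of per-pixel class scores.
    """
    cls_prob = F.softmax(mask_cls, dim=-1)[..., :-1]  # drop the "no object" class
    mask_prob = mask_pred.sigmoid()                   # each query yields one soft binary mask
    # Every pixel aggregates all query masks, weighted by each query's class confidence.
    return torch.einsum('qc,qhw->chw', cls_prob, mask_prob)


# Toy shapes: 100 queries, 133 COCO panoptic classes (80 things + 53 stuff).
seg = semantic_inference(torch.randn(100, 134), torch.randn(100, 64, 64))
print(seg.shape)  # torch.Size([133, 64, 64])
```

For panoptic output the same per-query scores are instead turned into a hard query-to-pixel assignment, with the score and overlap thresholds (`object_mask_thr`, `iou_thr`) handled by the fusion head in the config below.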
+ +## Introduction + +MaskFormer requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :--------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 75e | 16.2 | - | 46.757 | 80.297 | 57.176 | 50.829 | 81.125 | 61.798 | 40.610 | 79.048 | 50.199 | [config](./maskformer_r50_ms-16xb1-75e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/maskformer/maskformer_r50_ms-16xb1-75e_coco/maskformer_r50_ms-16xb1-75e_coco_20230116_095226-baacd858.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/maskformer/maskformer_r50_ms-16xb1-75e_coco/maskformer_r50_ms-16xb1-75e_coco_20230116_095226.log.json) | +| Swin-L | pytorch | 300e | 27.2 | - | 53.249 | 81.704 | 64.231 | 58.798 | 82.923 | 70.282 | 44.874 | 79.863 | 55.097 | [config](./maskformer_swin-l-p4-w12_64xb1-ms-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco_20220326_221612-c63ab967.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612.log.json) | + +### Note + +1. The `R-50` version was mentioned in Table XI, in paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527). +2. The models were trained with mmdet 2.x and have been converted for mmdet 3.x. + +## Citation + +```latex +@inproceedings{cheng2021maskformer, + title={Per-Pixel Classification is Not All You Need for Semantic Segmentation}, + author={Bowen Cheng and Alexander G. 
Schwing and Alexander Kirillov}, + journal={NeurIPS}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/maskformer/maskformer_r50_ms-16xb1-75e_coco.py b/mmpose/configs/mmdet/maskformer/maskformer_r50_ms-16xb1-75e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..784ee7767bf1318e967444461028b49a38dc3dbc --- /dev/null +++ b/mmpose/configs/mmdet/maskformer/maskformer_r50_ms-16xb1-75e_coco.py @@ -0,0 +1,216 @@ +_base_ = [ + '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' +] + +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + seg_pad_value=255) + +num_things_classes = 80 +num_stuff_classes = 53 +num_classes = num_things_classes + num_stuff_classes +model = dict( + type='MaskFormer', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + panoptic_head=dict( + type='MaskFormerHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + feat_channels=256, + out_channels=256, + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + pixel_decoder=dict( + type='TransformerEncoderPixelDecoder', + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( # DetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)))), + positional_encoding=dict(num_feats=128, normalize=True)), + enforce_decoder_input_project=False, + positional_encoding=dict(num_feats=128, normalize=True), + transformer_decoder=dict( # DetrTransformerDecoder + num_layers=6, + layer_cfg=dict( # DetrTransformerDecoderLayer + self_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + cross_attn_cfg=dict( # MultiheadAttention + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True))), + return_intermediate=True), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=20.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=1.0)), + panoptic_fusion_head=dict( + type='MaskFormerFusionHead', + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_panoptic=None, + init_cfg=None), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='ClassificationCost', weight=1.0), + dict(type='FocalLossCost', weight=20.0, binary_input=True), + dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0) + ]), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + 
panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=False, + # max_per_image is for instance segmentation. + max_per_image=100, + object_mask_thr=0.8, + iou_thr=0.8, + # In MaskFormer's panoptic postprocessing, + # it will not filter masks whose score is smaller than 0.5 . + filter_low_score=False), + init_cfg=None) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline)) + +val_dataloader = dict(batch_size=1, num_workers=1) + +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=0.0001, + weight_decay=0.0001, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': dict(lr_mult=1.0, decay_mult=0.0) + }, + norm_decay_mult=0.0), + clip_grad=dict(max_norm=0.01, norm_type=2)) + +max_epochs = 75 + +# learning rate +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[50], + gamma=0.1) + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (1 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco.py b/mmpose/configs/mmdet/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4897f26d47c049f8791169867c2df307b87f61 --- /dev/null +++ b/mmpose/configs/mmdet/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco.py @@ -0,0 +1,73 @@ +_base_ = './maskformer_r50_ms-16xb1-75e_coco.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + patch_size=4, + window_size=12, + mlp_ratio=4, + depths=depths, + num_heads=[6, 12, 24, 48], + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside + pixel_decoder=dict( + _delete_=True, + type='PixelDecoder', + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU')), + enforce_decoder_input_project=True)) + +# optimizer + +# weight_decay = 0.01 +# norm_weight_decay = 0.0 +# embed_weight_decay = 0.0 +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +norm_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'norm': norm_multi, + 'absolute_pos_embed': embed_multi, + 'relative_position_bias_table': embed_multi, + 'query_embed': embed_multi +} + +optim_wrapper = dict( + optimizer=dict(lr=6e-5, weight_decay=0.01), + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) + +max_epochs = 300 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[250], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (64 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/maskformer/metafile.yml b/mmpose/configs/mmdet/maskformer/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa58269d51c3e936f6acfaa664766afb84e7e0b6 --- /dev/null +++ b/mmpose/configs/mmdet/maskformer/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: MaskFormer + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Architecture: + - MaskFormer + Paper: + URL: https://arxiv.org/pdf/2107.06278 + Title: 'Per-Pixel Classification is Not All You Need for Semantic Segmentation' + README: configs/maskformer/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/detectors/maskformer.py#L7 + Version: v2.22.0 + +Models: + - Name: maskformer_r50_ms-16xb1-75e_coco + In Collection: MaskFormer + Config: configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py + Metadata: + Training Memory (GB): 16.2 + Epochs: 75 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 46.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/maskformer/maskformer_r50_ms-16xb1-75e_coco/maskformer_r50_ms-16xb1-75e_coco_20230116_095226-baacd858.pth + - Name: maskformer_swin-l-p4-w12_64xb1-ms-300e_coco + In Collection: MaskFormer + Config: configs/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco.py + Metadata: + Training Memory (GB): 27.2 + Epochs: 300 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 53.2 + Weights: https://download.openmmlab.com/mmdetection/v3.0/maskformer/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco/maskformer_swin-l-p4-w12_64xb1-ms-300e_coco_20220326_221612-c63ab967.pth diff --git a/mmpose/configs/mmdet/masktrack_rcnn/README.md b/mmpose/configs/mmdet/masktrack_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5cef692a382635e88732b2dc38985cfbc3c773e7 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/README.md @@ -0,0 +1,93 @@ +# Video Instance Segmentation + +## Abstract + + + +In this paper we present a new computer vision task, named video instance segmentation. The goal of this new task is simultaneous detection, segmentation and tracking of instances in videos. In words, it is the first time that the image instance segmentation problem is extended to the video domain. To facilitate research on this new task, we propose a large-scale benchmark called YouTube-VIS, which consists of 2883 high-resolution YouTube videos, a 40-category label set and 131k high-quality instance masks. In addition, we propose a novel algorithm called MaskTrack R-CNN for this task. Our new method introduces a new tracking branch to Mask R-CNN to jointly perform the detection, segmentation and tracking tasks simultaneously. Finally, we evaluate the proposed method and several strong baselines on our new dataset. Experimental results clearly demonstrate the advantages of the proposed algorithm and reveal insight for future improvement. We believe the video instance segmentation task will motivate the community along the line of research for video understanding. + + + +
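The tracking branch mentioned above assigns each new detection either to a stored tracklet or to a new identity by combining an appearance-embedding similarity with the detection score, box overlap, and category consistency; these are exactly the three extra cues exposed as `match_weights=dict(det_score=1.0, iou=2.0, det_label=10.0)` in the tracker config further down in this diff. The snippet below is a self-contained sketch of that scoring idea under assumed names and weighting details, not the actual `MaskTrackRCNNTracker` implementation.

```python
import torch
import torch.nn.functional as F


def match_scores(embeds, prev_embeds, det_scores, ious, same_label, weights):
    """Score every (new detection, stored tracklet) pair, MaskTrack R-CNN style (sketch).

    embeds:      (N, C) appearance embeddings of the current detections.
    prev_embeds: (M, C) embeddings kept in the track memory.
    det_scores:  (N,)   detection confidences.
    ious:        (N, M) box IoU between detections and tracklets.
    same_label:  (N, M) 1.0 where the predicted category matches, else 0.0.
    weights:     e.g. dict(det_score=1.0, iou=2.0, det_label=10.0).
    """
    sim = embeds @ prev_embeds.t()                    # (N, M) similarity logits
    score = F.log_softmax(sim, dim=1)                 # embedding-based assignment likelihood
    score = score + weights['det_score'] * det_scores.log().unsqueeze(1)
    score = score + weights['iou'] * ious
    score = score + weights['det_label'] * same_label
    return score  # argmax per row (plus a "start new tracklet" option) assigns instance IDs


scores = match_scores(
    embeds=torch.randn(3, 8),
    prev_embeds=torch.randn(5, 8),
    det_scores=torch.tensor([0.9, 0.8, 0.95]),
    ious=torch.rand(3, 5),
    same_label=torch.randint(0, 2, (3, 5)).float(),
    weights=dict(det_score=1.0, iou=2.0, det_label=10.0))
print(scores.shape)  # torch.Size([3, 5])
```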
+ +## Citation + + + +```latex +@inproceedings{yang2019video, + title={Video instance segmentation}, + author={Yang, Linjie and Fan, Yuchen and Xu, Ning}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={5188--5197}, + year={2019} +} +``` + +## Results and models of MaskTrack R-CNN on YouTube-VIS 2019 validation dataset + +As mentioned in [Issues #6](https://github.com/youtubevos/MaskTrackRCNN/issues/6#issuecomment-502503505) in MaskTrack R-CNN, the result is kind of unstable for different trials, which ranges from 28 AP to 31 AP when using R-50-FPN as backbone. +The checkpoint provided below is the best one from two experiments. + +| Method | Base detector | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AP | Config | Download | +| :-------------: | :-----------: | :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MaskTrack R-CNN | Mask R-CNN | R-50-FPN | pytorch | 12e | 1.61 | - | 30.2 | [config](masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2019/masktrack_rcnn_r50_fpn_12e_youtubevis2019_20211022_194830-6ca6b91e.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2019/masktrack_rcnn_r50_fpn_12e_youtubevis2019_20211022_194830.log.json) | +| MaskTrack R-CNN | Mask R-CNN | R-101-FPN | pytorch | 12e | 2.27 | - | 32.2 | [config](masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2019/masktrack_rcnn_r101_fpn_12e_youtubevis2019_20211023_150038-454dc48b.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2019/masktrack_rcnn_r101_fpn_12e_youtubevis2019_20211023_150038.log.json) | +| MaskTrack R-CNN | Mask R-CNN | X-101-FPN | pytorch | 12e | 3.69 | - | 34.7 | [config](masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2019/masktrack_rcnn_x101_fpn_12e_youtubevis2019_20211023_153205-fff7a102.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2019/masktrack_rcnn_x101_fpn_12e_youtubevis2019_20211023_153205.log.json) | + +## Results and models of MaskTrack R-CNN on YouTube-VIS 2021 validation dataset + +The checkpoint provided below is the best one from two experiments. 
+ +| Method | Base detector | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AP | Config | Download | +| :-------------: | :-----------: | :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MaskTrack R-CNN | Mask R-CNN | R-50-FPN | pytorch | 12e | 1.61 | - | 28.7 | [config](masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2021/masktrack_rcnn_r50_fpn_12e_youtubevis2021_20211026_044948-10da90d9.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2021/masktrack_rcnn_r50_fpn_12e_youtubevis2021_20211026_044948.log.json) | +| MaskTrack R-CNN | Mask R-CNN | R-101-FPN | pytorch | 12e | 2.27 | - | 31.3 | [config](masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2021/masktrack_rcnn_r101_fpn_12e_youtubevis2021_20211026_045509-3c49e4f3.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2021/masktrack_rcnn_r101_fpn_12e_youtubevis2021_20211026_045509.log.json) | +| MaskTrack R-CNN | Mask R-CNN | X-101-FPN | pytorch | 12e | 3.69 | - | 33.5 | [config](masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021.py) | [model](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2021/masktrack_rcnn_x101_fpn_12e_youtubevis2021_20211026_095943-90831df4.pth) \| [log](https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2021/masktrack_rcnn_x101_fpn_12e_youtubevis2021_20211026_095943.log.json) | + +## Get started + +### 1. Development Environment Setup + +Tracking Development Environment Setup can refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Prepare + +Tracking Dataset Prepare can refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md). + +### 3. Training + +Due to the influence of parameters such as learning rate in default configuration file, we recommend using 8 GPUs for training in order to reproduce accuracy. You can use the following command to start the training. + +```shell +# Training MaskTrack R-CNN on YouTube-VIS-2021 dataset with following command. +# The number after config file represents the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py 8 +``` + +If you want to know about more detailed usage of `train.py/dist_train.sh/slurm_train.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 4. Testing and evaluation + +If you want to get the results of the [YouTube-VOS](https://youtube-vos.org/dataset/vis/) val/test set, please use the following command to generate result files that can be used for submission. 
It will be stored in `./youtube_vis_results.submission_file.zip`; you can modify the save path in the `test_evaluator` of the config. + +```shell +# The number after config file represents the number of GPUs used. +bash tools/dist_test_tracking.sh configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py 8 --checkpoint ${CHECKPOINT_PATH} +``` + +If you want to know more about the detailed usage of the testing scripts such as `dist_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5. Inference + +Use a single GPU to run inference on a video and save the result as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py --checkpoint {CHECKPOINT_PATH} --out vis.mp4 +``` + +If you want to know more about the detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md). diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019.py new file mode 100644 index 0000000000000000000000000000000000000000..4be492d5419b8598120faa29eed44eada0fb5ba2 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019.py @@ -0,0 +1,12 @@ +_base_ = ['./masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py'] +model = dict( + detector=dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='torchvision://resnet101')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth' # noqa: E501 + ))) diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..81bae4af8d8945a024cd498a001e52059741f8a9 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021.py @@ -0,0 +1,28 @@ +_base_ = ['./masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py'] +model = dict( + detector=dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='torchvision://resnet101')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth' # noqa: E501 + ))) + +data_root = 'data/youtube_vis_2021/' +dataset_version = data_root[-5:-1] + +# dataloader +train_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_train.json')) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_valid.json')) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py new file mode 100644 index 0000000000000000000000000000000000000000..db1be7b0ddf00a07ce6e06e4e179059e68c103a3 --- /dev/null +++ 
b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py @@ -0,0 +1,130 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/youtube_vis.py', '../_base_/default_runtime.py' +] + +detector = _base_.model +detector.pop('data_preprocessor') +detector.roi_head.bbox_head.update(dict(num_classes=40)) +detector.roi_head.mask_head.update(dict(num_classes=40)) +detector.train_cfg.rpn.sampler.update(dict(num=64)) +detector.train_cfg.rpn_proposal.update(dict(nms_pre=200, max_per_img=200)) +detector.train_cfg.rcnn.sampler.update(dict(num=128)) +detector.test_cfg.rpn.update(dict(nms_pre=200, max_per_img=200)) +detector.test_cfg.rcnn.update(dict(score_thr=0.01)) +detector['init_cfg'] = dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa: E501 +) +del _base_.model + +model = dict( + type='MaskTrackRCNN', + data_preprocessor=dict( + type='TrackDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + detector=detector, + track_head=dict( + type='RoITrackHead', + roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + embed_head=dict( + type='RoIEmbedHead', + num_fcs=2, + roi_feat_size=7, + in_channels=256, + fc_out_channels=1024), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=128, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + tracker=dict( + type='MaskTrackRCNNTracker', + match_weights=dict(det_score=1.0, iou=2.0, det_label=10.0), + num_frames_retain=20)) + +dataset_type = 'YouTubeVISDataset' +data_root = 'data/youtube_vis_2019/' +dataset_version = data_root[-5:-1] # 2019 or 2021 + +# train_dataloader +train_dataloader = dict( + _delete_=True, + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='TrackImgSampler'), # image-based sampling + batch_sampler=dict(type='TrackAspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2019_train.json', + data_prefix=dict(img_path='train/JPEGImages'), + pipeline=_base_.train_pipeline)) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.00125, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3.0, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# visualizer +default_hooks = dict( + visualization=dict(type='TrackVisualizationHook', draw=False)) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_begin=13) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# evaluator +val_evaluator = dict( + type='YouTubeVISMetric', + metric='youtube_vis_ap', + 
outfile_prefix='./youtube_vis_results', + format_only=True) +test_evaluator = val_evaluator + +del detector diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..47263d5091c3b5b76056373558ce9a0a97bb071b --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py @@ -0,0 +1,17 @@ +_base_ = ['./masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py'] + +data_root = 'data/youtube_vis_2021/' +dataset_version = data_root[-5:-1] + +# dataloader +train_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_train.json')) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_valid.json')) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019.py new file mode 100644 index 0000000000000000000000000000000000000000..e7e3f11e13a3a20ba8e4311963db558a9e4fd247 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019.py @@ -0,0 +1,16 @@ +_base_ = ['./masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py'] +model = dict( + detector=dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://resnext101_64x4d')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth' # noqa: E501 + ))) diff --git a/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021.py b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4c8b92483292cc7de1b2f321d4d514427f3cb5 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021.py @@ -0,0 +1,32 @@ +_base_ = ['./masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py'] +model = dict( + detector=dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://resnext101_64x4d')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth' # noqa: E501 + ))) + +data_root = 'data/youtube_vis_2021/' +dataset_version = data_root[-5:-1] + +# dataloader +train_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_train.json')) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + dataset_version=dataset_version, + ann_file='annotations/youtube_vis_2021_valid.json')) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/masktrack_rcnn/metafile.yml b/mmpose/configs/mmdet/masktrack_rcnn/metafile.yml new file mode 100644 
index 0000000000000000000000000000000000000000..7a1d71d582dc31f3c05f721c6ea8a225d0e0ce33 --- /dev/null +++ b/mmpose/configs/mmdet/masktrack_rcnn/metafile.yml @@ -0,0 +1,91 @@ +Collections: + - Name: MaskTrack R-CNN + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x TiTanXP GPUs + Architecture: + - ResNet + Paper: + URL: https://arxiv.org/pdf/1905.04804.pdf + Title: Video Instance Segmentation + README: configs/masktrack_rcnn/README.md + +Models: + - Name: masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2019.py + Metadata: + Training Data: YouTube-VIS 2019 + Training Memory (GB): 1.16 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2019 + Metrics: + AP: 30.2 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2019/masktrack_rcnn_r50_fpn_12e_youtubevis2019_20211022_194830-6ca6b91e.pth + + - Name: masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2019.py + Metadata: + Training Data: YouTube-VIS 2019 + Training Memory (GB): 2.27 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2019 + Metrics: + AP: 32.2 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2019/masktrack_rcnn_r101_fpn_12e_youtubevis2019_20211023_150038-454dc48b.pth + + - Name: masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2019.py + Metadata: + Training Data: YouTube-VIS 2019 + Training Memory (GB): 3.69 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2019 + Metrics: + AP: 34.7 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2019/masktrack_rcnn_x101_fpn_12e_youtubevis2019_20211023_153205-fff7a102.pth + + - Name: masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 1.16 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 28.7 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r50_fpn_12e_youtubevis2021/masktrack_rcnn_r50_fpn_12e_youtubevis2021_20211026_044948-10da90d9.pth + + - Name: masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_r101_fpn_8xb1-12e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 2.27 + Results: + - Task: Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 31.3 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_r101_fpn_12e_youtubevis2021/masktrack_rcnn_r101_fpn_12e_youtubevis2021_20211026_045509-3c49e4f3.pth + + - Name: masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021 + In Collection: MaskTrack R-CNN + Config: configs/masktrack_rcnn/masktrack-rcnn_mask-rcnn_x101_fpn_8xb1-12e_youtubevis2021.py + Metadata: + Training Data: YouTube-VIS 2021 + Training Memory (GB): 3.69 + Results: + - Task: 
Video Instance Segmentation + Dataset: YouTube-VIS 2021 + Metrics: + AP: 33.5 + Weights: https://download.openmmlab.com/mmtracking/vis/masktrack_rcnn/masktrack_rcnn_x101_fpn_12e_youtubevis2021/masktrack_rcnn_x101_fpn_12e_youtubevis2021_20211026_095943-90831df4.pth diff --git a/mmpose/configs/mmdet/misc/d2_faster-rcnn_r50-caffe_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/misc/d2_faster-rcnn_r50-caffe_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d93e1562606b3d6bd657454c99220d329c526f30 --- /dev/null +++ b/mmpose/configs/mmdet/misc/d2_faster-rcnn_r50-caffe_fpn_ms-90k_coco.py @@ -0,0 +1,75 @@ +_base_ = '../common/ms-90k_coco.py' + +# model settings +model = dict( + type='Detectron2Wrapper', + bgr_to_rgb=False, + detector=dict( + # The settings in `d2_detector` will merged into default settings + # in detectron2. More details please refer to + # https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa + meta_architecture='GeneralizedRCNN', + # If you want to finetune the detector, you can use the + # checkpoint released by detectron2, for example: + # weights='detectron2://COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl' # noqa + weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl', + mask_on=False, + pixel_mean=[103.530, 116.280, 123.675], + pixel_std=[1.0, 1.0, 1.0], + backbone=dict(name='build_resnet_fpn_backbone', freeze_at=2), + resnets=dict( + depth=50, + out_features=['res2', 'res3', 'res4', 'res5'], + num_groups=1, + norm='FrozenBN'), + fpn=dict( + in_features=['res2', 'res3', 'res4', 'res5'], out_channels=256), + anchor_generator=dict( + name='DefaultAnchorGenerator', + sizes=[[32], [64], [128], [256], [512]], + aspect_ratios=[[0.5, 1.0, 2.0]], + angles=[[-90, 0, 90]]), + proposal_generator=dict(name='RPN'), + rpn=dict( + head_name='StandardRPNHead', + in_features=['p2', 'p3', 'p4', 'p5', 'p6'], + iou_thresholds=[0.3, 0.7], + iou_labels=[0, -1, 1], + batch_size_per_image=256, + positive_fraction=0.5, + bbox_reg_loss_type='smooth_l1', + bbox_reg_loss_weight=1.0, + bbox_reg_weights=(1.0, 1.0, 1.0, 1.0), + smooth_l1_beta=0.0, + loss_weight=1.0, + boundary_thresh=-1, + pre_nms_topk_train=2000, + post_nms_topk_train=1000, + pre_nms_topk_test=1000, + post_nms_topk_test=1000, + nms_thresh=0.7, + conv_dims=[-1]), + roi_heads=dict( + name='StandardROIHeads', + num_classes=80, + in_features=['p2', 'p3', 'p4', 'p5'], + iou_thresholds=[0.5], + iou_labels=[0, 1], + batch_size_per_image=512, + positive_fraction=0.25, + score_thresh_test=0.05, + nms_thresh_test=0.5, + proposal_append_gt=True), + roi_box_head=dict( + name='FastRCNNConvFCHead', + num_fc=2, + fc_dim=1024, + conv_dim=256, + pooler_type='ROIAlignV2', + pooler_resolution=7, + pooler_sampling_ratio=0, + bbox_reg_loss_type='smooth_l1', + bbox_reg_loss_weight=1.0, + bbox_reg_weights=(10.0, 10.0, 5.0, 5.0), + smooth_l1_beta=0.0, + cls_agnostic_bbox_reg=False))) diff --git a/mmpose/configs/mmdet/misc/d2_mask-rcnn_r50-caffe_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/misc/d2_mask-rcnn_r50-caffe_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c0919c4593f028445dc033e85314320f88409a54 --- /dev/null +++ b/mmpose/configs/mmdet/misc/d2_mask-rcnn_r50-caffe_fpn_ms-90k_coco.py @@ -0,0 +1,83 @@ +_base_ = '../common/ms-poly-90k_coco-instance.py' + +# model settings +model = dict( + type='Detectron2Wrapper', + bgr_to_rgb=False, + detector=dict( + # The settings in `d2_detector` will merged into default settings + # 
in detectron2. More details please refer to + # https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa + meta_architecture='GeneralizedRCNN', + # If you want to finetune the detector, you can use the + # checkpoint released by detectron2, for example: + # weights='detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl' # noqa + weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl', + mask_on=True, + pixel_mean=[103.530, 116.280, 123.675], + pixel_std=[1.0, 1.0, 1.0], + backbone=dict(name='build_resnet_fpn_backbone', freeze_at=2), + resnets=dict( + depth=50, + out_features=['res2', 'res3', 'res4', 'res5'], + num_groups=1, + norm='FrozenBN'), + fpn=dict( + in_features=['res2', 'res3', 'res4', 'res5'], out_channels=256), + anchor_generator=dict( + name='DefaultAnchorGenerator', + sizes=[[32], [64], [128], [256], [512]], + aspect_ratios=[[0.5, 1.0, 2.0]], + angles=[[-90, 0, 90]]), + proposal_generator=dict(name='RPN'), + rpn=dict( + head_name='StandardRPNHead', + in_features=['p2', 'p3', 'p4', 'p5', 'p6'], + iou_thresholds=[0.3, 0.7], + iou_labels=[0, -1, 1], + batch_size_per_image=256, + positive_fraction=0.5, + bbox_reg_loss_type='smooth_l1', + bbox_reg_loss_weight=1.0, + bbox_reg_weights=(1.0, 1.0, 1.0, 1.0), + smooth_l1_beta=0.0, + loss_weight=1.0, + boundary_thresh=-1, + pre_nms_topk_train=2000, + post_nms_topk_train=1000, + pre_nms_topk_test=1000, + post_nms_topk_test=1000, + nms_thresh=0.7, + conv_dims=[-1]), + roi_heads=dict( + name='StandardROIHeads', + num_classes=80, + in_features=['p2', 'p3', 'p4', 'p5'], + iou_thresholds=[0.5], + iou_labels=[0, 1], + batch_size_per_image=512, + positive_fraction=0.25, + score_thresh_test=0.05, + nms_thresh_test=0.5, + proposal_append_gt=True), + roi_box_head=dict( + name='FastRCNNConvFCHead', + num_fc=2, + fc_dim=1024, + conv_dim=256, + pooler_type='ROIAlignV2', + pooler_resolution=7, + pooler_sampling_ratio=0, + bbox_reg_loss_type='smooth_l1', + bbox_reg_loss_weight=1.0, + bbox_reg_weights=(10.0, 10.0, 5.0, 5.0), + smooth_l1_beta=0.0, + cls_agnostic_bbox_reg=False), + roi_mask_head=dict( + name='MaskRCNNConvUpsampleHead', + conv_dim=256, + num_conv=4, + pooler_type='ROIAlignV2', + pooler_resolution=14, + pooler_sampling_ratio=0, + cls_agnostic_mask=False))) diff --git a/mmpose/configs/mmdet/misc/d2_retinanet_r50-caffe_fpn_ms-90k_coco.py b/mmpose/configs/mmdet/misc/d2_retinanet_r50-caffe_fpn_ms-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f7587648bde1d15b5c3c1e1ace6c35bb7c20b0 --- /dev/null +++ b/mmpose/configs/mmdet/misc/d2_retinanet_r50-caffe_fpn_ms-90k_coco.py @@ -0,0 +1,48 @@ +_base_ = '../common/ms-90k_coco.py' + +# model settings +model = dict( + type='Detectron2Wrapper', + bgr_to_rgb=False, + detector=dict( + # The settings in `d2_detector` will merged into default settings + # in detectron2. 
More details please refer to + # https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa + meta_architecture='RetinaNet', + # If you want to finetune the detector, you can use the + # checkpoint released by detectron2, for example: + # weights='detectron2://COCO-Detection/retinanet_R_50_FPN_1x/190397773/model_final_bfca0b.pkl' # noqa + weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl', + mask_on=False, + pixel_mean=[103.530, 116.280, 123.675], + pixel_std=[1.0, 1.0, 1.0], + backbone=dict(name='build_retinanet_resnet_fpn_backbone', freeze_at=2), + resnets=dict( + depth=50, + out_features=['res3', 'res4', 'res5'], + num_groups=1, + norm='FrozenBN'), + fpn=dict(in_features=['res3', 'res4', 'res5'], out_channels=256), + anchor_generator=dict( + name='DefaultAnchorGenerator', + sizes=[[x, x * 2**(1.0 / 3), x * 2**(2.0 / 3)] + for x in [32, 64, 128, 256, 512]], + aspect_ratios=[[0.5, 1.0, 2.0]], + angles=[[-90, 0, 90]]), + retinanet=dict( + num_classes=80, + in_features=['p3', 'p4', 'p5', 'p6', 'p7'], + num_convs=4, + iou_thresholds=[0.4, 0.5], + iou_labels=[0, -1, 1], + bbox_reg_weights=(1.0, 1.0, 1.0, 1.0), + bbox_reg_loss_type='smooth_l1', + smooth_l1_loss_beta=0.0, + focal_loss_gamma=2.0, + focal_loss_alpha=0.25, + prior_prob=0.01, + score_thresh_test=0.05, + topk_candidates_test=1000, + nms_thresh_test=0.5))) + +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/README.md b/mmpose/configs/mmdet/mm_grounding_dino/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c88cb1c902667e4bb480eb143d7b1268c35433dd --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/README.md @@ -0,0 +1,387 @@ +# MM Grounding DINO + +> [An Open and Comprehensive Pipeline for Unified Object Grounding and Detection](https://arxiv.org/abs/2401.02361) + + + +## Abstract + +Grounding-DINO is a state-of-the-art open-set detection model that tackles multiple vision tasks including Open-Vocabulary Detection (OVD), Phrase Grounding (PG), and Referring Expression Comprehension (REC). Its effectiveness has led to its widespread adoption as a mainstream architecture for various downstream applications. However, despite its significance, the original Grounding-DINO model lacks comprehensive public technical details due to the unavailability of its training code. To bridge this gap, we present MM-Grounding-DINO, an open-source, comprehensive, and user-friendly baseline, which is built with the MMDetection toolbox. It adopts abundant vision datasets for pre-training and various detection and grounding datasets for fine-tuning. We give a comprehensive analysis of each reported result and detailed settings for reproduction. The extensive experiments on the benchmarks mentioned demonstrate that our MM-Grounding-DINO-Tiny outperforms the Grounding-DINO-Tiny baseline. We release all our models to the research community. + +
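+For a quick impression of how the released checkpoints are used, the snippet below is a minimal text-prompted inference sketch built on MMDetection's `DetInferencer`. It is a sketch only: it assumes an MMDetection 3.x environment with this project installed, the demo image path is a placeholder, and the config path may need adjusting to your local checkout. Full instructions live in [usage.md](usage.md).
+
+```python
+from mmdet.apis import DetInferencer
+
+# Sketch only: the config path assumes a local checkout of this repo; the
+# checkpoint URL is the MM-GDINO-T (O365,GoldG,GRIT,V3Det) weight listed in
+# the zero-shot COCO table below.
+inferencer = DetInferencer(
+    model='configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py',
+    weights='https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth',  # noqa
+    device='cuda:0')
+
+# Categories are given as a '.'-separated open-vocabulary text prompt.
+results = inferencer(
+    'demo/demo.jpg',  # placeholder image path
+    texts='person . car . traffic light .',
+    pred_score_thr=0.3,
+    out_dir='outputs/')
+print(results['predictions'][0]['labels'][:5])
+```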
+ +## Dataset Preparation + +Please refer to [dataset_prepare.md](dataset_prepare.md) or [中文版数据准备](dataset_prepare_zh-CN.md) + +## ✨ What's New + +💎 **We have released the pre-trained weights for Swin-B and Swin-L, welcome to try and give feedback.** + +## Usage + +Please refer to [usage.md](usage.md) or [中文版用法说明](usage_zh-CN.md) + +## Zero-Shot COCO Results and Models + +| Model | Backbone | Style | COCO mAP | Pre-Train Data | Config | Download | +| :----------: | :------: | :-------: | :--------: | :----------------------: | :------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GDINO-T | Swin-T | Zero-shot | 46.7 | O365 | | | +| GDINO-T | Swin-T | Zero-shot | 48.1 | O365,GoldG | | | +| GDINO-T | Swin-T | Zero-shot | 48.4 | O365,GoldG,Cap4M | [config](../grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swint_ogc_mmdet-822d7e9d.pth) | +| MM-GDINO-T | Swin-T | Zero-shot | 48.5(+1.8) | O365 | [config](grounding_dino_swin-t_pretrain_obj365.py) | | +| MM-GDINO-T | Swin-T | Zero-shot | 50.4(+2.3) | O365,GoldG | [config](grounding_dino_swin-t_pretrain_obj365_goldg.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602-4ea751ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602.log.json) | +| MM-GDINO-T | Swin-T | Zero-shot | 50.5(+2.1) | O365,GoldG,GRIT | [config](grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818-169cc352.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818.log.json) | +| MM-GDINO-T | Swin-T | Zero-shot | 50.6(+2.2) | O365,GoldG,V3Det | [config](grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741-e316e297.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741.log.json) | +| MM-GDINO-T | Swin-T | Zero-shot | 50.4(+2.0) | O365,GoldG,GRIT,V3Det | [config](grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047.log.json) | +| MM-GDINO-B | Swin-B | Zero-shot | 52.5 | O365,GoldG,V3Det | [config](grounding_dino_swin-b_pretrain_obj365_goldg_v3det.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth) \| [log](<>) | +| MM-GDINO-B\* | Swin-B | - | 59.5 | O365,ALL | [config](grounding_dino_swin-b_pretrain_all.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_all/grounding_dino_swin-b_pretrain_all-f9818a7c.pth) \| [log](<>) | +| MM-GDINO-L | Swin-L | Zero-shot | 53.0 | O365V2,OpenImageV6,GoldG | [config](grounding_dino_swin-l_pretrain_obj365_goldg.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth) \| [log](<>) | +| MM-GDINO-L\* | Swin-L | - | 60.3 | O365V2,OpenImageV6,ALL | [config](grounding_dino_swin-l_pretrain_all.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_all/grounding_dino_swin-l_pretrain_all-56d69e78.pth) \| [log](<>) | + +- This * indicates that the model has not been fully trained yet. We will release the final weights in the future. +- ALL: GoldG,V3det,COCO2017,LVISV1,COCO2014,GRIT,RefCOCO,RefCOCO+,RefCOCOg,gRefCOCO. + +## Zero-Shot LVIS Results + +| Model | MiniVal APr | MiniVal APc | MiniVal APf | MiniVal AP | Val1.0 APr | Val1.0 APc | Val1.0 APf | Val1.0 AP | Pre-Train Data | +| :--------: | :---------: | :---------: | :---------: | :---------: | :--------: | :--------: | :--------: | :---------: | :-------------------: | +| GDINO-T | 18.8 | 24.2 | 34.7 | 28.8 | 10.1 | 15.3 | 29.9 | 20.1 | O365,GoldG,Cap4M | +| MM-GDINO-T | 28.1 | 30.2 | 42.0 | 35.7(+6.9) | 17.1 | 22.4 | 36.5 | 27.0(+6.9) | O365,GoldG | +| MM-GDINO-T | 26.6 | 32.4 | 41.8 | 36.5(+7.7) | 17.3 | 22.6 | 36.4 | 27.1(+7.0) | O365,GoldG,GRIT | +| MM-GDINO-T | 33.0 | 36.0 | 45.9 | 40.5(+11.7) | 21.5 | 25.5 | 40.2 | 30.6(+10.5) | O365,GoldG,V3Det | +| MM-GDINO-T | 34.2 | 37.4 | 46.2 | 41.4(+12.6) | 23.6 | 27.6 | 40.5 | 31.9(+11.8) | O365,GoldG,GRIT,V3Det | + +- The MM-GDINO-T config file is [mini-lvis](lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py) and [lvis 1.0](lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py) + +## Zero-Shot ODinW (Object Detection in the Wild) Results + +### Results and models of ODinW13 + +| Method | GDINO-T
(O365,GoldG,Cap4M) | MM-GDINO-T
(O365,GoldG) | MM-GDINO-T
(O365,GoldG,GRIT) | MM-GDINO-T
(O365,GoldG,V3Det) | MM-GDINO-T
(O365,GoldG,GRIT,V3Det) | +| --------------------- | -------------------------------- | ----------------------------- | ---------------------------------- | ----------------------------------- | ---------------------------------------- | +| AerialMaritimeDrone | 0.173 | 0.133 | 0.155 | 0.177 | 0.151 | +| Aquarium | 0.195 | 0.252 | 0.261 | 0.266 | 0.283 | +| CottontailRabbits | 0.799 | 0.771 | 0.810 | 0.778 | 0.786 | +| EgoHands | 0.608 | 0.499 | 0.537 | 0.506 | 0.519 | +| NorthAmericaMushrooms | 0.507 | 0.331 | 0.462 | 0.669 | 0.767 | +| Packages | 0.687 | 0.707 | 0.687 | 0.710 | 0.706 | +| PascalVOC | 0.563 | 0.565 | 0.580 | 0.556 | 0.566 | +| pistols | 0.726 | 0.585 | 0.709 | 0.671 | 0.729 | +| pothole | 0.215 | 0.136 | 0.285 | 0.199 | 0.243 | +| Raccoon | 0.549 | 0.469 | 0.511 | 0.553 | 0.535 | +| ShellfishOpenImages | 0.393 | 0.321 | 0.437 | 0.519 | 0.488 | +| thermalDogsAndPeople | 0.657 | 0.556 | 0.603 | 0.493 | 0.542 | +| VehiclesOpenImages | 0.613 | 0.566 | 0.603 | 0.614 | 0.615 | +| Average | **0.514** | **0.453** | **0.511** | **0.516** | **0.533** | + +- The MM-GDINO-T config file is [odinw13](odinw/grounding_dino_swin-t_pretrain_odinw13.py) + +### Results and models of ODinW35 + +| Method | GDINO-T
(O365,GoldG,Cap4M) | MM-GDINO-T
(O365,GoldG) | MM-GDINO-T
(O365,GoldG,GRIT) | MM-GDINO-T
(O365,GoldG,V3Det) | MM-GDINO-T
(O365,GoldG,GRIT,V3Det) | +| --------------------------- | -------------------------------- | ----------------------------- | ---------------------------------- | ----------------------------------- | ---------------------------------------- | +| AerialMaritimeDrone_large | 0.173 | 0.133 | 0.155 | 0.177 | 0.151 | +| AerialMaritimeDrone_tiled | 0.206 | 0.170 | 0.225 | 0.184 | 0.206 | +| AmericanSignLanguageLetters | 0.002 | 0.016 | 0.020 | 0.011 | 0.007 | +| Aquarium | 0.195 | 0.252 | 0.261 | 0.266 | 0.283 | +| BCCD | 0.161 | 0.069 | 0.118 | 0.083 | 0.077 | +| boggleBoards | 0.000 | 0.002 | 0.001 | 0.001 | 0.002 | +| brackishUnderwater | 0.021 | 0.033 | 0.021 | 0.025 | 0.025 | +| ChessPieces | 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | +| CottontailRabbits | 0.806 | 0.771 | 0.810 | 0.778 | 0.786 | +| dice | 0.004 | 0.002 | 0.005 | 0.001 | 0.001 | +| DroneControl | 0.042 | 0.047 | 0.097 | 0.088 | 0.074 | +| EgoHands_generic | 0.608 | 0.527 | 0.537 | 0.506 | 0.519 | +| EgoHands_specific | 0.002 | 0.001 | 0.005 | 0.007 | 0.003 | +| HardHatWorkers | 0.046 | 0.048 | 0.070 | 0.070 | 0.108 | +| MaskWearing | 0.004 | 0.009 | 0.004 | 0.011 | 0.009 | +| MountainDewCommercial | 0.430 | 0.453 | 0.465 | 0.194 | 0.430 | +| NorthAmericaMushrooms | 0.471 | 0.331 | 0.462 | 0.669 | 0.767 | +| openPoetryVision | 0.000 | 0.001 | 0.000 | 0.000 | 0.000 | +| OxfordPets_by_breed | 0.003 | 0.002 | 0.004 | 0.006 | 0.004 | +| OxfordPets_by_species | 0.011 | 0.019 | 0.016 | 0.020 | 0.015 | +| PKLot | 0.001 | 0.004 | 0.002 | 0.008 | 0.007 | +| Packages | 0.695 | 0.707 | 0.687 | 0.710 | 0.706 | +| PascalVOC | 0.563 | 0.565 | 0.580 | 0.566 | 0.566 | +| pistols | 0.726 | 0.585 | 0.709 | 0.671 | 0.729 | +| plantdoc | 0.005 | 0.005 | 0.007 | 0.008 | 0.011 | +| pothole | 0.215 | 0.136 | 0.219 | 0.077 | 0.168 | +| Raccoons | 0.549 | 0.469 | 0.511 | 0.553 | 0.535 | +| selfdrivingCar | 0.089 | 0.091 | 0.076 | 0.094 | 0.083 | +| ShellfishOpenImages | 0.393 | 0.321 | 0.437 | 0.519 | 0.488 | +| ThermalCheetah | 0.087 | 0.063 | 0.081 | 0.030 | 0.045 | +| thermalDogsAndPeople | 0.657 | 0.556 | 0.603 | 0.493 | 0.543 | +| UnoCards | 0.006 | 0.012 | 0.010 | 0.009 | 0.005 | +| VehiclesOpenImages | 0.613 | 0.566 | 0.603 | 0.614 | 0.615 | +| WildfireSmoke | 0.134 | 0.106 | 0.154 | 0.042 | 0.127 | +| websiteScreenshots | 0.012 | 0.02 | 0.016 | 0.016 | 0.016 | +| Average | **0.227** | **0.202** | **0.228** | **0.214** | **0.284** | + +- The MM-GDINO-T config file is [odinw35](odinw/grounding_dino_swin-t_pretrain_odinw35.py) + +## Zero-Shot Referring Expression Comprehension Results + +| Method | GDINO-T
(O365,GoldG,Cap4M) | MM-GDINO-T
(O365,GoldG) | MM-GDINO-T
(O365,GoldG,GRIT) | MM-GDINO-T
(O365,GoldG,V3Det) | MM-GDINO-T
(O365,GoldG,GRIT,V3Det) | +| ---------------------- | -------------------------------- | ----------------------------- | ---------------------------------- | ----------------------------------- | ---------------------------------------- | +| RefCOCO val @1,5,10 | 50.8/89.5/94.9 | 53.1/89.9/94.7 | 53.4/90.3/95.5 | 52.1/89.8/95.0 | 53.1/89.7/95.1 | +| RefCOCO testA @1,5,10 | 57.4/91.3/95.6 | 59.7/91.5/95.9 | 58.8/91.70/96.2 | 58.4/86.8/95.6 | 59.1/91.0/95.5 | +| RefCOCO testB @1,5,10 | 45.0/86.5/92.9 | 46.4/86.9/92.2 | 46.8/87.7/93.3 | 45.4/86.2/92.6 | 46.8/87.8/93.6 | +| RefCOCO+ val @1,5,10 | 51.6/86.4/92.6 | 53.1/87.0/92.8 | 53.5/88.0/93.7 | 52.5/86.8/93.2 | 52.7/87.7/93.5 | +| RefCOCO+ testA @1,5,10 | 57.3/86.7/92.7 | 58.9/87.3/92.9 | 59.0/88.1/93.7 | 58.1/86.7/93.5 | 58.7/87.2/93.1 | +| RefCOCO+ testB @1,5,10 | 46.4/84.1/90.7 | 47.9/84.3/91.0 | 47.9/85.5/92.7 | 46.9/83.7/91.5 | 48.4/85.8/92.1 | +| RefCOCOg val @1,5,10 | 60.4/92.1/96.2 | 61.2/92.6/96.1 | 62.7/93.3/97.0 | 61.7/92.9/96.6 | 62.9/93.3/97.2 | +| RefCOCOg test @1,5,10 | 59.7/92.1/96.3 | 61.1/93.3/96.7 | 62.6/94.9/97.1 | 61.0/93.1/96.8 | 62.9/93.9/97.4 | + +| Method | thresh_score | GDINO-T
(O365,GoldG,Cap4M) | MM-GDINO-T
(O365,GoldG) | MM-GDINO-T
(O365,GoldG,GRIT) | MM-GDINO-T
(O365,GoldG,V3Det) | MM-GDINO-T
(O365,GoldG,GRIT,V3Det) | +| --------------------------------------- | ------------ | -------------------------------- | ----------------------------- | ---------------------------------- | ----------------------------------- | ---------------------------------------- | +| gRefCOCO val Pr@(F1=1, IoU≥0.5),N-acc | 0.5 | 39.3/70.4 | | | | 39.4/67.5 | +| gRefCOCO val Pr@(F1=1, IoU≥0.5),N-acc | 0.6 | 40.5/83.8 | | | | 40.6/83.1 | +| gRefCOCO val Pr@(F1=1, IoU≥0.5),N-acc | 0.7 | 41.3/91.8 | 39.8/84.7 | 40.7/89.7 | 40.3/88.8 | 41.0/91.3 | +| gRefCOCO val Pr@(F1=1, IoU≥0.5),N-acc | 0.8 | 41.5/96.8 | | | | 41.1/96.4 | +| gRefCOCO testA Pr@(F1=1, IoU≥0.5),N-acc | 0.5 | 31.9/70.4 | | | | 33.1/69.5 | +| gRefCOCO testA Pr@(F1=1, IoU≥0.5),N-acc | 0.6 | 29.3/82.9 | | | | 29.2/84.3 | +| gRefCOCO testA Pr@(F1=1, IoU≥0.5),N-acc | 0.7 | 27.2/90.2 | 26.3/89.0 | 26.0/91.9 | 25.4/91.8 | 26.1/93.0 | +| gRefCOCO testA Pr@(F1=1, IoU≥0.5),N-acc | 0.8 | 25.1/96.3 | | | | 23.8/97.2 | +| gRefCOCO testB Pr@(F1=1, IoU≥0.5),N-acc | 0.5 | 30.9/72.5 | | | | 33.0/69.6 | +| gRefCOCO testB Pr@(F1=1, IoU≥0.5),N-acc | 0.6 | 30.0/86.1 | | | | 31.6/96.7 | +| gRefCOCO testB Pr@(F1=1, IoU≥0.5),N-acc | 0.7 | 29.7/93.5 | 31.3/84.8 | 30.6/90.2 | 30.7/89.9 | 30.4/92.3 | +| gRefCOCO testB Pr@(F1=1, IoU≥0.5),N-acc | 0.8 | 29.1/97.4 | | | | 29.5/84.2 | + +- The MM-GDINO-T config file is [here](refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py) + +## Zero-Shot Description Detection Dataset(DOD) + +```shell +pip install ddd-dataset +``` + +| Method | mode | GDINO-T
(O365,GoldG,Cap4M) | MM-GDINO-T
(O365,GoldG) | MM-GDINO-T
(O365,GoldG,GRIT) | MM-GDINO-T
(O365,GoldG,V3Det) | MM-GDINO-T
(O365,GoldG,GRIT,V3Det) | +| -------------------------------- | -------- | -------------------------------- | ----------------------------- | ---------------------------------- | ----------------------------------- | ---------------------------------------- | +| FULL/short/middle/long/very long | concat | 17.2/18.0/18.7/14.8/16.3 | 15.6/17.3/16.7/14.3/13.1 | 17.0/17.7/18.0/15.7/15.7 | 16.2/17.4/16.8/14.9/15.4 | 17.5/23.4/18.3/14.7/13.8 | +| FULL/short/middle/long/very long | parallel | 22.3/28.2/24.8/19.1/13.9 | 21.7/24.7/24.0/20.2/13.7 | 22.5/25.6/25.1/20.5/14.9 | 22.3/25.6/24.5/20.6/14.7 | 22.9/28.1/25.4/20.4/14.4 | +| PRES/short/middle/long/very long | concat | 17.8/18.3/19.2/15.2/17.3 | 16.4/18.4/17.3/14.5/14.2 | 17.9/19.0/18.3/16.5/17.5 | 16.6/18.8/17.1/15.1/15.0 | 18.0/23.7/18.6/15.4/13.3 | +| PRES/short/middle/long/very long | parallel | 21.0/27.0/22.8/17.5/12.5 | 21.3/25.5/22.8/19.2/12.9 | 21.5/25.2/23.0/19.0/15.0 | 21.6/25.7/23.0/19.5/14.8 | 21.9/27.4/23.2/19.1/14.2 | +| ABS/short/middle/long/very long | concat | 15.4/17.1/16.4/13.6/14.9 | 13.4/13.4/14.5/13.5/11.9 | 14.5/13.1/16.7/13.6/13.3 | 14.8/12.5/15.6/14.3/15.8 | 15.9/22.2/17.1/12.5/14.4 | +| ABS/short/middle/long/very long | parallel | 26.0/32.0/33.0/23.6/15.5 | 22.8/22.2/28.7/22.9/14.7 | 25.6/26.8/33.9/24.5/14.7 | 24.1/24.9/30.7/23.8/14.7 | 26.0/30.3/34.1/23.9/14.6 | + +Note: + +1. Considering that the evaluation time for Inter-scenario is very long and the performance is low, it is temporarily not supported. The mentioned metrics are for Intra-scenario. +2. `concat` is the default inference mode for Grounding DINO, where it concatenates multiple sub-sentences with "." to form a single sentence for inference. On the other hand, "parallel" performs inference on each sub-sentence in a for-loop. +3. The MM-GDINO-T config file is [concat_dod](dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py) and [parallel_dod](dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py) + +## Pretrain Flickr30k Results + +| Model | Pre-Train Data | Val R@1 | Val R@5 | Val R@10 | Test R@1 | Test R@5 | Test R@10 | +| :--------: | :-------------------: | ------- | ------- | -------- | -------- | -------- | --------- | +| GLIP-T | O365,GoldG | 84.9 | 94.9 | 96.3 | 85.6 | 95.4 | 96.7 | +| GLIP-T | O365,GoldG,CC3M,SBU | 85.3 | 95.5 | 96.9 | 86.0 | 95.9 | 97.2 | +| GDINO-T | O365,GoldG,Cap4M | 87.8 | 96.6 | 98.0 | 88.1 | 96.9 | 98.2 | +| MM-GDINO-T | O365,GoldG | 85.5 | 95.6 | 97.2 | 86.2 | 95.7 | 97.4 | +| MM-GDINO-T | O365,GoldG,GRIT | 86.7 | 95.8 | 97.6 | 87.0 | 96.2 | 97.7 | +| MM-GDINO-T | O365,GoldG,V3Det | 85.9 | 95.7 | 97.4 | 86.3 | 95.7 | 97.4 | +| MM-GDINO-T | O365,GoldG,GRIT,V3Det | 86.7 | 96.0 | 97.6 | 87.2 | 96.2 | 97.7 | + +Note: + +1. `@1,5,10` refers to precision at the top 1, 5, and 10 positions in a predicted ranked list. +2. 
The MM-GDINO-T config file is [here](flickr30k/grounding_dino_swin-t-pretrain_flickr30k.py) + +## Validating the generalization of a pre-trained model through fine-tuning + +### RTTS + +| Architecture | Backbone | Lr schd | box AP | +| :-----------------: | :------: | ------- | -------- | +| Faster R-CNN | R-50 | 1x | 48.1 | +| Cascade R-CNN | R-50 | 1x | 50.8 | +| ATSS | R-50 | 1x | 48.2 | +| TOOD | R-50 | 1X | 50.8 | +| MM-GDINO(zero-shot) | Swin-T | | 49.8 | +| MM-GDINO | Swin-T | 1x | **69.1** | + +- The reference metrics come from https://github.com/BIGWangYuDong/lqit/tree/main/configs/detection/rtts_dataset +- The MM-GDINO-T config file is [here](rtts/grounding_dino_swin-t_finetune_8xb4_1x_rtts.py) + +### RUOD + +| Architecture | Backbone | Lr schd | box AP | +| :-----------------: | :------: | ------- | -------- | +| Faster R-CNN | R-50 | 1x | 52.4 | +| Cascade R-CNN | R-50 | 1x | 55.3 | +| ATSS | R-50 | 1x | 55.7 | +| TOOD | R-50 | 1X | 57.4 | +| MM-GDINO(zero-shot) | Swin-T | | 29.8 | +| MM-GDINO | Swin-T | 1x | **65.5** | + +- The reference metrics come from https://github.com/BIGWangYuDong/lqit/tree/main/configs/detection/ruod_dataset +- The MM-GDINO-T config file is [here](ruod/grounding_dino_swin-t_finetune_8xb4_1x_ruod.py) + +### Brain Tumor + +| Architecture | Backbone | Lr schd | box AP | +| :-----------: | :------: | ------- | ------ | +| Faster R-CNN | R-50 | 50e | 43.5 | +| Cascade R-CNN | R-50 | 50e | 46.2 | +| DINO | R-50 | 50e | 46.4 | +| Cascade-DINO | R-50 | 50e | 48.6 | +| MM-GDINO | Swin-T | 50e | 47.5 | + +- The reference metrics come from https://arxiv.org/abs/2307.11035 +- The MM-GDINO-T config file is [here](brain_tumor/grounding_dino_swin-t_finetune_8xb4_50e_brain_tumor.py) + +### Cityscapes + +| Architecture | Backbone | Lr schd | box AP | +| :-----------------: | :------: | ------- | -------- | +| Faster R-CNN | R-50 | 50e | 30.1 | +| Cascade R-CNN | R-50 | 50e | 31.8 | +| DINO | R-50 | 50e | 34.5 | +| Cascade-DINO | R-50 | 50e | 34.8 | +| MM-GDINO(zero-shot) | Swin-T | | 34.2 | +| MM-GDINO | Swin-T | 50e | **51.5** | + +- The reference metrics come from https://arxiv.org/abs/2307.11035 +- The MM-GDINO-T config file is [here](cityscapes/grounding_dino_swin-t_finetune_8xb4_50e_cityscapes.py) + +### People in Painting + +| Architecture | Backbone | Lr schd | box AP | +| :-----------------: | :------: | ------- | -------- | +| Faster R-CNN | R-50 | 50e | 17.0 | +| Cascade R-CNN | R-50 | 50e | 18.0 | +| DINO | R-50 | 50e | 12.0 | +| Cascade-DINO | R-50 | 50e | 13.4 | +| MM-GDINO(zero-shot) | Swin-T | | 23.1 | +| MM-GDINO | Swin-T | 50e | **38.9** | + +- The reference metrics come from https://arxiv.org/abs/2307.11035 +- The MM-GDINO-T config file is [here](people_in_painting/grounding_dino_swin-t_finetune_8xb4_50e_people_in_painting.py) + +### COCO + +**(1) Closed-set performance** + +| Architecture | Backbone | Lr schd | box AP | +| :-----------------: | :------: | ------- | ------ | +| Faster R-CNN | R-50 | 1x | 37.4 | +| Cascade R-CNN | R-50 | 1x | 40.3 | +| ATSS | R-50 | 1x | 39.4 | +| TOOD | R-50 | 1X | 42.4 | +| DINO | R-50 | 1X | 50.1 | +| GLIP(zero-shot) | Swin-T | | 46.6 | +| GDINO(zero-shot) | Swin-T | | 48.5 | +| MM-GDINO(zero-shot) | Swin-T | | 50.4 | +| GLIP | Swin-T | 1x | 55.4 | +| GDINO | Swin-T | 1x | 58.1 | +| MM-GDINO | Swin-T | 1x | 58.2 | + +- The MM-GDINO-T config file is [here](coco/grounding_dino_swin-t_finetune_16xb4_1x_coco.py) + +**(2) Open-set continuing pretraining performance** + +| Architecture | Backbone | Lr schd | box AP | +| 
:-----------------: | :------: | :-----: | :----: | +| GLIP(zero-shot) | Swin-T | | 46.7 | +| GDINO(zero-shot) | Swin-T | | 48.5 | +| MM-GDINO(zero-shot) | Swin-T | | 50.4 | +| MM-GDINO | Swin-T | 1x | 54.7 | + +- The MM-GDINO-T config file is [here](coco/grounding_dino_swin-t_finetune_16xb4_1x_sft_coco.py) +- Due to the small size of the COCO dataset, continuing pretraining solely on COCO can easily lead to overfitting. The results shown above are from the third epoch. I do not recommend you train using this approach. + +**(3) Open vocabulary performance** + +| Architecture | Backbone | Lr schd | box AP | Base box AP | Novel box AP | box AP@50 | Base box AP@50 | Novel box AP@50 | +| :-----------------: | :------: | :-----: | :----: | :---------: | :----------: | :-------: | :------------: | :-------------: | +| MM-GDINO(zero-shot) | Swin-T | | 51.1 | 48.4 | 58.9 | 66.7 | 64.0 | 74.2 | +| MM-GDINO | Swin-T | 1x | 57.2 | 56.1 | 60.4 | 73.6 | 73.0 | 75.3 | + +- The MM-GDINO-T config file is [here](coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py) + +### LVIS 1.0 + +**(1) Open-set continuing pretraining performance** + +| Architecture | Backbone | Lr schd | MiniVal APr | MiniVal APc | MiniVal APf | MiniVal AP | Val1.0 APr | Val1.0 APc | Val1.0 APf | Val1.0 AP | +| :-----------------: | :------: | :-----: | :---------: | :---------: | :---------: | :--------: | :--------: | :--------: | :--------: | :-------: | +| GLIP(zero-shot) | Swin-T | | 18.1 | 21.2 | 33.1 | 26.7 | 10.8 | 14.7 | 29.0 | 19.6 | +| GDINO(zero-shot) | Swin-T | | 18.8 | 24.2 | 34.7 | 28.8 | 10.1 | 15.3 | 29.9 | 20.1 | +| MM-GDINO(zero-shot) | Swin-T | | 34.2 | 37.4 | 46.2 | 41.4 | 23.6 | 27.6 | 40.5 | 31.9 | +| MM-GDINO | Swin-T | 1x | 50.7 | 58.8 | 60.1 | 58.7 | 45.2 | 50.2 | 56.1 | 51.7 | + +- The MM-GDINO-T config file is [here](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py) + +**(2) Open vocabulary performance** + +| Architecture | Backbone | Lr schd | MiniVal APr | MiniVal APc | MiniVal APf | MiniVal AP | +| :-----------------: | :------: | :-----: | :---------: | :---------: | :---------: | :--------: | +| MM-GDINO(zero-shot) | Swin-T | | 34.2 | 37.4 | 46.2 | 41.4 | +| MM-GDINO | Swin-T | 1x | 43.2 | 57.4 | 59.3 | 57.1 | + +- The MM-GDINO-T config file is [here](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py) + +### RefEXP + +#### RefCOCO + +| Architecture | Backbone | Lr schd | val @1 | val @5 | val @10 | testA @1 | testA @5 | testA @10 | testB @1 | testB @5 | testB @10 | +| :-----------------: | :------: | :-----: | :----: | :----: | :-----: | :------: | :------: | :-------: | :------: | :------: | :-------: | +| GDINO(zero-shot) | Swin-T | | 50.8 | 89.5 | 94.9 | 57.5 | 91.3 | 95.6 | 45.0 | 86.5 | 92.9 | +| MM-GDINO(zero-shot) | Swin-T | | 53.1 | 89.7 | 95.1 | 59.1 | 91.0 | 95.5 | 46.8 | 87.8 | 93.6 | +| GDINO | Swin-T | UNK | 89.2 | | | 91.9 | | | 86.0 | | | +| MM-GDINO | Swin-T | 5e | 89.5 | 98.6 | 99.4 | 91.4 | 99.2 | 99.8 | 86.6 | 97.9 | 99.1 | + +- The MM-GDINO-T config file is [here](refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco.py) + +#### RefCOCO+ + +| Architecture | Backbone | Lr schd | val @1 | val @5 | val @10 | testA @1 | testA @5 | testA @10 | testB @1 | testB @5 | testB @10 | +| :-----------------: | :------: | :-----: | :----: | :----: | :-----: | :------: | :------: | :-------: | :------: | :------: | :-------: | +| GDINO(zero-shot) | Swin-T | | 51.6 | 86.4 | 92.6 | 57.3 | 86.7 | 92.7 | 46.4 | 84.1 | 90.7 | +| MM-GDINO(zero-shot) | Swin-T | | 52.7 | 87.7 | 93.5 | 
58.7 | 87.2 | 93.1 | 48.4 | 85.8 | 92.1 | +| GDINO | Swin-T | UNK | 81.1 | | | 87.4 | | | 74.7 | | | +| MM-GDINO | Swin-T | 5e | 82.1 | 97.8 | 99.2 | 87.5 | 99.2 | 99.7 | 74.0 | 96.3 | 96.4 | + +- The MM-GDINO-T config file is [here](refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco_plus.py) + +#### RefCOCOg + +| Architecture | Backbone | Lr schd | val @1 | val @5 | val @10 | test @1 | test @5 | test @10 | +| :-----------------: | :------: | :-----: | :----: | :----: | :-----: | :-----: | :-----: | :------: | +| GDINO(zero-shot) | Swin-T | | 60.4 | 92.1 | 96.2 | 59.7 | 92.1 | 96.3 | +| MM-GDINO(zero-shot) | Swin-T | | 62.9 | 93.3 | 97.2 | 62.9 | 93.9 | 97.4 | +| GDINO | Swin-T | UNK | 84.2 | | | 84.9 | | | +| MM-GDINO | Swin-T | 5e | 85.5 | 98.4 | 99.4 | 85.8 | 98.6 | 99.4 | + +- The MM-GDINO-T config file is [here](refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcocog.py) + +#### gRefCOCO + +| Architecture | Backbone | Lr schd | val Pr@(F1=1, IoU≥0.5) | val N-acc | testA Pr@(F1=1, IoU≥0.5) | testA N-acc | testB Pr@(F1=1, IoU≥0.5) | testB N-acc | +| :-----------------: | :------: | :-----: | :--------------------: | :-------: | :----------------------: | :---------: | :----------------------: | :---------: | +| GDINO(zero-shot) | Swin-T | | 41.3 | 91.8 | 27.2 | 90.2 | 29.7 | 93.5 | +| MM-GDINO(zero-shot) | Swin-T | | 41.0 | 91.3 | 26.1 | 93.0 | 30.4 | 92.3 | +| MM-GDINO | Swin-T | 5e | 45.1 | 64.7 | 42.5 | 65.5 | 40.3 | 63.2 | + +- The MM-GDINO-T config file is [here](refcoco/grounding_dino_swin-t_finetune_8xb4_5e_grefcoco.py) + +## Citation + +If you find this project useful in your research, please consider citing: + +```latex +@article{zhao2024open, + title={An Open and Comprehensive Pipeline for Unified Object Grounding and Detection}, + author={Zhao, Xiangyu and Chen, Yicheng and Xu, Shilin and Li, Xiangtai and Wang, Xinjiang and Li, Yining and Huang, Haian}, + journal={arXiv preprint arXiv:2401.02361}, + year={2024} +} +``` diff --git a/mmpose/configs/mmdet/mm_grounding_dino/brain_tumor/grounding_dino_swin-t_finetune_8xb4_50e_brain_tumor.py b/mmpose/configs/mmdet/mm_grounding_dino/brain_tumor/grounding_dino_swin-t_finetune_8xb4_50e_brain_tumor.py new file mode 100644 index 0000000000000000000000000000000000000000..1172da5b64102413eec11f223f467ad4c03a7cdf --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/brain_tumor/grounding_dino_swin-t_finetune_8xb4_50e_brain_tumor.py @@ -0,0 +1,112 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +# https://universe.roboflow.com/roboflow-100/brain-tumor-m2pbp/dataset/2 +data_root = 'data/brain_tumor_v2/' +class_name = ('label0', 'label1', 'label2') +label_name = '_annotations.coco.json' + +palette = [(220, 20, 60), (255, 0, 0), (0, 0, 142)] + +metainfo = dict(classes=class_name, palette=palette) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + 
type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=10, + dataset=dict( + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + return_classes=True, + data_prefix=dict(img='train/'), + ann_file='train/' + label_name))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + return_classes=True, + ann_file='valid/' + label_name, + data_prefix=dict(img='valid/'))) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'valid/' + label_name, + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[4], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/cityscapes/grounding_dino_swin-t_finetune_8xb4_50e_cityscapes.py b/mmpose/configs/mmdet/mm_grounding_dino/cityscapes/grounding_dino_swin-t_finetune_8xb4_50e_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..c4283413c4ba0c060144d7fb85f7d064a60577c7 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/cityscapes/grounding_dino_swin-t_finetune_8xb4_50e_cityscapes.py @@ -0,0 +1,110 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/cityscapes/' +class_name = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle') +palette = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), + (0, 80, 100), (0, 0, 230), (119, 11, 32)] + +metainfo = dict(classes=class_name, palette=palette) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + 
crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=10, + dataset=dict( + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + return_classes=True, + data_prefix=dict(img='leftImg8bit/train/'), + ann_file='annotations/instancesonly_filtered_gtFine_train.json'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + return_classes=True, + ann_file='annotations/instancesonly_filtered_gtFine_val.json', + data_prefix=dict(img='leftImg8bit/val/'))) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[4], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco.py b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..792297accd302d390f865bee294b1294863d6ac1 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco.py @@ -0,0 +1,85 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + 
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='CocoDataset', + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + return_classes=True, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + 'language_model': dict(lr_mult=0.1), + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py new file mode 100644 index 0000000000000000000000000000000000000000..e68afbb43286af24612321129042e7d0e0f34b29 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py @@ -0,0 +1,157 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' +base_classes = ('person', 'bicycle', 'car', 'motorcycle', 'train', 'truck', + 'boat', 'bench', 'bird', 'horse', 'sheep', 'bear', 'zebra', + 'giraffe', 'backpack', 'handbag', 'suitcase', 'frisbee', + 'skis', 'kite', 'surfboard', 'bottle', 'fork', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'pizza', 'donut', 'chair', 'bed', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'microwave', 'oven', 'toaster', + 'refrigerator', 'book', 'clock', 'vase', 'toothbrush') # 48 +novel_classes = ('airplane', 'bus', 'cat', 'dog', 'cow', 'elephant', + 'umbrella', 'tie', 'snowboard', 'skateboard', 'cup', 'knife', + 'cake', 'couch', 'keyboard', 'sink', 'scissors') # 17 +all_classes = ( + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', + 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'kite', 'skateboard', + 'surfboard', 'bottle', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', + 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'pizza', 'donut', + 'cake', 'chair', 'couch', 'bed', 'toilet', 'tv', 'laptop', 'mouse', + 'remote', 'keyboard', 'microwave', 'oven', 'toaster', 'sink', + 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'toothbrush') # 65 + +train_metainfo = dict(classes=base_classes) +test_metainfo = dict( + classes=all_classes, + 
base_classes=base_classes, + novel_classes=novel_classes) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='CocoDataset', + metainfo=train_metainfo, + data_root=data_root, + ann_file='annotations/instances_train2017_seen_2.json', + data_prefix=dict(img='train2017/'), + return_classes=True, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + metainfo=test_metainfo, + data_root=data_root, + ann_file='annotations/instances_val2017_all_2.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + return_classes=True, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='OVCocoMetric', + ann_file=data_root + 'annotations/instances_val2017_all_2.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.00005, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +default_hooks = dict( + checkpoint=dict( + max_keep_ckpts=1, save_best='coco/novel_ap50', rule='greater')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git 
a/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_sft_coco.py b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_sft_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5505df58b8b103a93570519c20aaf0fcc144e91c --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/coco/grounding_dino_swin-t_finetune_16xb4_1x_sft_coco.py @@ -0,0 +1,93 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=20, # ======= important ===== + label_map_file='data/coco/annotations/coco2017_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ODVGDataset', + need_text=False, + data_root=data_root, + ann_file='annotations/instances_train2017_od.json', + label_map_file='annotations/coco2017_label_map.json', + data_prefix=dict(img='train2017/'), + return_classes=True, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.00005, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + 'language_model': dict(lr_mult=0.0), + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare.md b/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare.md new file mode 100644 index 0000000000000000000000000000000000000000..af60a8bf4bf7ebc0dde342a7a9ec0bd05dc1fadd --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare.md @@ -0,0 +1,1193 @@ +# Data Prepare and Process + +## MM-GDINO-T Pre-train Dataset + +For the 
MM-GDINO-T model, we provide a total of 5 different data combination pre-training configurations. The data is trained in a progressive accumulation manner, so users can prepare it according to their actual needs. + +### 1 Objects365v1 + +The corresponding training config is [grounding_dino_swin-t_pretrain_obj365](./grounding_dino_swin-t_pretrain_obj365.py) + +Objects365v1 can be downloaded from [opendatalab](https://opendatalab.com/OpenDataLab/Objects365_v1). It offers two methods of download: CLI and SDK. + +After downloading and unzipping, place the dataset or create a symbolic link to the `data/objects365v1` directory. The directory structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v1 +│ │ ├── objects365_train.json +│ │ ├── objects365_val.json +│ │ ├── train +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── test +``` + +Then, use [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) to convert it into the ODVG format required for training. + +```shell +python tools/dataset_converters/coco2odvg.py data/objects365v1/objects365_train.json -d o365v1 +``` + +After the program runs successfully, it will create two new files, `o365v1_train_od.json` and `o365v1_label_map.json`, in the `data/objects365v1` directory. The complete structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v1 +│ │ ├── objects365_train.json +│ │ ├── objects365_val.json +│ │ ├── o365v1_train_od.json +│ │ ├── o365v1_label_map.json +│ │ ├── train +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── test +``` + +### 2 COCO 2017 + +The above configuration will evaluate the performance on the COCO 2017 dataset during the training process. Therefore, it is necessary to prepare the COCO 2017 dataset. You can download it from the [COCO](https://cocodataset.org/) official website or from [opendatalab](https://opendatalab.com/OpenDataLab/COCO_2017). + +After downloading and unzipping, place the dataset or create a symbolic link to the `data/coco` directory. The directory structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 3 GoldG + +After downloading the dataset, you can start training with the [grounding_dino_swin-t_pretrain_obj365_goldg](./grounding_dino_swin-t_pretrain_obj365_goldg.py) configuration. + +The GoldG dataset includes the `GQA` and `Flickr30k` datasets, which are part of the MixedGrounding dataset mentioned in the GLIP paper, excluding the COCO dataset. The download links are [mdetr_annotations](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations), and the specific files currently needed are `mdetr_annotations/final_mixed_train_no_coco.json` and `mdetr_annotations/final_flickr_separateGT_train.json`. + +Then download the [GQA images](https://nlp.stanford.edu/data/gqa/images.zip). After downloading and unzipping, place the dataset or create a symbolic link to them in the `data/gqa` directory, with the following directory structure: + +```text +mmdetection +├── configs +├── data +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +Then download the [Flickr30k images](http://shannon.cs.illinois.edu/DenotationGraph/). 
You need to apply for access to this dataset and then download it using the provided link. After downloading and unzipping, place the dataset or create a symbolic link to them in the `data/flickr30k_entities` directory, with the following directory structure: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +For the GQA dataset, you need to use [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/goldg2odvg.py data/gqa/final_mixed_train_no_coco.json +``` + +After the program has run, a new file `final_mixed_train_no_coco_vg.json` will be created in the `data/gqa` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +| | ├── final_mixed_train_no_coco_vg.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +For the Flickr30k dataset, you need to use [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/goldg2odvg.py data/flickr30k_entities/final_flickr_separateGT_train.json +``` + +After the program has run, a new file `final_flickr_separateGT_train_vg.json` will be created in the `data/flickr30k_entities` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 4 GRIT-20M + +The corresponding training configuration is [grounding_dino_swin-t_pretrain_obj365_goldg_grit9m](./grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py). + +The GRIT dataset can be downloaded using the img2dataset package from [GRIT](https://huggingface.co/datasets/zzliang/GRIT#download-image). By default, the dataset size is 1.1T, and downloading and processing it may require at least 2T of disk space, depending on your available storage capacity. After downloading, the dataset is in its original format, which includes: + +```text +mmdetection +├── configs +├── data +│ ├── grit_raw +│ │ ├── 00000_stats.json +│ │ ├── 00000.parquet +│ │ ├── 00000.tar +│ │ ├── 00001_stats.json +│ │ ├── 00001.parquet +│ │ ├── 00001.tar +│ │ ├── ... +``` + +After downloading, further format processing is required: + +```shell +python tools/dataset_converters/grit_processing.py data/grit_raw data/grit_processed +``` + +The processed format is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── grit_processed +│ │ ├── annotations +│ │ │ ├── 00000.json +│ │ │ ├── 00001.json +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── 00000 +│ │ │ │ ├── 000000000.jpg +│ │ │ │ ├── 000000003.jpg +│ │ │ │ ├── 000000004.jpg +│ │ │ │ ├── ... +│ │ │ ├── 00001 +│ │ │ ├── ... 
+``` + +As for the GRIT dataset, you need to use [grit2odvg.py](../../tools/dataset_converters/grit2odvg.py) to convert it to the format of ODVG: + +```shell +python tools/dataset_converters/grit2odvg.py data/grit_processed/ +``` + +After the program has run, a new file `grit20m_vg.json` will be created in the `data/grit_processed` directory, which has about 9M data, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── grit_processed +| | ├── grit20m_vg.json +│ │ ├── annotations +│ │ │ ├── 00000.json +│ │ │ ├── 00001.json +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── 00000 +│ │ │ │ ├── 000000000.jpg +│ │ │ │ ├── 000000003.jpg +│ │ │ │ ├── 000000004.jpg +│ │ │ │ ├── ... +│ │ │ ├── 00001 +│ │ │ ├── ... +``` + +### 5 V3Det + +The corresponding training configurations are: + +- [grounding_dino_swin-t_pretrain_obj365_goldg_v3det](./grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py) +- [grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det](./grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py) + +The V3Det dataset can be downloaded from [opendatalab](https://opendatalab.com/V3Det/V3Det). After downloading and unzipping, place the dataset or create a symbolic link to it in the `data/v3det` directory, with the following directory structure: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +Then use [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/coco2odvg.py data/v3det/annotations/v3det_2023_v1_train.json -d v3det +``` + +After the program has run, two new files `v3det_2023_v1_train_od.json` and `v3det_2023_v1_label_map.json` will be created in the `data/v3det/annotations` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ | ├── v3det_2023_v1_train_od.json +│ │ | ├── v3det_2023_v1_label_map.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 6 Data Splitting and Visualization + +Considering that users need to prepare many datasets, which is inconvenient for confirming images and annotations before training, we provide a data splitting and visualization tool. This tool can split the dataset into a tiny version and then use a visualization script to check the correctness of the images and labels. + +1. Splitting the Dataset + +The script is located [here](../../tools/misc/split_odvg.py). Taking `Object365 v1` as an example, the command to split the dataset is as follows: + +```shell +python tools/misc/split_odvg.py data/object365_v1/ o365v1_train_od.json train your_output_dir --label-map-file o365v1_label_map.json -n 200 +``` + +After running the above script, it will create a folder structure in the `your_output_dir` directory identical to `data/object365_v1/`, but it will only save 200 training images and their corresponding json files for convenient user review. + +2. Visualizing the Original Dataset + +The script is located [here](../../tools/analysis_tools/browse_grounding_raw.py). 
Taking `Object365 v1` as an example, the command to visualize the dataset is as follows: + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/object365_v1/ o365v1_train_od.json train --label-map-file o365v1_label_map.json -o your_output_dir --not-show +``` + +After running the above script, it will generate images in the `your_output_dir` directory that include both the pictures and their labels, making it convenient for users to review. + +3. Visualizing the Output Dataset + +The script is located [here](../../tools/analysis_tools/browse_grounding_dataset.py). Users can use this script to view the results of the dataset output, including the results of data augmentation. Taking `Object365 v1` as an example, the command to visualize the dataset is as follows: + +```shell +python tools/analysis_tools/browse_grounding_dataset.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py -o your_output_dir --not-show +``` + +After running the above script, it will generate images in the `your_output_dir` directory that include both the pictures and their labels, making it convenient for users to review. + +## MM-GDINO-L Pre-training Data Preparation and Processing + +### 1 Object365 v2 + +Objects365_v2 can be downloaded from [opendatalab](https://opendatalab.com/OpenDataLab/Objects365). It offers two download methods: CLI and SDK. + +After downloading and unzipping, place the dataset or create a symbolic link to it in the `data/objects365v2` directory, with the following directory structure: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v2 +│ │ ├── annotations +│ │ │ ├── zhiyuan_objv2_train.json +│ │ ├── train +│ │ │ ├── patch0 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +Since some category names in Objects365v2 are incorrect, it is necessary to correct them first. + +```shell +python tools/dataset_converters/fix_o365_names.py +``` + +A new annotation file `zhiyuan_objv2_train_fixname.json` will be generated in the `data/objects365v2/annotations` directory. + +Then use [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/coco2odvg.py data/objects365v2/annotations/zhiyuan_objv2_train_fixname.json -d o365v2 +``` + +After the program has run, two new files `zhiyuan_objv2_train_fixname_od.json` and `o365v2_label_map.json` will be created in the `data/objects365v2` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v2 +│ │ ├── annotations +│ │ │ ├── zhiyuan_objv2_train.json +│ │ │ ├── zhiyuan_objv2_train_fixname.json +│ │ │ ├── zhiyuan_objv2_train_fixname_od.json +│ │ │ ├── o365v2_label_map.json +│ │ ├── train +│ │ │ ├── patch0 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 2 OpenImages v6 + +OpenImages v6 can be downloaded from the [official website](https://storage.googleapis.com/openimages/web/download_v6.html). Due to the large size of the dataset, it may take some time to download. After completion, the file structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── OpenImages +│ │ ├── annotations +| │ │ ├── oidv6-train-annotations-bbox.csv +| │ │ ├── class-descriptions-boxable.csv +│ │ ├── OpenImages +│ │ │ ├── train +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+``` + +Then use [openimages2odvg.py](../../tools/dataset_converters/openimages2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/openimages2odvg.py data/OpenImages/annotations +``` + +After the program has run, two new files `oidv6-train-annotation_od.json` and `openimages_label_map.json` will be created in the `data/OpenImages/annotations` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── OpenImages +│ │ ├── annotations +| │ │ ├── oidv6-train-annotations-bbox.csv +| │ │ ├── class-descriptions-boxable.csv +| │ │ ├── oidv6-train-annotations_od.json +| │ │ ├── openimages_label_map.json +│ │ ├── OpenImages +│ │ │ ├── train +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 3 V3Det + +Referring to the data preparation section of the previously mentioned MM-GDINO-T pre-training data preparation and processing, the complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ | ├── v3det_2023_v1_train_od.json +│ │ | ├── v3det_2023_v1_label_map.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 4 LVIS 1.0 + +Please refer to the `2 LVIS 1.0` section of the later `Fine-tuning Dataset Preparation`. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── lvis_v1_train.json +│ │ │ ├── lvis_v1_val.json +│ │ │ ├── lvis_v1_train_od.json +│ │ │ ├── lvis_v1_label_map.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── lvis_v1_minival_inserted_image_name.json +│ │ │ ├── lvis_od_val.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 5 COCO2017 OD + +You can refer to the earlier section `MM-GDINO-T Pre-training Data Preparation and Processing` for data preparation. For convenience in subsequent processing, please create a symbolic link or move the downloaded [mdetr_annotations](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations) folder to the `data/coco` path. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +Due to some overlap between COCO2017 train and RefCOCO/RefCOCO+/RefCOCOg/gRefCOCO val, if not removed in advance, there will be data leakage when evaluating RefExp. + +```shell +python tools/dataset_converters/remove_cocotrain2017_from_refcoco.py data/coco/mdetr_annotations data/coco/annotations/instances_train2017.json +``` + +A new file `instances_train2017_norefval.json` will be created in the `data/coco/annotations` directory. 
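+
+If you want to confirm that the overlapping images were actually removed, you can compare the two files with a quick check. The snippet below is only a hypothetical sanity check, not part of the toolchain, and assumes both annotation files follow the standard COCO layout with a top-level `images` list:
+
+```python
+import json
+
+
+def num_images(path):
+    """Count the entries in the COCO-style `images` list of an annotation file."""
+    with open(path, "r") as f:
+        return len(json.load(f)["images"])
+
+
+full = num_images("data/coco/annotations/instances_train2017.json")
+cleaned = num_images("data/coco/annotations/instances_train2017_norefval.json")
+print(f"kept {cleaned} of {full} images, removed {full - cleaned}")
+```
+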
Finally, use [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/coco2odvg.py data/coco/annotations/instances_train2017_norefval.json -d coco +``` + +Two new files `instances_train2017_norefval_od.json` and `coco_label_map.json` will be created in the `data/coco/annotations` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2017_norefval_od.json +│ │ │ ├── coco_label_map.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +Note: There are 15,000 images that overlap between the COCO2017 train and LVIS 1.0 val datasets. Therefore, if the COCO2017 train dataset is used in training, the evaluation results of LVIS 1.0 val will have a data leakage issue. However, LVIS 1.0 minival does not have this problem. + +### 6 GoldG + +Please refer to the section on `MM-GDINO-T Pre-training Data Preparation and Processing`. + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +| | ├── final_mixed_train_no_coco_vg.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 7 COCO2014 VG + +MDetr provides a Phrase Grounding version of the COCO2014 train annotations. The original annotation file is named `final_mixed_train.json`, and similar to the previous structure, the file structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_mixed_train.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +We can extract the COCO portion of the data from `final_mixed_train.json`. + +```shell +python tools/dataset_converters/extract_coco_from_mixed.py data/coco/mdetr_annotations/final_mixed_train.json +``` + +A new file named `final_mixed_train_only_coco.json` will be created in the `data/coco/mdetr_annotations` directory. Finally, use [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) to convert it into the ODVG format required for training: + +```shell +python tools/dataset_converters/goldg2odvg.py data/coco/mdetr_annotations/final_mixed_train_only_coco.json +``` + +A new file named `final_mixed_train_only_coco_vg.json` will be created in the `data/coco/mdetr_annotations` directory, with the complete structure as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_mixed_train.json +│ │ │ ├── final_mixed_train_only_coco.json +│ │ │ ├── final_mixed_train_only_coco_vg.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +Note: COCO2014 train and COCO2017 val do not have duplicate images, so there is no need to worry about data leakage issues in COCO evaluation. 
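+
+Before adding a converted file such as `final_mixed_train_only_coco_vg.json` to a training config, it can be useful to check how many records it contains. The snippet below is only a minimal, hypothetical sketch: it does not assume a particular record schema and simply tries the two layouts a converter might emit (a single JSON value, or one JSON object per line):
+
+```python
+import json
+
+path = "data/coco/mdetr_annotations/final_mixed_train_only_coco_vg.json"
+
+with open(path, "r") as f:
+    text = f.read()
+
+try:
+    # Case 1: the whole file is one JSON value (e.g. a list of records).
+    data = json.loads(text)
+    num_records = len(data) if isinstance(data, list) else 1
+except json.JSONDecodeError:
+    # Case 2: JSON Lines, i.e. one record per non-empty line.
+    num_records = sum(1 for line in text.splitlines() if line.strip())
+
+print(f"{path}: {num_records} records")
+```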
+ +### 8 Referring Expression Comprehension + +There are a total of 4 datasets included. For data preparation, please refer to the `Fine-tuning Dataset Preparation` section. + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcoco_train_vg.json +│ │ │ ├── finetune_refcoco+_train_vg.json +│ │ │ ├── finetune_refcocog_train_vg.json +│ │ │ ├── finetune_grefcoco_train_vg.json +``` + +### 9 GRIT-20M + +Please refer to the `MM-GDINO-T Pre-training Data Preparation and Processing` section. + +## Preparation of Evaluation Dataset + +### 1 COCO 2017 + +The data preparation process is consistent with the previous descriptions, and the final structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 2 LVIS 1.0 + +The LVIS 1.0 val dataset includes both mini and full versions. The significance of the mini version is: + +1. The full LVIS val evaluation dataset is quite large, and conducting an evaluation with it can take a significant amount of time. +2. In the full LVIS val dataset, there are 15,000 images from the COCO2017 train dataset. If a user has used the COCO2017 data for training, there can be a data leakage issue when evaluating on the full LVIS val dataset + +The LVIS 1.0 dataset contains images that are exactly the same as the COCO2017 dataset, with the addition of new annotations. You can download the minival annotation file from [here](https://huggingface.co/GLIPModel/GLIP/blob/main/lvis_v1_minival_inserted_image_name.json), and the val 1.0 annotation file from [here](https://huggingface.co/GLIPModel/GLIP/blob/main/lvis_od_val.json). The final structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── lvis_v1_minival_inserted_image_name.json +│ │ │ ├── lvis_od_val.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 3 ODinW + +ODinW, which stands for Object Detection in the Wild, is a dataset used to evaluate the generalization capability of grounding pre-trained models in different real-world scenarios. It consists of two subsets, ODinW13 and ODinW35, representing datasets composed of 13 and 35 different datasets, respectively. You can download it from [here](https://huggingface.co/GLIPModel/GLIP/tree/main/odinw_35), and then unzip each file. The final structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── odinw +│ │ ├── AerialMaritimeDrone +│ │ | |── large +│ │ | | ├── test +│ │ | | ├── train +│ │ | | ├── valid +│ │ | |── tiled +│ │ ├── AmericanSignLanguageLetters +│ │ ├── Aquarium +│ │ ├── BCCD +│ │ ├── ... +``` + +When evaluating ODinW35, custom prompts are required. 
Therefore, it's necessary to preprocess the annotated JSON files in advance. You can use the [override_category.py](./odinw/override_category.py) script for this purpose. After processing, it will generate new annotation files without overwriting the original ones. + +```shell +python configs/mm_grounding_dino/odinw/override_category.py data/odinw/ +``` + +### 4 DOD + +DOD stands for Described Object Detection, and it is introduced in the paper titled [Described Object Detection: Liberating Object Detection with Flexible Expressions](https://arxiv.org/abs/2307.12813). You can download the dataset from [here](https://github.com/shikras/d-cube?tab=readme-ov-file). The final structure of the dataset is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── d3 +│ │ ├── d3_images +│ │ ├── d3_json +│ │ ├── d3_pkl +``` + +### 5 Flickr30k Entities + +In the previous GoldG data preparation section, we downloaded the necessary files for training with Flickr30k. For evaluation, you will need 2 JSON files, which you can download from [here](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_flickr_separateGT_val.json) and [here](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_flickr_separateGT_test.json). The final structure of the dataset is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_val.json +│ │ ├── final_flickr_separateGT_test.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 6 Referring Expression Comprehension + +Referential Expression Comprehension includes 4 datasets: RefCOCO, RefCOCO+, RefCOCOg, and gRefCOCO. The images used in these 4 datasets are from COCO2014 train, similar to COCO2017. You can download the images from the official COCO website or opendatalab. The annotations can be directly downloaded from [here](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations). The mdetr_annotations folder contains a large number of annotations, so you can choose to download only the JSON files you need. The final structure of the dataset is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcocog_test.json +``` + +Please note that gRefCOCO is introduced in [GREC: Generalized Referring Expression Comprehension](https://arxiv.org/abs/2308.16182) and is not available in the `mdetr_annotations` folder. You will need to handle it separately. Here are the specific steps: + +1. Download [gRefCOCO](https://github.com/henghuiding/gRefCOCO?tab=readme-ov-file) and unzip it into the `data/coco/` folder. + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+│ │ ├── train2014
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── mdetr_annotations
+│ │ ├── grefs
+│ │ │ ├── grefs(unc).json
+│ │ │ ├── instances.json
+```
+
+2. Convert to COCO format
+
+You can use the official [conversion script](https://github.com/henghuiding/gRefCOCO/blob/b4b1e55b4d3a41df26d6b7d843ea011d581127d4/mdetr/scripts/fine-tuning/grefexp_coco_format.py) provided by gRefCOCO. Please note that you need to uncomment line 161 and comment out line 160 in the script to obtain the full JSON file.
+
+```shell
+# you need to clone the official repo
+git clone https://github.com/henghuiding/gRefCOCO.git
+cd gRefCOCO/mdetr
+python scripts/fine-tuning/grefexp_coco_format.py --data_path ../../data/coco/grefs --out_path ../../data/coco/mdetr_annotations/ --coco_path ../../data/coco
+```
+
+Four JSON files will be generated in the `data/coco/mdetr_annotations/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── instances_train2014.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+│ │ ├── train2014
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── mdetr_annotations
+│ │ │ ├── final_refexp_val.json
+│ │ │ ├── finetune_refcoco_testA.json
+│ │ │ ├── finetune_refcoco_testB.json
+│ │ │ ├── finetune_grefcoco_train.json
+│ │ │ ├── finetune_grefcoco_val.json
+│ │ │ ├── finetune_grefcoco_testA.json
+│ │ │ ├── finetune_grefcoco_testB.json
+```
+
+## Fine-Tuning Dataset Preparation
+
+### 1 COCO 2017
+
+COCO is the most commonly used dataset in the field of object detection, and we aim to explore its fine-tuning modes more comprehensively. From current developments, there are a total of three fine-tuning modes:
+
+1. Closed-set fine-tuning: after fine-tuning, the text-side descriptions can no longer be modified and the model becomes a closed-set algorithm. This maximizes performance on COCO but loses generality.
+2. Open-set continued pretraining fine-tuning: the COCO dataset is trained with the same pre-training approach that was used during pre-training. There are two ways to do this: the first is to lower the learning rate and freeze certain modules, training only on the COCO data; the second is to mix the COCO data with part of the pre-training data. The goal of both approaches is to improve performance on COCO as much as possible without compromising generalization.
+3. Open-vocabulary fine-tuning: following common practice in the OVD (Open-Vocabulary Detection) domain, the COCO categories are divided into base classes and novel classes. Fine-tuning is performed only on the base classes, while evaluation is conducted on both base and novel classes. This allows the COCO OVD capability to be assessed, again with the goal of improving performance on COCO as much as possible without compromising generalization.
+
+**(1) Closed-set Fine-tuning**
+
+This section does not require extra data preparation; you can directly use the data prepared previously.
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_val2017.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+**(2) Open-set Continued Pretraining Fine-tuning**
+To use this approach, you need to convert the COCO training data into ODVG format. You can use the following command for conversion:
+
+```shell
+python tools/dataset_converters/coco2odvg.py data/coco/annotations/instances_train2017.json -d coco
+```
+
+This will generate new files, `instances_train2017_od.json` and `coco2017_label_map.json`, in the `data/coco/annotations/` directory. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_train2017_od.json
+│ │ │ ├── coco2017_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+Once the data is ready, you can choose either to fine-tune on COCO alone or to mix it with part of the pre-training data.
+
+**(3) Open-vocabulary Fine-tuning**
+For this approach, you need to convert the COCO training data into OVD (Open-Vocabulary Detection) format. You can use the following command for conversion:
+
+```shell
+python tools/dataset_converters/coco2ovd.py data/coco/
+```
+
+This will generate new files, `instances_val2017_all_2.json` and `instances_val2017_seen_2.json`, in the `data/coco/annotations/` directory. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_train2017_od.json
+│ │ │ ├── instances_val2017_all_2.json
+│ │ │ ├── instances_val2017_seen_2.json
+│ │ │ ├── coco2017_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+You can then proceed to train and test directly using the [configuration](coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py).
+
+### 2 LVIS 1.0
+
+LVIS is a long-tailed, federated dataset with 1,203 classes, which makes it a valuable target for fine-tuning. Due to its large number of classes, it is not feasible to perform closed-set fine-tuning. Therefore, we can only use open-set continued pretraining fine-tuning and open-vocabulary fine-tuning on LVIS.
+
+You need to prepare the LVIS training JSON files first, which you can download from [here](https://www.lvisdataset.org/dataset). We only need `lvis_v1_train.json` and `lvis_v1_val.json`. After downloading them, place them in the `data/coco/annotations/` directory; the directory structure should then look like this:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+**(1) Open-set Continued Pretraining Fine-tuning**
+
+Convert to ODVG format using the following command:
+
+```shell
+python tools/dataset_converters/lvis2odvg.py data/coco/annotations/lvis_v1_train.json
+```
+
+It will generate new files, `lvis_v1_train_od.json` and `lvis_v1_label_map.json`, in the `data/coco/annotations/` directory, and the complete dataset structure will look like this:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── lvis_v1_train_od.json
+│ │ │ ├── lvis_v1_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+You can directly use the provided [configuration](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py) for training and testing, or you can modify the configuration to mix in some of the pre-training datasets as needed.
+
+**(2) Open-vocabulary Fine-tuning**
+
+Convert to OVD format using the following command:
+
+```shell
+python tools/dataset_converters/lvis2ovd.py data/coco/
+```
+
+New `lvis_v1_train_od_norare.json` and `lvis_v1_label_map_norare.json` files will be generated under `data/coco/annotations/`, and the complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── lvis_v1_train_od.json
+│ │ │ ├── lvis_v1_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ │ ├── lvis_v1_train_od_norare.json
+│ │ │ ├── lvis_v1_label_map_norare.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+Then you can directly use the [configuration](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py) for training and testing.
+
+### 3 RTTS
+
+RTTS is a foggy-weather dataset containing 4,322 foggy images across five classes: bicycle, bus, car, motorbike, and person. It can be downloaded from [here](https://drive.google.com/file/d/15Ei1cHGVqR1mXFep43BO7nkHq1IEGh1e/view) and then extracted to the `data/RTTS/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── RTTS
+│ │ ├── annotations_json
+│ │ ├── annotations_xml
+│ │ ├── ImageSets
+│ │ ├── JPEGImages
+```
+
+### 4 RUOD
+
+RUOD is an underwater object detection dataset. You can download it from [here](https://drive.google.com/file/d/1hxtbdgfVveUm_DJk5QXkNLokSCTa_E5o/view) and then extract it to the `data/RUOD/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── RUOD
+│ │ ├── Environment_pic
+│ │ ├── Environmet_ANN
+│ │ ├── RUOD_ANN
+│ │ ├── RUOD_pic
+```
+
+### 5 Brain Tumor
+
+Brain Tumor is a 2D detection dataset in the medical field. You can download it from [here](https://universe.roboflow.com/roboflow-100/brain-tumor-m2pbp/dataset/2); please make sure to choose the `COCO JSON` format. Then extract it to the `data/brain_tumor_v2/` folder.
The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── brain_tumor_v2 +│ │ ├── test +│ │ ├── train +│ │ ├── valid +``` + +### 6 Cityscapes + +Cityscapes is an urban street scene dataset. You can download it from [here](https://www.cityscapes-dataset.com/) or from opendatalab, and then extract it to the `data/cityscapes/` folder. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── cityscapes +│ │ ├── annotations +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +``` + +After downloading, you can use the [cityscapes.py](../../tools/dataset_converters/cityscapes.py) script to generate the required JSON format. + +```shell +python tools/dataset_converters/cityscapes.py data/cityscapes/ +``` + +Three new JSON files will be generated in the annotations directory. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── cityscapes +│ │ ├── annotations +│ │ │ ├── instancesonly_filtered_gtFine_train.json +│ │ │ ├── instancesonly_filtered_gtFine_val.json +│ │ │ ├── instancesonly_filtered_gtFine_test.json +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +``` + +### 7 People in Painting + +People in Painting is an oil painting dataset that you can download from [here](https://universe.roboflow.com/roboflow-100/people-in-paintings/dataset/2). Please make sure to choose the `COCO JSON` format. After downloading, unzip the dataset to the `data/people_in_painting_v2/` folder. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── people_in_painting_v2 +│ │ ├── test +│ │ ├── train +│ │ ├── valid +``` + +### 8 Referring Expression Comprehension + +Fine-tuning for Referential Expression Comprehension is similar to what was described earlier and includes four datasets. The dataset preparation for evaluation has already been organized. The complete dataset structure is as follows: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcocog_test.json +``` + +Then we need to convert it to the required ODVG format. Please use the [refcoco2odvg.py](../../tools/dataset_converters/refcoco2odvg.py) script to perform the conversion. + +```shell +python tools/dataset_converters/refcoco2odvg.py data/coco/mdetr_annotations +``` + +The converted dataset structure will include 4 new JSON files in the `data/coco/mdetr_annotations` directory. Here is the structure of the converted dataset: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcoco_train_vg.json +│ │ │ ├── finetune_refcoco+_train_vg.json +│ │ │ ├── finetune_refcocog_train_vg.json +│ │ │ ├── finetune_grefcoco_train_vg.json +``` diff --git a/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare_zh-CN.md b/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..10520b02fe54cda845335b55ac5bc6fa8bfdac65 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/dataset_prepare_zh-CN.md @@ -0,0 +1,1194 @@ +# 数据准备和处理 + +## MM-GDINO-T 预训练数据准备和处理 + +MM-GDINO-T 模型中我们一共提供了 5 种不同数据组合的预训练配置,数据采用逐步累加的方式进行训练,因此用户可以根据自己的实际需求准备数据。 + +### 1 Objects365 v1 + +对应的训练配置为 [grounding_dino_swin-t_pretrain_obj365](./grounding_dino_swin-t_pretrain_obj365.py) + +Objects365_v1 可以从 [opendatalab](https://opendatalab.com/OpenDataLab/Objects365_v1) 下载,其提供了 CLI 和 SDK 两者下载方式。 + +下载并解压后,将其放置或者软链接到 `data/objects365v1` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v1 +│ │ ├── objects365_train.json +│ │ ├── objects365_val.json +│ │ ├── train +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── test +``` + +然后使用 [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/coco2odvg.py data/objects365v1/objects365_train.json -d o365v1 +``` + +程序运行完成后会在 `data/objects365v1` 目录下创建 `o365v1_train_od.json` 和 `o365v1_label_map.json` 两个新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v1 +│ │ ├── objects365_train.json +│ │ ├── objects365_val.json +│ │ ├── o365v1_train_od.json +│ │ ├── o365v1_label_map.json +│ │ ├── train +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── test +``` + +### 2 COCO 2017 + +上述配置在训练过程中会评估 COCO 2017 数据集的性能,因此需要准备 COCO 2017 数据集。你可以从 [COCO](https://cocodataset.org/) 官网下载或者从 [opendatalab](https://opendatalab.com/OpenDataLab/COCO_2017) 下载 + +下载并解压后,将其放置或者软链接到 `data/coco` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 3 GoldG + +下载该数据集后就可以训练 [grounding_dino_swin-t_pretrain_obj365_goldg](./grounding_dino_swin-t_pretrain_obj365_goldg.py) 配置了。 + +GoldG 数据集包括 `GQA` 和 `Flickr30k` 两个数据集,来自 GLIP 论文中提到的 MixedGrounding 数据集,其排除了 COCO 数据集。下载链接为 [mdetr_annotations](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations),我们目前需要的是 `mdetr_annotations/final_mixed_train_no_coco.json` 和 `mdetr_annotations/final_flickr_separateGT_train.json` 文件。 + +然后下载 [GQA images](https://nlp.stanford.edu/data/gqa/images.zip) 图片。下载并解压后,将其放置或者软链接到 `data/gqa` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+``` + +然后下载 [Flickr30k images](http://shannon.cs.illinois.edu/DenotationGraph/) 图片。这个数据下载需要先申请,再获得下载链接后才可以下载。下载并解压后,将其放置或者软链接到 `data/flickr30k_entities` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +对于 GQA 数据集,你需要使用 [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/goldg2odvg.py data/gqa/final_mixed_train_no_coco.json +``` + +程序运行完成后会在 `data/gqa` 目录下创建 `final_mixed_train_no_coco_vg.json` 新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +| | ├── final_mixed_train_no_coco_vg.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +对于 Flickr30k 数据集,你需要使用 [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/goldg2odvg.py data/flickr30k_entities/final_flickr_separateGT_train.json +``` + +程序运行完成后会在 `data/flickr30k_entities` 目录下创建 `final_flickr_separateGT_train_vg.json` 新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 4 GRIT-20M + +对应的训练配置为 [grounding_dino_swin-t_pretrain_obj365_goldg_grit9m](./grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py) + +GRIT数据集可以从 [GRIT](https://huggingface.co/datasets/zzliang/GRIT#download-image) 中使用 img2dataset 包下载,默认指令下载后数据集大小为 1.1T,下载和处理预估需要至少 2T 硬盘空间,可根据硬盘容量酌情下载。下载后原始格式为: + +```text +mmdetection +├── configs +├── data +│ ├── grit_raw +│ │ ├── 00000_stats.json +│ │ ├── 00000.parquet +│ │ ├── 00000.tar +│ │ ├── 00001_stats.json +│ │ ├── 00001.parquet +│ │ ├── 00001.tar +│ │ ├── ... +``` + +下载后需要对格式进行进一步处理: + +```shell +python tools/dataset_converters/grit_processing.py data/grit_raw data/grit_processed +``` + +处理后的格式为: + +```text +mmdetection +├── configs +├── data +│ ├── grit_processed +│ │ ├── annotations +│ │ │ ├── 00000.json +│ │ │ ├── 00001.json +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── 00000 +│ │ │ │ ├── 000000000.jpg +│ │ │ │ ├── 000000003.jpg +│ │ │ │ ├── 000000004.jpg +│ │ │ │ ├── ... +│ │ │ ├── 00001 +│ │ │ ├── ... +``` + +对于 GRIT 数据集,你需要使用 [grit2odvg.py](../../tools/dataset_converters/grit2odvg.py) 转化成需要的 ODVG 格式: + +```shell +python tools/dataset_converters/grit2odvg.py data/grit_processed/ +``` + +程序运行完成后会在 `data/grit_processed` 目录下创建 `grit20m_vg.json` 新文件,大概包含 9M 条数据,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── grit_processed +| | ├── grit20m_vg.json +│ │ ├── annotations +│ │ │ ├── 00000.json +│ │ │ ├── 00001.json +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── 00000 +│ │ │ │ ├── 000000000.jpg +│ │ │ │ ├── 000000003.jpg +│ │ │ │ ├── 000000004.jpg +│ │ │ │ ├── ... +│ │ │ ├── 00001 +│ │ │ ├── ... +``` + +### 5 V3Det + +对应的训练配置为 + +- [grounding_dino_swin-t_pretrain_obj365_goldg_v3det](./grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py) +- [grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det](./grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py) + +V3Det 数据集下载可以从 [opendatalab](https://opendatalab.com/V3Det/V3Det) 下载,下载并解压后,将其放置或者软链接到 `data/v3det` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+``` + +然后使用 [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/coco2odvg.py data/v3det/annotations/v3det_2023_v1_train.json -d v3det +``` + +程序运行完成后会在 `data/v3det/annotations` 目录下创建目录下创建 `v3det_2023_v1_train_od.json` 和 `v3det_2023_v1_label_map.json` 两个新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ | ├── v3det_2023_v1_train_od.json +│ │ | ├── v3det_2023_v1_label_map.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 6 数据切分和可视化 + +考虑到用户需要准备的数据集过多,不方便对图片和标注进行训练前确认,因此我们提供了一个数据切分和可视化的工具,可以将数据集切分为 tiny 版本,然后使用可视化脚本查看图片和标签正确性。 + +1. 切分数据集 + +脚本位于 [这里](../../tools/misc/split_odvg.py), 以 `Object365 v1` 为例,切分数据集的命令如下: + +```shell +python tools/misc/split_odvg.py data/object365_v1/ o365v1_train_od.json train your_output_dir --label-map-file o365v1_label_map.json -n 200 +``` + +上述脚本运行后会在 `your_output_dir` 目录下创建和 `data/object365_v1/` 一样的文件夹结构,但是只会保存 200 张训练图片和对应的 json,方便用户查看。 + +2. 可视化原始数据集 + +脚本位于 [这里](../../tools/analysis_tools/browse_grounding_raw.py), 以 `Object365 v1` 为例,可视化数据集的命令如下: + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/object365_v1/ o365v1_train_od.json train --label-map-file o365v1_label_map.json -o your_output_dir --not-show +``` + +上述脚本运行后会在 `your_output_dir` 目录下生成同时包括图片和标签的图片,方便用户查看。 + +3. 可视化 dataset 输出的数据集 + +脚本位于 [这里](../../tools/analysis_tools/browse_grounding_dataset.py), 用户可以通过该脚本查看 dataset 输出的结果即包括了数据增强的结果。 以 `Object365 v1` 为例,可视化数据集的命令如下: + +```shell +python tools/analysis_tools/browse_grounding_dataset.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py -o your_output_dir --not-show +``` + +上述脚本运行后会在 `your_output_dir` 目录下生成同时包括图片和标签的图片,方便用户查看。 + +## MM-GDINO-L 预训练数据准备和处理 + +### 1 Object365 v2 + +Objects365_v2 可以从 [opendatalab](https://opendatalab.com/OpenDataLab/Objects365) 下载,其提供了 CLI 和 SDK 两者下载方式。 + +下载并解压后,将其放置或者软链接到 `data/objects365v2` 目录下,目录结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v2 +│ │ ├── annotations +│ │ │ ├── zhiyuan_objv2_train.json +│ │ ├── train +│ │ │ ├── patch0 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +由于 objects365v2 类别中有部分类名是错误的,因此需要先进行修正。 + +```shell +python tools/dataset_converters/fix_o365_names.py +``` + +会在 `data/objects365v2/annotations` 下生成新的标注文件 `zhiyuan_objv2_train_fixname.json`。 + +然后使用 [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/coco2odvg.py data/objects365v2/annotations/zhiyuan_objv2_train_fixname.json -d o365v2 +``` + +程序运行完成后会在 `data/objects365v2` 目录下创建 `zhiyuan_objv2_train_fixname_od.json` 和 `o365v2_label_map.json` 两个新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── objects365v2 +│ │ ├── annotations +│ │ │ ├── zhiyuan_objv2_train.json +│ │ │ ├── zhiyuan_objv2_train_fixname.json +│ │ │ ├── zhiyuan_objv2_train_fixname_od.json +│ │ │ ├── o365v2_label_map.json +│ │ ├── train +│ │ │ ├── patch0 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 2 OpenImages v6 + +OpenImages v6 可以从 [官网](https://storage.googleapis.com/openimages/web/download_v6.html) 下载,由于数据集比较大,需要花费一定的时间,下载完成后文件结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── OpenImages +│ │ ├── annotations +| │ │ ├── oidv6-train-annotations-bbox.csv +| │ │ ├── class-descriptions-boxable.csv +│ │ ├── OpenImages +│ │ │ ├── train +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+``` + +然后使用 [openimages2odvg.py](../../tools/dataset_converters/openimages2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/openimages2odvg.py data/OpenImages/annotations +``` + +程序运行完成后会在 `data/OpenImages/annotations` 目录下创建 `oidv6-train-annotation_od.json` 和 `openimages_label_map.json` 两个新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── OpenImages +│ │ ├── annotations +| │ │ ├── oidv6-train-annotations-bbox.csv +| │ │ ├── class-descriptions-boxable.csv +| │ │ ├── oidv6-train-annotations_od.json +| │ │ ├── openimages_label_map.json +│ │ ├── OpenImages +│ │ │ ├── train +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 3 V3Det + +参见前面的 MM-GDINO-T 预训练数据准备和处理 数据准备部分,完整数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── v3det +│ │ ├── annotations +│ │ | ├── v3det_2023_v1_train.json +│ │ | ├── v3det_2023_v1_train_od.json +│ │ | ├── v3det_2023_v1_label_map.json +│ │ ├── images +│ │ │ ├── a00000066 +│ │ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 4 LVIS 1.0 + +参见后面的 `微调数据集准备` 的 `2 LVIS 1.0` 部分。完整数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── lvis_v1_train.json +│ │ │ ├── lvis_v1_val.json +│ │ │ ├── lvis_v1_train_od.json +│ │ │ ├── lvis_v1_label_map.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── lvis_v1_minival_inserted_image_name.json +│ │ │ ├── lvis_od_val.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 5 COCO2017 OD + +数据准备可以参考前面的 `MM-GDINO-T 预训练数据准备和处理` 部分。为了方便后续处理,请将下载的 [mdetr_annotations](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations) 文件夹软链接或者移动到 `data/coco` 路径下 +完整数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +由于 COCO2017 train 和 RefCOCO/RefCOCO+/RefCOCOg/gRefCOCO val 中存在部分重叠,如果不提前移除,在评测 RefExp 时候会存在数据泄露。 + +```shell +python tools/dataset_converters/remove_cocotrain2017_from_refcoco.py data/coco/mdetr_annotations data/coco/annotations/instances_train2017.json +``` + +会在 `data/coco/annotations` 目录下创建 `instances_train2017_norefval.json` 新文件。最后使用 [coco2odvg.py](../../tools/dataset_converters/coco2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/coco2odvg.py data/coco/annotations/instances_train2017_norefval.json -d coco +``` + +会在 `data/coco/annotations` 目录下创建 `instances_train2017_norefval_od.json` 和 `coco_label_map.json` 两个新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2017_norefval_od.json +│ │ │ ├── coco_label_map.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... 
+``` + +注意: COCO2017 train 和 LVIS 1.0 val 数据集有 15000 张图片重复,因此一旦在训练中使用了 COCO2017 train,那么 LVIS 1.0 val 的评测结果就存在数据泄露问题,LVIS 1.0 minival 没有这个问题。 + +### 6 GoldG + +参见 MM-GDINO-T 预训练数据准备和处理 部分 + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ ├── gqa +| | ├── final_mixed_train_no_coco.json +| | ├── final_mixed_train_no_coco_vg.json +│ │ ├── images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 7 COCO2014 VG + +MDetr 中提供了 COCO2014 train 的 Phrase Grounding 版本标注, 最原始标注文件为 `final_mixed_train.json`,和之前类似,文件结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_mixed_train.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +我们可以从 `final_mixed_train.json` 中提取出 COCO 部分数据 + +```shell +python tools/dataset_converters/extract_coco_from_mixed.py data/coco/mdetr_annotations/final_mixed_train.json +``` + +会在 `data/coco/mdetr_annotations` 目录下创建 `final_mixed_train_only_coco.json` 新文件,最后使用 [goldg2odvg.py](../../tools/dataset_converters/goldg2odvg.py) 转换为训练所需的 ODVG 格式: + +```shell +python tools/dataset_converters/goldg2odvg.py data/coco/mdetr_annotations/final_mixed_train_only_coco.json +``` + +会在 `data/coco/mdetr_annotations` 目录下创建 `final_mixed_train_only_coco_vg.json` 新文件,完整结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── mdetr_annotations +│ │ │ ├── final_mixed_train.json +│ │ │ ├── final_mixed_train_only_coco.json +│ │ │ ├── final_mixed_train_only_coco_vg.json +│ │ │ ├── ... +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +注意: COCO2014 train 和 COCO2017 val 没有重复图片,因此不用担心 COCO 评测的数据泄露问题。 + +### 8 Referring Expression Comprehension + +其一共包括 4 个数据集。数据准备部分请参见 微调数据集准备 部分。 + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcoco_train_vg.json +│ │ │ ├── finetune_refcoco+_train_vg.json +│ │ │ ├── finetune_refcocog_train_vg.json +│ │ │ ├── finetune_grefcoco_train_vg.json +``` + +### 9 GRIT-20M + +参见 MM-GDINO-T 预训练数据准备和处理 部分 + +## 评测数据集准备 + +### 1 COCO 2017 + +数据准备流程和前面描述一致,最终结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 2 LVIS 1.0 + +LVIS 1.0 val 数据集包括 mini 和全量两个版本,mini 版本存在的意义是: + +1. LVIS val 全量评测数据集比较大,评测一次需要比较久的时间 +2. 
LVIS val 全量数据集中包括了 15000 张 COCO2017 train, 如果用户使用了 COCO2017 数据进行训练,那么将存在数据泄露问题 + +LVIS 1.0 图片和 COCO2017 数据集图片完全一样,只是提供了新的标注而已,minival 标注文件可以从 [这里](https://huggingface.co/GLIPModel/GLIP/blob/main/lvis_v1_minival_inserted_image_name.json)下载, val 1.0 标注文件可以从 [这里](https://huggingface.co/GLIPModel/GLIP/blob/main/lvis_od_val.json) 下载。 最终结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── lvis_v1_minival_inserted_image_name.json +│ │ │ ├── lvis_od_val.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +### 3 ODinW + +ODinw 全称为 Object Detection in the Wild,是用于验证 grounding 预训练模型在不同实际场景中的泛化能力的数据集,其包括两个子集,分别是 ODinW13 和 ODinW35,代表是由 13 和 35 个数据集组成的。你可以从 [这里](https://huggingface.co/GLIPModel/GLIP/tree/main/odinw_35)下载,然后对每个文件进行解压,最终结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── odinw +│ │ ├── AerialMaritimeDrone +│ │ | |── large +│ │ | | ├── test +│ │ | | ├── train +│ │ | | ├── valid +│ │ | |── tiled +│ │ ├── AmericanSignLanguageLetters +│ │ ├── Aquarium +│ │ ├── BCCD +│ │ ├── ... +``` + +在评测 ODinW3535 时候由于需要自定义 prompt,因此需要提前对标注的 json 文件进行处理,你可以使用 [override_category.py](./odinw/override_category.py) 脚本进行处理,处理后会生成新的标注文件,不会覆盖原先的标注文件。 + +```shell +python configs/mm_grounding_dino/odinw/override_category.py data/odinw/ +``` + +### 4 DOD + +DOD 来自 [Described Object Detection: Liberating Object Detection with Flexible Expressions](https://arxiv.org/abs/2307.12813)。其数据集可以从 [这里](https://github.com/shikras/d-cube?tab=readme-ov-file#download)下载,最终的数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── d3 +│ │ ├── d3_images +│ │ ├── d3_json +│ │ ├── d3_pkl +``` + +### 5 Flickr30k Entities + +在前面 GoldG 数据准备章节中我们已经下载了 Flickr30k 训练所需文件,评估所需的文件是 2 个 json 文件,你可以从 [这里](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_flickr_separateGT_val.json) 和 [这里](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_flickr_separateGT_test.json)下载,最终的数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── flickr30k_entities +│ │ ├── final_flickr_separateGT_train.json +│ │ ├── final_flickr_separateGT_val.json +│ │ ├── final_flickr_separateGT_test.json +│ │ ├── final_flickr_separateGT_train_vg.json +│ │ ├── flickr30k_images +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +``` + +### 6 Referring Expression Comprehension + +指代性表达式理解包括 4 个数据集: RefCOCO, RefCOCO+, RefCOCOg, gRefCOCO。这 4 个数据集所采用的图片都来自于 COCO2014 train,和 COCO2017 类似,你可以从 COCO 官方或者 opendatalab 中下载,而标注可以直接从 [这里](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations) 下载,mdetr_annotations 文件夹里面包括了其他大量的标注,你如果觉得数量过多,可以只下载所需要的几个 json 文件即可。最终的数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_refcoco+_testA.json +│ │ │ ├── finetune_refcoco+_testB.json +│ │ │ ├── finetune_refcocog_test.json +│ │ │ ├── finetune_refcocog_test.json +``` + +注意 gRefCOCO 是在 [GREC: Generalized Referring Expression Comprehension](https://arxiv.org/abs/2308.16182) 被提出,并不在 `mdetr_annotations` 文件夹中,需要自行处理。具体步骤为: + +1. 下载 [gRefCOCO](https://github.com/henghuiding/gRefCOCO?tab=readme-ov-file#grefcoco-dataset-download),并解压到 data/coco/ 文件夹中 + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ ├── grefs +│ │ │ ├── grefs(unc).json +│ │ │ ├── instances.json +``` + +2. 转换为 coco 格式 + +你可以使用 gRefCOCO 官方提供的[转换脚本](https://github.com/henghuiding/gRefCOCO/blob/b4b1e55b4d3a41df26d6b7d843ea011d581127d4/mdetr/scripts/fine-tuning/grefexp_coco_format.py)。注意需要将被注释的 161 行打开,并注释 160 行才可以得到全量的 json 文件。 + +```shell +# 需要克隆官方 repo +git clone https://github.com/henghuiding/gRefCOCO.git +cd gRefCOCO/mdetr +python scripts/fine-tuning/grefexp_coco_format.py --data_path ../../data/coco/grefs --out_path ../../data/coco/mdetr_annotations/ --coco_path ../../data/coco +``` + +会在 `data/coco/mdetr_annotations/` 文件夹中生成 4 个 json 文件,完整的数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ │ ├── instances_train2014.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +│ │ ├── train2014 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── mdetr_annotations +│ │ │ ├── final_refexp_val.json +│ │ │ ├── finetune_refcoco_testA.json +│ │ │ ├── finetune_refcoco_testB.json +│ │ │ ├── finetune_grefcoco_train.json +│ │ │ ├── finetune_grefcoco_val.json +│ │ │ ├── finetune_grefcoco_testA.json +│ │ │ ├── finetune_grefcoco_testB.json +``` + +## 微调数据集准备 + +### 1 COCO 2017 + +COCO 是检测领域最常用的数据集,我们希望能够更充分探索其微调模式。从目前发展来看,一共有 3 种微调方式: + +1. 闭集微调,即微调后文本端将无法修改描述,转变为闭集算法,在 COCO 上性能能够最大化,但是失去了通用性。 +2. 开集继续预训练微调,即对 COCO 数据集采用和预训练一致的预训练手段。此时有两种做法,第一种是降低学习率并固定某些模块,仅仅在 COCO 数据上预训练,第二种是将 COCO 数据和部分预训练数据混合一起训练,两种方式的目的都是在尽可能不降低泛化性时提高 COCO 数据集性能 +3. 开放词汇微调,即采用 OVD 领域常用做法,将 COCO 类别分成 base 类和 novel 类,训练时候仅仅在 base 类上进行,评测在 base 和 novel 类上进行。这种方式可以验证 COCO OVD 能力,目的也是在尽可能不降低泛化性时提高 COCO 数据集性能 + +**(1) 闭集微调** + +这个部分无需准备数据,直接用之前的数据即可。 + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... +│ │ ├── val2017 +│ │ │ ├── xxxx.jpg +│ │ │ ├── ... +``` + +**(2) 开集继续预训练微调** +这种方式需要将 COCO 训练数据转换为 ODVG 格式,你可以使用如下命令转换: + +```shell +python tools/dataset_converters/coco2odvg.py data/coco/annotations/instances_train2017.json -d coco +``` + +会在 `data/coco/annotations/` 下生成新的 `instances_train2017_od.json` 和 `coco2017_label_map.json`,完整的数据集结构如下: + +```text +mmdetection +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── instances_train2017.json +│ │ │ ├── instances_train2017_od.json +│ │ │ ├── coco2017_label_map.json +│ │ │ ├── instances_val2017.json +│ │ ├── train2017 +│ │ │ ├── xxx.jpg +│ │ │ ├── ... 
+
+Once the data is ready, you can choose between pretraining on COCO alone or the mixed pretraining approach.
+
+**(3) Open-vocabulary fine-tuning**
+This approach requires converting the COCO training data to the OVD format, which you can do with the following command:
+
+```shell
+python tools/dataset_converters/coco2ovd.py data/coco/
+```
+
+This generates new `instances_val2017_all_2.json` and `instances_val2017_seen_2.json` files under `data/coco/annotations/`, and the complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_train2017_od.json
+│ │ │ ├── instances_val2017_all_2.json
+│ │ │ ├── instances_val2017_seen_2.json
+│ │ │ ├── coco2017_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+You can then train and test directly with this [config](coco/grounding_dino_swin-t_finetune_16xb4_1x_coco_48_17.py).
+
+### 2 LVIS 1.0
+
+LVIS is a dataset with 1203 categories and is also a long-tailed federated dataset, so fine-tuning on it is very meaningful. Because it has so many categories, closed-set fine-tuning is not feasible; only open-set continued-pretraining fine-tuning and open-vocabulary fine-tuning can be used.
+
+You first need to prepare the LVIS training JSON files, which you can download from [here](https://www.lvisdataset.org/dataset). We only need `lvis_v1_train.json` and `lvis_v1_val.json`; place them under `data/coco/annotations/` so that the structure looks like this:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+(1) Open-set continued-pretraining fine-tuning
+
+Convert to the ODVG format with the following command:
+
+```shell
+python tools/dataset_converters/lvis2odvg.py data/coco/annotations/lvis_v1_train.json
+```
+
+This generates new `lvis_v1_train_od.json` and `lvis_v1_label_map.json` files under `data/coco/annotations/`, and the complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── lvis_v1_train_od.json
+│ │ │ ├── lvis_v1_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
+
+You can then train and test directly with this [config](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py), or modify the config to mix it with part of the pretraining datasets.
+
+**(2) Open-vocabulary fine-tuning**
+
+Convert to the OVD format with the following command:
+
+```shell
+python tools/dataset_converters/lvis2ovd.py data/coco/
+```
+
+This generates new `lvis_v1_train_od_norare.json` and `lvis_v1_label_map_norare.json` files under `data/coco/annotations/` (a quick way to verify the rare-category removal is sketched after the tree below), and the complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── lvis_v1_train.json
+│ │ │ ├── lvis_v1_val.json
+│ │ │ ├── lvis_v1_train_od.json
+│ │ │ ├── lvis_v1_label_map.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── lvis_v1_minival_inserted_image_name.json
+│ │ │ ├── lvis_od_val.json
+│ │ │ ├── lvis_v1_train_od_norare.json
+│ │ │ ├── lvis_v1_label_map_norare.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+```
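+
+As an optional sanity check (a sketch only; it assumes the two label maps are plain `{id: name}` JSON mappings written by `lvis2ovd.py`), you can compare the full label map with the no-rare one to confirm that the rare (novel) categories were dropped for open-vocabulary fine-tuning:
+
+```python
+# Compare the full LVIS label map with the "norare" variant used for OVD
+# fine-tuning (file layout assumed to be a flat {id: name} JSON mapping).
+import json
+
+with open('data/coco/annotations/lvis_v1_label_map.json') as f:
+    full = json.load(f)
+with open('data/coco/annotations/lvis_v1_label_map_norare.json') as f:
+    norare = json.load(f)
+
+# LVIS 1.0 has 1203 categories; the base/novel split used by the
+# lvis_866_337 config keeps 866 base classes, so expect roughly 1203 vs 866.
+print(len(full), len(norare))
+```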
+
+You can then train and test directly with this [config](lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py).
+
+### 3 RTTS
+
+RTTS is a dense-fog weather dataset containing 4,322 foggy images with five classes: bicycle, bus, car, motorbike and person. It can be downloaded from [here](https://drive.google.com/file/d/15Ei1cHGVqR1mXFep43BO7nkHq1IEGh1e/view) and unzipped into the `data/RTTS/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── RTTS
+│ │ ├── annotations_json
+│ │ ├── annotations_xml
+│ │ ├── ImageSets
+│ │ ├── JPEGImages
+```
+
+### 4 RUOD
+
+RUOD is an underwater object detection dataset. You can download it from [here](https://drive.google.com/file/d/1hxtbdgfVveUm_DJk5QXkNLokSCTa_E5o/view) and unzip it into the `data/RUOD/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── RUOD
+│ │ ├── Environment_pic
+│ │ ├── Environmet_ANN
+│ │ ├── RUOD_ANN
+│ │ ├── RUOD_pic
+```
+
+### 5 Brain Tumor
+
+Brain Tumor is a 2D detection dataset from the medical domain. You can download it from [here](https://universe.roboflow.com/roboflow-100/brain-tumor-m2pbp/dataset/2); make sure to select the `COCO JSON` format. Then unzip it into the `data/brain_tumor_v2/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── brain_tumor_v2
+│ │ ├── test
+│ │ ├── train
+│ │ ├── valid
+```
+
+### 6 Cityscapes
+
+Cityscapes is an urban street-scene dataset. You can download it from [here](https://www.cityscapes-dataset.com/) or from opendatalab and unzip it into the `data/cityscapes/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── cityscapes
+│ │ ├── annotations
+│ │ ├── leftImg8bit
+│ │ │ ├── train
+│ │ │ ├── val
+│ │ ├── gtFine
+│ │ │ ├── train
+│ │ │ ├── val
+```
+
+After downloading, use the [cityscapes.py](../../tools/dataset_converters/cityscapes.py) script to generate the JSON files we need:
+
+```shell
+python tools/dataset_converters/cityscapes.py data/cityscapes/
+```
+
+This generates 3 new JSON files in `annotations`. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── cityscapes
+│ │ ├── annotations
+│ │ │ ├── instancesonly_filtered_gtFine_train.json
+│ │ │ ├── instancesonly_filtered_gtFine_val.json
+│ │ │ ├── instancesonly_filtered_gtFine_test.json
+│ │ ├── leftImg8bit
+│ │ │ ├── train
+│ │ │ ├── val
+│ │ ├── gtFine
+│ │ │ ├── train
+│ │ │ ├── val
+```
+
+### 7 People in Painting
+
+People in Painting is an oil-painting dataset. You can download it from [here](https://universe.roboflow.com/roboflow-100/people-in-paintings/dataset/2); make sure to select the `COCO JSON` format. Then unzip it into the `data/people_in_painting_v2/` folder. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── people_in_painting_v2
+│ │ ├── test
+│ │ ├── train
+│ │ ├── valid
+```
+
+### 8 Referring Expression Comprehension
+
+Fine-tuning for referring expression comprehension uses the same 4 datasets as before, and everything was already organized during the evaluation data preparation stage. The complete dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── instances_train2014.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+│ │ ├── train2014
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── mdetr_annotations
+│ │ │ ├── final_refexp_val.json
+│ │ │ ├── finetune_refcoco_testA.json
+│ │ │ ├── finetune_refcoco_testB.json
+│ │ │ ├── finetune_refcoco+_testA.json
+│ │ │ ├── finetune_refcoco+_testB.json
+│ │ │ ├── finetune_refcocog_test.json
+```
+
+We then need to convert these into the required ODVG format. Please use the [refcoco2odvg.py](../../tools/dataset_converters/refcoco2odvg.py) script for the conversion:
+
+```shell
+python tools/dataset_converters/refcoco2odvg.py data/coco/mdetr_annotations
+```
+
+This generates 4 new JSON files in `data/coco/mdetr_annotations`. The converted dataset structure is as follows:
+
+```text
+mmdetection
+├── configs
+├── data
+│ ├── coco
+│ │ ├── annotations
+│ │ │ ├── instances_train2017.json
+│ │ │ ├── instances_val2017.json
+│ │ │ ├── instances_train2014.json
+│ │ ├── train2017
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── val2017
+│ │ │ ├── xxxx.jpg
+│ │ │ ├── ...
+│ │ ├── train2014
+│ │ │ ├── xxx.jpg
+│ │ │ ├── ...
+│ │ ├── mdetr_annotations
+│ │ │ ├── final_refexp_val.json
+│ │ │ ├── finetune_refcoco_testA.json
+│ │ │ ├── finetune_refcoco_testB.json
+│ │ │ ├── finetune_refcoco+_testA.json
+│ │ │ ├── finetune_refcoco+_testB.json
+│ │ │ ├── finetune_refcocog_test.json
+│ │ │ ├── finetune_refcoco_train_vg.json
+│ │ │ ├── finetune_refcoco+_train_vg.json
+│ │ │ ├── finetune_refcocog_train_vg.json
+│ │ │ ├── finetune_grefcoco_train_vg.json
+```
diff --git a/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py b/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py
new file mode 100644
index 0000000000000000000000000000000000000000..e59a0a52518aa125d556aab12f8076a95f39ec22
--- /dev/null
+++ b/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py
@@ -0,0 +1,78 @@
+_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
+
+data_root = 'data/d3/'
+
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile', backend_args=None,
+        imdecode_backend='pillow'),
+    dict(
+        type='FixScaleResize',
+        scale=(800, 1333),
+        keep_ratio=True,
+        backend='pillow'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor', 'text', 'custom_entities', 'sent_ids'))
+]
+
+# -------------------------------------------------#
+val_dataset_full = dict(
+    type='DODDataset',
+    data_root=data_root,
+    ann_file='d3_json/d3_full_annotations.json',
+    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
+    pipeline=test_pipeline,
+    test_mode=True,
+    backend_args=None,
+    return_classes=True)
+
+val_evaluator_full = dict(
+    type='DODCocoMetric',
+    ann_file=data_root + 'd3_json/d3_full_annotations.json')
+
+# -------------------------------------------------#
+val_dataset_pres = dict(
+    type='DODDataset',
+    data_root=data_root,
+    ann_file='d3_json/d3_pres_annotations.json',
+    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
+    pipeline=test_pipeline,
+    test_mode=True,
+    backend_args=None,
+    return_classes=True)
+val_evaluator_pres = dict(
+    type='DODCocoMetric',
+    ann_file=data_root + 'd3_json/d3_pres_annotations.json')
+
+# -------------------------------------------------#
+val_dataset_abs = dict(
+    type='DODDataset',
+    data_root=data_root,
+    ann_file='d3_json/d3_abs_annotations.json',
+    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
+    pipeline=test_pipeline,
+    test_mode=True,
+    backend_args=None,
+    return_classes=True)
+val_evaluator_abs = dict(
+    type='DODCocoMetric',
+    ann_file=data_root + 'd3_json/d3_abs_annotations.json')
+
+# -------------------------------------------------# +datasets = [val_dataset_full, val_dataset_pres, val_dataset_abs] +dataset_prefixes = ['FULL', 'PRES', 'ABS'] +metrics = [val_evaluator_full, val_evaluator_pres, val_evaluator_abs] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py b/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py new file mode 100644 index 0000000000000000000000000000000000000000..3d680091162e5ac96c15c76b58a18764e85d3233 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/dod/grounding_dino_swin-t_pretrain_zeroshot_parallel_dod.py @@ -0,0 +1,3 @@ +_base_ = 'grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py' + +model = dict(test_cfg=dict(chunked_size=1)) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_flickr30k.py b/mmpose/configs/mmdet/mm_grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_flickr30k.py new file mode 100644 index 0000000000000000000000000000000000000000..e9eb783da97a6d665002cc9192f740010282870e --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/flickr30k/grounding_dino_swin-t-pretrain_flickr30k.py @@ -0,0 +1,57 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +dataset_type = 'Flickr30kDataset' +data_root = 'data/flickr30k_entities/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive', 'phrase_ids', 'phrases')) +] + +dataset_Flickr30k_val = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_val.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +dataset_Flickr30k_test = dict( + type=dataset_type, + data_root=data_root, + ann_file='final_flickr_separateGT_test.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, +) + +val_evaluator_Flickr30k = dict(type='Flickr30kMetric') + +test_evaluator_Flickr30k = dict(type='Flickr30kMetric') + +# ----------Config---------- # +dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest'] +datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test] +metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_all.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_all.py new file mode 100644 index 0000000000000000000000000000000000000000..eff58bba6b192fe43e62cb1e3ae40a546e1a3ddf --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_all.py @@ -0,0 +1,335 @@ +_base_ = 
'grounding_dino_swin-t_pretrain_obj365.py' + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth' # noqa + +model = dict( + use_autocast=True, + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=True, + convert_weights=True, + frozen_stages=-1, + init_cfg=None), + neck=dict(in_channels=[256, 512, 1024]), +) + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +v3d_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +v3det_dataset = dict( + type='ODVGDataset', + data_root='data/V3Det/', + ann_file='annotations/v3det_2023_v1_train_od.json', + label_map_file='annotations/v3det_2023_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=v3d_train_pipeline, + return_classes=True, + backend_args=None) + +grit_dataset = dict( + type='ODVGDataset', + 
data_root='grit_processed/', + ann_file='grit20m_vg.json', + label_map_file=None, + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- lvis od dataset--------------------------- +lvis_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/coco/annotations/lvis_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +lvis_dataset = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='annotations/lvis_v1_train_od.json', + label_map_file='annotations/lvis_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=lvis_train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- coco2017 od dataset--------------------------- +coco2017_train_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='annotations/instance_train2017_norefval_od.json', + label_map_file='annotations/coco2017_label_map.json', + data_prefix=dict(img='train2017'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- coco2014 vg dataset--------------------------- +coco2014_vg_dataset = dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/final_mixed_train_only_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- refcoco vg dataset--------------------------- +refcoco_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcoco_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- 
refcoco+ vg dataset--------------------------- +refcoco_plus_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcoco+_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- refcocog vg dataset--------------------------- +refcocog_dataset = dict( + type='RepeatDataset', + times=3, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcocog_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- grefcoco vg dataset--------------------------- +grefcoco_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_grefcoco_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- dataloader--------------------------- +train_dataloader = dict( + batch_size=4, + num_workers=4, + sampler=dict( + _delete_=True, + type='CustomSampleSizeSampler', + ratio_mode=True, + dataset_size=[-1, -1, 0.07, -1, -1, -1, -1, -1, -1, -1, -1, -1]), + dataset=dict(datasets=[ + o365v1_od_dataset, # 1.74M + v3det_dataset, # + grit_dataset, + lvis_dataset, + coco2017_train_dataset, # 0.12M + flickr30k_dataset, # 0.15M + gqa_dataset, # 0.62M + coco2014_vg_dataset, # 0.49M + refcoco_dataset, # 0.12M + refcoco_plus_dataset, # 0.12M + refcocog_dataset, # 0.08M + grefcoco_dataset, # 0.19M + ])) + +optim_wrapper = dict(optimizer=dict(lr=0.0001)) + +# learning policy +max_iter = 304680 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=10000) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[228510], + gamma=0.1) +] + +default_hooks = dict( + checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=20)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det.py new file mode 100644 index 0000000000000000000000000000000000000000..743d02cffbe9c38977edad2bce8a53bd6a8594af --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det.py @@ -0,0 +1,143 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa +model = dict( + use_autocast=True, + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=True, + convert_weights=True, + frozen_stages=-1, + 
init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[256, 512, 1024]), +) + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +v3d_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +v3det_dataset = dict( + type='ODVGDataset', + data_root='data/V3Det/', + ann_file='annotations/v3det_2023_v1_train_od.json', + label_map_file='annotations/v3det_2023_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=v3d_train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + dataset=dict(datasets=[ + o365v1_od_dataset, flickr30k_dataset, gqa_dataset, v3det_dataset + ])) + +# learning policy +max_epochs = 18 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[13, 16], + gamma=0.1) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_all.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_all.py new file mode 100644 index 
0000000000000000000000000000000000000000..a17f2344e14d8af81bd267d8bd47662f7e6e059d --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_all.py @@ -0,0 +1,540 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth' # noqa + +num_levels = 5 +model = dict( + use_autocast=True, + num_feature_levels=num_levels, + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=True, + convert_weights=True, + frozen_stages=-1, + init_cfg=None), + neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels), + encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))), + decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels)))) + +# --------------------------- object365v2 od dataset--------------------------- +# objv2_backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/objects365v2/': 'yudong:s3://wangyudong/obj365_v2/', +# 'data/objects365v2/': 'yudong:s3://wangyudong/obj365_v2/' +# })) +objv2_backend_args = None + +objv2_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=objv2_backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/objects365v2/annotations/o365v2_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +o365v2_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v2/', + ann_file='annotations/zhiyuan_objv2_train_od.json', + label_map_file='annotations/o365v2_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=objv2_train_pipeline, + return_classes=True, + need_text=False, + backend_args=None, +) + +# --------------------------- openimagev6 od dataset--------------------------- +# oi_backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# 
'./data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +oi_backend_args = None + +oi_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=oi_backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/OpenImages/annotations/openimages_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +oiv6_dataset = dict( + type='ODVGDataset', + data_root='data/OpenImages/', + ann_file='annotations/oidv6-train-annotations_od.json', + label_map_file='annotations/openimages_label_map.json', + data_prefix=dict(img='OpenImages/train/'), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, + pipeline=oi_train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- v3det od dataset--------------------------- +v3d_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +v3det_dataset = dict( + type='RepeatDataset', + times=2, + 
dataset=dict( + type='ODVGDataset', + data_root='data/V3Det/', + ann_file='annotations/v3det_2023_v1_train_od.json', + label_map_file='annotations/v3det_2023_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, + pipeline=v3d_train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- lvis od dataset--------------------------- +lvis_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/coco/annotations/lvis_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +lvis_dataset = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='annotations/lvis_v1_train_od.json', + label_map_file='annotations/lvis_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=lvis_train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- coco2017 od dataset--------------------------- +coco2017_train_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='annotations/instance_train2017_norefval_od.json', + label_map_file='annotations/coco2017_label_map.json', + data_prefix=dict(img='train2017'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- flickr30k vg dataset--------------------------- +flickr30k_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- gqa vg dataset--------------------------- +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + 
pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- coco2014 vg dataset--------------------------- +coco2014_vg_dataset = dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/final_mixed_train_only_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- refcoco vg dataset--------------------------- +refcoco_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcoco_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- refcoco+ vg dataset--------------------------- +refcoco_plus_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcoco+_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- refcocog vg dataset--------------------------- +refcocog_dataset = dict( + type='RepeatDataset', + times=3, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_refcocog_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- grefcoco vg dataset--------------------------- +grefcoco_dataset = dict( + type='RepeatDataset', + times=2, + dataset=dict( + type='ODVGDataset', + data_root='data/coco/', + ann_file='mdetr_annotations/finetune_grefcoco_train_vg.json', + label_map_file=None, + data_prefix=dict(img='train2014'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None)) + +# --------------------------- grit vg dataset--------------------------- +# grit_backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/grit/': 'yichen:s3://chenyicheng/grit/', +# 'data/grit/': 'yichen:s3://chenyicheng/grit/' +# })) +grit_backend_args = None + +grit_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=grit_backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + 
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +grit_dataset = dict( + type='ODVGDataset', + data_root='data/grit/', + ann_file='grit20m_vg.json', + label_map_file=None, + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=grit_train_pipeline, + return_classes=True, + backend_args=None) + +# --------------------------- dataloader--------------------------- +train_dataloader = dict( + batch_size=4, + num_workers=4, + sampler=dict( + _delete_=True, + type='CustomSampleSizeSampler', + ratio_mode=True, + # OD ~ 1.74+1.67*0.5+0.18*2+0.12*2+0.1=3.2 + # vg ~ 0.15*2+0.62*1+0.49*1+0.12*2+0.12*2+0.08*3+0.19*2+9*0.09=3.3 + dataset_size=[-1, 0.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0.09]), + dataset=dict(datasets=[ + o365v2_dataset, # 1.74M + oiv6_dataset, # 1.67M + v3det_dataset, # 0.18M + coco2017_train_dataset, # 0.12M + lvis_dataset, # 0.1M + flickr30k_dataset, # 0.15M + gqa_dataset, # 0.62M + coco2014_vg_dataset, # 0.49M + refcoco_dataset, # 0.12M + refcoco_plus_dataset, # 0.12M + refcocog_dataset, # 0.08M + grefcoco_dataset, # 0.19M + grit_dataset # 9M + ])) + +# 4NODES * 8GPU +optim_wrapper = dict(optimizer=dict(lr=0.0001)) + +max_iter = 250000 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=13000) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[210000], + gamma=0.1) +] + +default_hooks = dict( + checkpoint=dict(by_epoch=False, interval=13000, max_keep_ckpts=30)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg.py new file mode 100644 index 0000000000000000000000000000000000000000..85d43f96b3bdf79081dfb091c1cc8b6c03de7252 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg.py @@ -0,0 +1,227 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +num_levels = 5 +model = dict( + use_autocast=True, + num_feature_levels=num_levels, + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=True, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels), + encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))), + decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels)))) + +# --------------------------- object365v2 od dataset--------------------------- +# 
objv2_backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/objects365v2/': 'yudong:s3://wangyudong/obj365_v2/', +# 'data/objects365v2/': 'yudong:s3://wangyudong/obj365_v2/' +# })) +objv2_backend_args = None + +objv2_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=objv2_backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/objects365v2/annotations/o365v2_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +o365v2_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v2/', + ann_file='annotations/zhiyuan_objv2_train_od.json', + label_map_file='annotations/o365v2_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=objv2_train_pipeline, + return_classes=True, + need_text=False, + backend_args=None, +) + +# --------------------------- openimagev6 od dataset--------------------------- +# oi_backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +oi_backend_args = None + +oi_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=oi_backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + 
label_map_file='data/OpenImages/annotations/openimages_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +oiv6_dataset = dict( + type='ODVGDataset', + data_root='data/OpenImages/', + ann_file='annotations/oidv6-train-annotations_od.json', + label_map_file='annotations/openimages_label_map.json', + data_prefix=dict(img='OpenImages/train/'), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, + pipeline=oi_train_pipeline, + return_classes=True, + backend_args=None) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + dataset=dict(datasets=[ + o365v2_dataset, oiv6_dataset, flickr30k_dataset, gqa_dataset + ])) + +# 4Nodex8GPU +optim_wrapper = dict(optimizer=dict(lr=0.0002)) + +max_iter = 200000 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=13000) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[156100], + gamma=0.5) +] + +default_hooks = dict( + checkpoint=dict(by_epoch=False, interval=13000, max_keep_ckpts=30)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3b35894eb5fcee6db9f02c2ab8a837cd6da20b --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py @@ -0,0 +1,102 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(220, 20, 60)]) + +model = dict(bbox_head=dict(num_classes=num_classes)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + 
keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + return_classes=True, + pipeline=train_pipeline, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +max_epoch = 20 + +default_hooks = dict( + checkpoint=dict(interval=1, max_keep_ckpts=1, save_best='auto'), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epoch, val_interval=1) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epoch, + by_epoch=True, + milestones=[15], + gamma=0.1) +] + +optim_wrapper = dict( + optimizer=dict(lr=0.0001), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.0), + 'language_model': dict(lr_mult=0.0) + })) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py new file mode 100644 index 0000000000000000000000000000000000000000..66060f45ea735ab5bbd8e1852c035ea20adcbd80 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py @@ -0,0 +1,247 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +lang_model_name = 'bert-base-uncased' + +model = dict( + type='GroundingDINO', + num_queries=900, + with_box_refine=True, + as_two_stage=True, + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=False, + ), + language_model=dict( + type='BertModel', + name=lang_model_name, + max_tokens=256, + pad_to_max=False, + use_sub_sentence_represent=True, + special_tokens_list=['[CLS]', '[SEP]', '.', '?'], + add_pooling_layer=False, + ), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=True, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict( + type='ChannelMapper', + in_channels=[192, 384, 768], + kernel_size=1, + out_channels=256, + act_cfg=None, + bias=True, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + encoder=dict( + num_layers=6, + num_cp=6, + # visual layer config + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0), + ffn_cfg=dict( + 
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + # text layer config + text_layer_cfg=dict( + self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)), + # fusion layer config + fusion_layer_cfg=dict( + v_dim=256, + l_dim=256, + embed_dim=1024, + num_heads=4, + init_values=1e-4), + ), + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + # query self attention layer + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to text + cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to image + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + post_norm_cfg=None), + positional_encoding=dict( + num_feats=128, normalize=True, offset=0.0, temperature=20), + bbox_head=dict( + type='GroundingDINOHead', + num_classes=256, + sync_cls_avg_factor=True, + contrastive_cfg=dict(max_text_len=256, log_scale='auto', bias=True), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), # 2.0 in DeformDETR + loss_bbox=dict(type='L1Loss', loss_weight=5.0)), + dn_cfg=dict( # TODO: Move to model.train_cfg ? + label_noise_scale=0.5, + box_noise_scale=1.0, # 0.4 for DN-DETR + group_cfg=dict(dynamic=True, num_groups=None, + num_dn_queries=100)), # TODO: half num_dn_queries + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='BinaryFocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ])), + test_cfg=dict(max_per_img=300)) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 
'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +dataset_type = 'ODVGDataset' +data_root = 'data/objects365v1/' + +coco_od_dataset = dict( + type=dataset_type, + data_root=data_root, + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + _delete_=True, + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict(type='ConcatDataset', datasets=[coco_od_dataset])) + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, return_classes=True)) +test_dataloader = val_dataloader + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0004, + weight_decay=0.0001), # bs=16 0.0001 + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + 'language_model': dict(lr_mult=0.1), + })) + +# learning policy +max_epochs = 30 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[19, 26], + gamma=0.1) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (16 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) + +default_hooks = dict(visualization=dict(type='GroundingVisualizationHook')) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg.py new file mode 100644 index 0000000000000000000000000000000000000000..b7f388bdd4e8b61d1e7b6fd19445b3628164c4a0 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg.py @@ -0,0 +1,38 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + dataset=dict(datasets=[o365v1_od_dataset, flickr30k_dataset, gqa_dataset])) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py new file mode 100644 index 
0000000000000000000000000000000000000000..8e9f5ca4aaba7afb631f76b8a575101868fed2a4 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py @@ -0,0 +1,55 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +grit_dataset = dict( + type='ODVGDataset', + data_root='grit_processed/', + ann_file='grit20m_vg.json', + label_map_file=None, + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + sampler=dict( + _delete_=True, + type='CustomSampleSizeSampler', + dataset_size=[-1, -1, -1, 500000]), + dataset=dict(datasets=[ + o365v1_od_dataset, flickr30k_dataset, gqa_dataset, grit_dataset + ])) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py new file mode 100644 index 0000000000000000000000000000000000000000..56e500c86932a8e61dba88fde2bfc00c0ced5585 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py @@ -0,0 +1,117 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +v3d_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), 
(704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +v3det_dataset = dict( + type='ODVGDataset', + data_root='data/V3Det/', + ann_file='annotations/v3det_2023_v1_train_od.json', + label_map_file='annotations/v3det_2023_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=v3d_train_pipeline, + return_classes=True, + backend_args=None) + +grit_dataset = dict( + type='ODVGDataset', + data_root='grit_processed/', + ann_file='grit20m_vg.json', + label_map_file=None, + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + sampler=dict( + _delete_=True, + type='CustomSampleSizeSampler', + dataset_size=[-1, -1, -1, -1, 500000]), + dataset=dict(datasets=[ + o365v1_od_dataset, flickr30k_dataset, gqa_dataset, v3det_dataset, + grit_dataset + ])) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py new file mode 100644 index 0000000000000000000000000000000000000000..c89014fbbe43a1e7787fa46d7d850d42a64ff8a9 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py @@ -0,0 +1,101 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +o365v1_od_dataset = dict( + type='ODVGDataset', + data_root='data/objects365v1/', + ann_file='o365v1_train_odvg.json', + label_map_file='o365v1_label_map.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None, +) + +flickr30k_dataset = dict( + type='ODVGDataset', + data_root='data/flickr30k_entities/', + ann_file='final_flickr_separateGT_train_vg.json', + label_map_file=None, + data_prefix=dict(img='flickr30k_images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +gqa_dataset = dict( + type='ODVGDataset', + data_root='data/gqa/', + ann_file='final_mixed_train_no_coco_vg.json', + label_map_file=None, + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False), + pipeline=_base_.train_pipeline, + return_classes=True, + backend_args=None) + +v3d_train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', 
with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] +v3det_dataset = dict( + type='ODVGDataset', + data_root='data/V3Det/', + ann_file='annotations/v3det_2023_v1_train_od.json', + label_map_file='annotations/v3det_2023_v1_label_map.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + need_text=False, # change this + pipeline=v3d_train_pipeline, + return_classes=True, + backend_args=None) + +train_dataloader = dict( + dataset=dict(datasets=[ + o365v1_od_dataset, flickr30k_dataset, gqa_dataset, v3det_dataset + ])) diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..6dc8dcd8df4b98a3fdb3aa26d73ce353b9251f50 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py @@ -0,0 +1,43 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadTextAnnotations'), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +data_root = 'data/cat/' + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=False, + dataset=dict( + type='ODVGDataset', + data_root=data_root, + label_map_file='cat_label_map.json', + ann_file='cat_train_od.json', + data_prefix=dict(img='images/'), + pipeline=test_pipeline, + return_classes=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + outfile_path=data_root + 'cat_train_od_v1.json', + img_prefix=data_root + 'images/', + score_thr=0.7, + nms_thr=0.5, + type='DumpODVGResults') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py new file mode 100644 index 
0000000000000000000000000000000000000000..78bf1c344bf7c795ace08283b745527dfc9b15f7 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py @@ -0,0 +1,42 @@ +_base_ = 'grounding_dino_swin-t_pretrain_obj365.py' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadTextAnnotations'), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +data_root = 'data/flickr30k_entities/' + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=False, + dataset=dict( + type='ODVGDataset', + data_root=data_root, + ann_file='flickr_simple_train_vg.json', + data_prefix=dict(img='flickr30k_images/'), + pipeline=test_pipeline, + return_classes=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + outfile_path=data_root + 'flickr_simple_train_vg_v1.json', + img_prefix=data_root + 'flickr30k_images/', + score_thr=0.4, + nms_thr=0.5, + type='DumpODVGResults') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba12c9067511b00b616781ca0cf2e477e5e689e --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis.py @@ -0,0 +1,120 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/coco/annotations/lvis_v1_label_map.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='ODVGDataset', + data_root=data_root, + need_text=False, + label_map_file='annotations/lvis_v1_label_map.json', + 
ann_file='annotations/lvis_v1_train_od.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline))) + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type='LVISV1Dataset', + ann_file='annotations/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + + 'annotations/lvis_v1_minival_inserted_image_name.json') +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=3) + +default_hooks = dict( + checkpoint=dict( + max_keep_ckpts=1, save_best='lvis_fixed_ap/AP', rule='greater')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py new file mode 100644 index 0000000000000000000000000000000000000000..28d0141d3e2c0feba26ae4ed924000960c311bf5 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_finetune_16xb4_1x_lvis_866_337.py @@ -0,0 +1,120 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + # change this + label_map_file='data/coco/annotations/lvis_v1_label_map_norare.json', + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = 
dict( + dataset=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='ODVGDataset', + data_root=data_root, + need_text=False, + label_map_file='annotations/lvis_v1_label_map_norare.json', + ann_file='annotations/lvis_v1_train_od_norare.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline))) + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type='LVISV1Dataset', + ann_file='annotations/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + + 'annotations/lvis_v1_minival_inserted_image_name.json') +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.00005, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=3) + +default_hooks = dict( + checkpoint=dict( + max_keep_ckpts=3, save_best='lvis_fixed_ap/AP', rule='greater')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4ed438e0b59ca4c991836310cf7103cc02f0f2 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_lvis.py @@ -0,0 +1,24 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_od_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + 'annotations/lvis_od_val.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..406a39a4264a0d6ea5d7950a205b0bac72e8f846 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/lvis/grounding_dino_swin-t_pretrain_zeroshot_mini-lvis.py @@ -0,0 +1,25 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +model = dict(test_cfg=dict( + max_per_img=300, + chunked_size=40, +)) + +dataset_type = 'LVISV1Dataset' +data_root = 'data/coco/' + +val_dataloader = dict( + dataset=dict( + data_root=data_root, + type=dataset_type, + ann_file='annotations/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''))) 
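+# Zero-shot LVIS evaluation only overrides the dataloaders and the evaluator;
+# the Objects365-pretrained detector is reused unchanged. `chunked_size=40` in
+# model.test_cfg is understood to split the large LVIS vocabulary into groups
+# of 40 class names per forward pass, keeping each text prompt within the
+# 256-token budget of the BERT language model (max_tokens=256).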
+test_dataloader = val_dataloader + +# numpy < 1.24.0 +val_evaluator = dict( + _delete_=True, + type='LVISFixedAPMetric', + ann_file=data_root + + 'annotations/lvis_v1_minival_inserted_image_name.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/metafile.yml b/mmpose/configs/mmdet/mm_grounding_dino/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c104ac051363ab1ed033061e7b01274404d300d1 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/metafile.yml @@ -0,0 +1,90 @@ +Collections: + - Name: MM Grounding DINO + Metadata: + Training Data: Objects365, GoldG, GRIT and V3Det + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 3090 GPUs + Architecture: + - Swin Transformer + - BERT + README: configs/mm_grounding_dino/README.md + Code: + URL: + Version: v3.0.0 + +Models: + - Name: grounding_dino_swin-t_pretrain_obj365_goldg + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602-4ea751ce.pth + - Name: grounding_dino_swin-t_pretrain_obj365_goldg_grit9m + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.5 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818-169cc352.pth + - Name: grounding_dino_swin-t_pretrain_obj365_goldg_v3det + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.6 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741-e316e297.pth + - Name: grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth + - Name: grounding_dino_swin-b_pretrain_obj365_goldg_v3det + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.5 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth + - Name: grounding_dino_swin-b_pretrain_all + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-b_pretrain_all.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 59.5 + Weights: 
https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_all/grounding_dino_swin-b_pretrain_all-f9818a7c.pth + - Name: grounding_dino_swin-l_pretrain_obj365_goldg + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 53.0 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth + - Name: grounding_dino_swin-l_pretrain_all + In Collection: MM Grounding DINO + Config: configs/mm_grounding_dino/grounding_dino_swin-l_pretrain_all.py + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 60.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_all/grounding_dino_swin-l_pretrain_all-56d69e78.pth diff --git a/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py b/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py new file mode 100644 index 0000000000000000000000000000000000000000..d87ca7ca1ea48a3cff83e15f3e2ad66927598d7f --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py @@ -0,0 +1,338 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' # noqa + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + test_mode=True, + pipeline=base_test_pipeline, + return_classes=True) +val_evaluator_AerialMaritimeDrone = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' + +caption_prompt = None +# caption_prompt = { +# 'penguin': { +# 'suffix': ', which is black and white' +# }, +# 'puffin': { +# 'suffix': ' with orange beaks' +# }, +# 'stingray': { +# 'suffix': ' which is flat and round' +# }, +# } +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 CottontailRabbits---------------------# +class_name = ('Cottontail-Rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' + +# caption_prompt = None +caption_prompt = {'Cottontail-Rabbit': {'name': 'rabbit'}} + 
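+# caption_prompt is a light prompt-engineering hook: 'name' replaces how the
+# class is phrased in the grounding text prompt (here 'Cottontail-Rabbit' is
+# prompted simply as 'rabbit'), while 'prefix'/'suffix' wrap it with extra
+# context (see the Packages and pothole entries below). The COCO-style
+# evaluation still scores predictions against the original category ids.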
+dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 EgoHands---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' + +# caption_prompt = None +caption_prompt = {'hand': {'suffix': ' of a person'}} + +dataset_EgoHands = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 NorthAmericaMushrooms---------------------# +class_name = ('CoW', 'chanterelle') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + +# caption_prompt = None +caption_prompt = { + 'CoW': { + 'name': 'flat mushroom' + }, + 'chanterelle': { + 'name': 'yellow mushroom' + } +} + +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' + +# caption_prompt = None +caption_prompt = { + 'package': { + 'prefix': 'there is a ', + 'suffix': ' on the porch' + } +} + +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------7 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) 
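+# With return_classes=True the class names in `metainfo` are passed to the
+# model as the grounding text prompt (roughly 'pistol .' for this dataset),
+# so each ODinW task is evaluated as open-vocabulary detection over its own
+# label set.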
+_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------9 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' + +# caption_prompt = None +caption_prompt = { + 'pothole': { + 'prefix': 'there are some ', + 'name': 'holes', + 'suffix': ' on the road' + } +} + +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------10 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------11 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + 
metric='bbox') + +# --------------------- Config---------------------# +dataset_prefixes = [ + 'AerialMaritimeDrone', 'Aquarium', 'CottontailRabbits', 'EgoHands', + 'NorthAmericaMushrooms', 'Packages', 'PascalVOC', 'pistols', 'pothole', + 'Raccoon', 'ShellfishOpenImages', 'thermalDogsAndPeople', + 'VehiclesOpenImages' +] +datasets = [ + dataset_AerialMaritimeDrone, dataset_Aquarium, dataset_CottontailRabbits, + dataset_EgoHands, dataset_NorthAmericaMushrooms, dataset_Packages, + dataset_PascalVOC, dataset_pistols, dataset_pothole, dataset_Raccoon, + dataset_ShellfishOpenImages, dataset_thermalDogsAndPeople, + dataset_VehiclesOpenImages +] +metrics = [ + val_evaluator_AerialMaritimeDrone, val_evaluator_Aquarium, + val_evaluator_CottontailRabbits, val_evaluator_EgoHands, + val_evaluator_NorthAmericaMushrooms, val_evaluator_Packages, + val_evaluator_PascalVOC, val_evaluator_pistols, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_ShellfishOpenImages, + val_evaluator_thermalDogsAndPeople, val_evaluator_VehiclesOpenImages +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py b/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b8566aed486ef48653b6e54200cb8817910f2f --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw35.py @@ -0,0 +1,794 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' # noqa + +dataset_type = 'CocoDataset' +data_root = 'data/odinw/' + +base_test_pipeline = _base_.test_pipeline +base_test_pipeline[-1]['meta_keys'] = ('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'text', + 'custom_entities', 'caption_prompt') + +# ---------------------1 AerialMaritimeDrone_large---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/large/' +dataset_AerialMaritimeDrone_large = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_large = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------2 AerialMaritimeDrone_tiled---------------------# +class_name = ('boat', 'car', 'dock', 'jetski', 'lift') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AerialMaritimeDrone/tiled/' +dataset_AerialMaritimeDrone_tiled = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AerialMaritimeDrone_tiled = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------3 AmericanSignLanguageLetters---------------------# +class_name = ('A', 'B', 
'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'AmericanSignLanguageLetters/American Sign Language Letters.v1-v1.coco/' # noqa +dataset_AmericanSignLanguageLetters = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_AmericanSignLanguageLetters = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------4 Aquarium---------------------# +class_name = ('fish', 'jellyfish', 'penguin', 'puffin', 'shark', 'starfish', + 'stingray') +metainfo = dict(classes=class_name) +_data_root = data_root + 'Aquarium/Aquarium Combined.v2-raw-1024.coco/' +dataset_Aquarium = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Aquarium = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------5 BCCD---------------------# +class_name = ('Platelets', 'RBC', 'WBC') +metainfo = dict(classes=class_name) +_data_root = data_root + 'BCCD/BCCD.v3-raw.coco/' +dataset_BCCD = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_BCCD = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------6 boggleBoards---------------------# +class_name = ('Q', 'a', 'an', 'b', 'c', 'd', 'e', 'er', 'f', 'g', 'h', 'he', + 'i', 'in', 'j', 'k', 'l', 'm', 'n', 'o', 'o ', 'p', 'q', 'qu', + 'r', 's', 't', 't\\', 'th', 'u', 'v', 'w', 'wild', 'x', 'y', 'z') +metainfo = dict(classes=class_name) +_data_root = data_root + 'boggleBoards/416x416AutoOrient/export/' +dataset_boggleBoards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_boggleBoards = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------7 brackishUnderwater---------------------# +class_name = ('crab', 'fish', 'jellyfish', 'shrimp', 'small_fish', 'starfish') +metainfo = dict(classes=class_name) +_data_root = data_root + 'brackishUnderwater/960x540/' +dataset_brackishUnderwater = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_brackishUnderwater = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------8 ChessPieces---------------------# +class_name = (' ', 'black bishop', 'black king', 'black knight', 'black pawn', + 'black queen', 'black rook', 'white bishop', 'white 
king', + 'white knight', 'white pawn', 'white queen', 'white rook') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' +dataset_ChessPieces = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ChessPieces = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------9 CottontailRabbits---------------------# +class_name = ('rabbit', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'CottontailRabbits/' +dataset_CottontailRabbits = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_CottontailRabbits = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------10 dice---------------------# +class_name = ('1', '2', '3', '4', '5', '6') +metainfo = dict(classes=class_name) +_data_root = data_root + 'dice/mediumColor/export/' +dataset_dice = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_dice = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------11 DroneControl---------------------# +class_name = ('follow', 'follow_hand', 'land', 'land_hand', 'null', 'object', + 'takeoff', 'takeoff-hand') +metainfo = dict(classes=class_name) +_data_root = data_root + 'DroneControl/Drone Control.v3-raw.coco/' +dataset_DroneControl = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_DroneControl = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------12 EgoHands_generic---------------------# +class_name = ('hand', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/generic/' +caption_prompt = {'hand': {'suffix': ' of a person'}} +dataset_EgoHands_generic = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_EgoHands_generic = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------13 EgoHands_specific---------------------# +class_name = ('myleft', 'myright', 'yourleft', 'yourright') +metainfo = dict(classes=class_name) +_data_root = data_root + 'EgoHands/specific/' +dataset_EgoHands_specific = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + 
test_mode=True, + return_classes=True) +val_evaluator_EgoHands_specific = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------14 HardHatWorkers---------------------# +class_name = ('head', 'helmet', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'HardHatWorkers/raw/' +dataset_HardHatWorkers = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_HardHatWorkers = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------15 MaskWearing---------------------# +class_name = ('mask', 'no-mask') +metainfo = dict(classes=class_name) +_data_root = data_root + 'MaskWearing/raw/' +dataset_MaskWearing = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MaskWearing = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------16 MountainDewCommercial---------------------# +class_name = ('bottle', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'MountainDewCommercial/' +dataset_MountainDewCommercial = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_MountainDewCommercial = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------17 NorthAmericaMushrooms---------------------# +class_name = ('flat mushroom', 'yellow mushroom') +metainfo = dict(classes=class_name) +_data_root = data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa +dataset_NorthAmericaMushrooms = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/new_annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_NorthAmericaMushrooms = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/new_annotations_without_background.json', + metric='bbox') + +# ---------------------18 openPoetryVision---------------------# +class_name = ('American Typewriter', 'Andale Mono', 'Apple Chancery', 'Arial', + 'Avenir', 'Baskerville', 'Big Caslon', 'Bradley Hand', + 'Brush Script MT', 'Chalkboard', 'Comic Sans MS', 'Copperplate', + 'Courier', 'Didot', 'Futura', 'Geneva', 'Georgia', 'Gill Sans', + 'Helvetica', 'Herculanum', 'Impact', 'Kefa', 'Lucida Grande', + 'Luminari', 'Marker Felt', 'Menlo', 'Monaco', 'Noteworthy', + 'Optima', 'PT Sans', 'PT Serif', 'Palatino', 'Papyrus', + 'Phosphate', 'Rockwell', 'SF Pro', 'SignPainter', 'Skia', + 'Snell Roundhand', 'Tahoma', 'Times New Roman', 'Trebuchet MS', + 'Verdana') +metainfo = dict(classes=class_name) +_data_root = data_root + 'openPoetryVision/512x512/' +dataset_openPoetryVision = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + 
ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_openPoetryVision = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------19 OxfordPets_by_breed---------------------# +class_name = ('cat-Abyssinian', 'cat-Bengal', 'cat-Birman', 'cat-Bombay', + 'cat-British_Shorthair', 'cat-Egyptian_Mau', 'cat-Maine_Coon', + 'cat-Persian', 'cat-Ragdoll', 'cat-Russian_Blue', 'cat-Siamese', + 'cat-Sphynx', 'dog-american_bulldog', + 'dog-american_pit_bull_terrier', 'dog-basset_hound', + 'dog-beagle', 'dog-boxer', 'dog-chihuahua', + 'dog-english_cocker_spaniel', 'dog-english_setter', + 'dog-german_shorthaired', 'dog-great_pyrenees', 'dog-havanese', + 'dog-japanese_chin', 'dog-keeshond', 'dog-leonberger', + 'dog-miniature_pinscher', 'dog-newfoundland', 'dog-pomeranian', + 'dog-pug', 'dog-saint_bernard', 'dog-samoyed', + 'dog-scottish_terrier', 'dog-shiba_inu', + 'dog-staffordshire_bull_terrier', 'dog-wheaten_terrier', + 'dog-yorkshire_terrier') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-breed/' # noqa +dataset_OxfordPets_by_breed = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_breed = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------20 OxfordPets_by_species---------------------# +class_name = ('cat', 'dog') +metainfo = dict(classes=class_name) +_data_root = data_root + 'OxfordPets/by-species/' # noqa +dataset_OxfordPets_by_species = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_OxfordPets_by_species = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------21 PKLot---------------------# +class_name = ('space-empty', 'space-occupied') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PKLot/640/' # noqa +dataset_PKLot = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PKLot = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------22 Packages---------------------# +class_name = ('package', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Packages/Raw/' +caption_prompt = { + 'package': { + 'prefix': 'there is a ', + 'suffix': ' on the porch' + } +} +dataset_Packages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=base_test_pipeline, + caption_prompt=caption_prompt, + test_mode=True, + return_classes=True) +val_evaluator_Packages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# 
---------------------23 PascalVOC---------------------# +class_name = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') +metainfo = dict(classes=class_name) +_data_root = data_root + 'PascalVOC/' +dataset_PascalVOC = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_PascalVOC = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------24 pistols---------------------# +class_name = ('pistol', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pistols/export/' +dataset_pistols = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pistols = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------25 plantdoc---------------------# +class_name = ('Apple Scab Leaf', 'Apple leaf', 'Apple rust leaf', + 'Bell_pepper leaf', 'Bell_pepper leaf spot', 'Blueberry leaf', + 'Cherry leaf', 'Corn Gray leaf spot', 'Corn leaf blight', + 'Corn rust leaf', 'Peach leaf', 'Potato leaf', + 'Potato leaf early blight', 'Potato leaf late blight', + 'Raspberry leaf', 'Soyabean leaf', 'Soybean leaf', + 'Squash Powdery mildew leaf', 'Strawberry leaf', + 'Tomato Early blight leaf', 'Tomato Septoria leaf spot', + 'Tomato leaf', 'Tomato leaf bacterial spot', + 'Tomato leaf late blight', 'Tomato leaf mosaic virus', + 'Tomato leaf yellow virus', 'Tomato mold leaf', + 'Tomato two spotted spider mites leaf', 'grape leaf', + 'grape leaf black rot') +metainfo = dict(classes=class_name) +_data_root = data_root + 'plantdoc/416x416/' +dataset_plantdoc = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_plantdoc = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------26 pothole---------------------# +class_name = ('pothole', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'pothole/' +caption_prompt = { + 'pothole': { + 'name': 'holes', + 'prefix': 'there are some ', + 'suffix': ' on the road' + } +} +dataset_pothole = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + caption_prompt=caption_prompt, + pipeline=base_test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_pothole = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------27 Raccoon---------------------# +class_name = ('raccoon', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'Raccoon/Raccoon.v2-raw.coco/' +dataset_Raccoon = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + 
ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_Raccoon = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------28 selfdrivingCar---------------------# +class_name = ('biker', 'car', 'pedestrian', 'trafficLight', + 'trafficLight-Green', 'trafficLight-GreenLeft', + 'trafficLight-Red', 'trafficLight-RedLeft', + 'trafficLight-Yellow', 'trafficLight-YellowLeft', 'truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'selfdrivingCar/fixedLarge/export/' +dataset_selfdrivingCar = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='val_annotations_without_background.json', + data_prefix=dict(img=''), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_selfdrivingCar = dict( + type='CocoMetric', + ann_file=_data_root + 'val_annotations_without_background.json', + metric='bbox') + +# ---------------------29 ShellfishOpenImages---------------------# +class_name = ('Crab', 'Lobster', 'Shrimp') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ShellfishOpenImages/raw/' +dataset_ShellfishOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ShellfishOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------30 ThermalCheetah---------------------# +class_name = ('cheetah', 'human') +metainfo = dict(classes=class_name) +_data_root = data_root + 'ThermalCheetah/' +dataset_ThermalCheetah = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_ThermalCheetah = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------31 thermalDogsAndPeople---------------------# +class_name = ('dog', 'person') +metainfo = dict(classes=class_name) +_data_root = data_root + 'thermalDogsAndPeople/' +dataset_thermalDogsAndPeople = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_thermalDogsAndPeople = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------32 UnoCards---------------------# +class_name = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', + '12', '13', '14') +metainfo = dict(classes=class_name) +_data_root = data_root + 'UnoCards/raw/' +dataset_UnoCards = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_UnoCards = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# 
---------------------33 VehiclesOpenImages---------------------# +class_name = ('Ambulance', 'Bus', 'Car', 'Motorcycle', 'Truck') +metainfo = dict(classes=class_name) +_data_root = data_root + 'VehiclesOpenImages/416x416/' +dataset_VehiclesOpenImages = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_VehiclesOpenImages = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------34 WildfireSmoke---------------------# +class_name = ('smoke', ) +metainfo = dict(classes=class_name) +_data_root = data_root + 'WildfireSmoke/' +dataset_WildfireSmoke = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_WildfireSmoke = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# ---------------------35 websiteScreenshots---------------------# +class_name = ('button', 'field', 'heading', 'iframe', 'image', 'label', 'link', + 'text') +metainfo = dict(classes=class_name) +_data_root = data_root + 'websiteScreenshots/' +dataset_websiteScreenshots = dict( + type=dataset_type, + metainfo=metainfo, + data_root=_data_root, + ann_file='valid/annotations_without_background.json', + data_prefix=dict(img='valid/'), + pipeline=_base_.test_pipeline, + test_mode=True, + return_classes=True) +val_evaluator_websiteScreenshots = dict( + type='CocoMetric', + ann_file=_data_root + 'valid/annotations_without_background.json', + metric='bbox') + +# --------------------- Config---------------------# + +dataset_prefixes = [ + 'AerialMaritimeDrone_large', + 'AerialMaritimeDrone_tiled', + 'AmericanSignLanguageLetters', + 'Aquarium', + 'BCCD', + 'boggleBoards', + 'brackishUnderwater', + 'ChessPieces', + 'CottontailRabbits', + 'dice', + 'DroneControl', + 'EgoHands_generic', + 'EgoHands_specific', + 'HardHatWorkers', + 'MaskWearing', + 'MountainDewCommercial', + 'NorthAmericaMushrooms', + 'openPoetryVision', + 'OxfordPets_by_breed', + 'OxfordPets_by_species', + 'PKLot', + 'Packages', + 'PascalVOC', + 'pistols', + 'plantdoc', + 'pothole', + 'Raccoons', + 'selfdrivingCar', + 'ShellfishOpenImages', + 'ThermalCheetah', + 'thermalDogsAndPeople', + 'UnoCards', + 'VehiclesOpenImages', + 'WildfireSmoke', + 'websiteScreenshots', +] + +datasets = [ + dataset_AerialMaritimeDrone_large, dataset_AerialMaritimeDrone_tiled, + dataset_AmericanSignLanguageLetters, dataset_Aquarium, dataset_BCCD, + dataset_boggleBoards, dataset_brackishUnderwater, dataset_ChessPieces, + dataset_CottontailRabbits, dataset_dice, dataset_DroneControl, + dataset_EgoHands_generic, dataset_EgoHands_specific, + dataset_HardHatWorkers, dataset_MaskWearing, dataset_MountainDewCommercial, + dataset_NorthAmericaMushrooms, dataset_openPoetryVision, + dataset_OxfordPets_by_breed, dataset_OxfordPets_by_species, dataset_PKLot, + dataset_Packages, dataset_PascalVOC, dataset_pistols, dataset_plantdoc, + dataset_pothole, dataset_Raccoon, dataset_selfdrivingCar, + dataset_ShellfishOpenImages, dataset_ThermalCheetah, + dataset_thermalDogsAndPeople, dataset_UnoCards, dataset_VehiclesOpenImages, + dataset_WildfireSmoke, dataset_websiteScreenshots +] + 
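+# Note: `dataset_prefixes`, `datasets` and `metrics` are parallel lists; the
+# i-th prefix labels the results of the i-th dataset/evaluator pair when they
+# are combined via ConcatDataset and MultiDatasetsEvaluator at the end of this
+# config.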
+metrics = [ + val_evaluator_AerialMaritimeDrone_large, + val_evaluator_AerialMaritimeDrone_tiled, + val_evaluator_AmericanSignLanguageLetters, val_evaluator_Aquarium, + val_evaluator_BCCD, val_evaluator_boggleBoards, + val_evaluator_brackishUnderwater, val_evaluator_ChessPieces, + val_evaluator_CottontailRabbits, val_evaluator_dice, + val_evaluator_DroneControl, val_evaluator_EgoHands_generic, + val_evaluator_EgoHands_specific, val_evaluator_HardHatWorkers, + val_evaluator_MaskWearing, val_evaluator_MountainDewCommercial, + val_evaluator_NorthAmericaMushrooms, val_evaluator_openPoetryVision, + val_evaluator_OxfordPets_by_breed, val_evaluator_OxfordPets_by_species, + val_evaluator_PKLot, val_evaluator_Packages, val_evaluator_PascalVOC, + val_evaluator_pistols, val_evaluator_plantdoc, val_evaluator_pothole, + val_evaluator_Raccoon, val_evaluator_selfdrivingCar, + val_evaluator_ShellfishOpenImages, val_evaluator_ThermalCheetah, + val_evaluator_thermalDogsAndPeople, val_evaluator_UnoCards, + val_evaluator_VehiclesOpenImages, val_evaluator_WildfireSmoke, + val_evaluator_websiteScreenshots +] + +# -------------------------------------------------# +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/odinw/override_category.py b/mmpose/configs/mmdet/mm_grounding_dino/odinw/override_category.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff05fc6e5e4d0989cf7fcf7af4dc902ee99f3a3 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/odinw/override_category.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse + +import mmengine + + +def parse_args(): + parser = argparse.ArgumentParser(description='Override Category') + parser.add_argument('data_root') + return parser.parse_args() + + +def main(): + args = parse_args() + + ChessPieces = [{ + 'id': 1, + 'name': ' ', + 'supercategory': 'pieces' + }, { + 'id': 2, + 'name': 'black bishop', + 'supercategory': 'pieces' + }, { + 'id': 3, + 'name': 'black king', + 'supercategory': 'pieces' + }, { + 'id': 4, + 'name': 'black knight', + 'supercategory': 'pieces' + }, { + 'id': 5, + 'name': 'black pawn', + 'supercategory': 'pieces' + }, { + 'id': 6, + 'name': 'black queen', + 'supercategory': 'pieces' + }, { + 'id': 7, + 'name': 'black rook', + 'supercategory': 'pieces' + }, { + 'id': 8, + 'name': 'white bishop', + 'supercategory': 'pieces' + }, { + 'id': 9, + 'name': 'white king', + 'supercategory': 'pieces' + }, { + 'id': 10, + 'name': 'white knight', + 'supercategory': 'pieces' + }, { + 'id': 11, + 'name': 'white pawn', + 'supercategory': 'pieces' + }, { + 'id': 12, + 'name': 'white queen', + 'supercategory': 'pieces' + }, { + 'id': 13, + 'name': 'white rook', + 'supercategory': 'pieces' + }] + + _data_root = args.data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = ChessPieces + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + CottontailRabbits = [{ + 'id': 1, + 'name': 'rabbit', + 'supercategory': 'Cottontail-Rabbit' + }] + + _data_root = args.data_root + 'CottontailRabbits/' + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = CottontailRabbits + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + NorthAmericaMushrooms = [{ + 'id': 1, + 'name': 'flat mushroom', + 'supercategory': 'mushroom' + }, { + 'id': 2, + 'name': 'yellow mushroom', + 'supercategory': 'mushroom' + }] + + _data_root = args.data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa + json_data = mmengine.load(_data_root + + 'valid/annotations_without_background.json') + json_data['categories'] = NorthAmericaMushrooms + mmengine.dump(json_data, + _data_root + 'valid/new_annotations_without_background.json') + + +if __name__ == '__main__': + main() diff --git a/mmpose/configs/mmdet/mm_grounding_dino/people_in_painting/grounding_dino_swin-t_finetune_8xb4_50e_people_in_painting.py b/mmpose/configs/mmdet/mm_grounding_dino/people_in_painting/grounding_dino_swin-t_finetune_8xb4_50e_people_in_painting.py new file mode 100644 index 0000000000000000000000000000000000000000..449d8682f896c3857e6a50b16a13b43acc77ebc2 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/people_in_painting/grounding_dino_swin-t_finetune_8xb4_50e_people_in_painting.py @@ -0,0 +1,109 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +# https://universe.roboflow.com/roboflow-100/people-in-paintings/dataset/2 +data_root = 'data/people_in_painting_v2/' +class_name = ('Human', ) +palette = [(220, 20, 60)] + +metainfo = dict(classes=class_name, palette=palette) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + 
(736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=10, + dataset=dict( + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + return_classes=True, + data_prefix=dict(img='train/'), + ann_file='train/_annotations.coco.json'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + return_classes=True, + ann_file='valid/_annotations.coco.json', + data_prefix=dict(img='valid/'))) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'valid/_annotations.coco.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[4], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_grefcoco.py b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_grefcoco.py new file mode 100644 index 0000000000000000000000000000000000000000..983ffe5c6f3f6e59cf1616a0b22c17f065e08437 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_grefcoco.py @@ -0,0 +1,170 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + # change this + dict(type='RandomFlip', prob=0.0), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 
+ # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ODVGDataset', + data_root=data_root, + ann_file='mdetr_annotations/finetune_grefcoco_train_vg.json', + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testA.json' +val_dataset_refcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testA = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testB.json' +val_dataset_refcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testB = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +datasets = [ + val_dataset_all_val, val_dataset_refcoco_testA, val_dataset_refcoco_testB +] +dataset_prefixes = ['grefcoco_val', 'grefcoco_testA', 'grefcoco_testB'] +metrics = [ + val_evaluator_all_val, val_evaluator_refcoco_testA, + val_evaluator_refcoco_testB +] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 
'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco.py b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco.py new file mode 100644 index 0000000000000000000000000000000000000000..d91af473a239f2f48a09a272d926e00c52da987b --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco.py @@ -0,0 +1,167 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + # change this + dict(type='RandomFlip', prob=0.0), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ODVGDataset', + data_root=data_root, + ann_file='mdetr_annotations/finetune_refcoco_train_vg.json', + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_testA.json' +val_dataset_refcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + 
data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_testB.json' +val_dataset_refcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +datasets = [ + val_dataset_all_val, val_dataset_refcoco_testA, val_dataset_refcoco_testB +] +dataset_prefixes = ['refcoco_val', 'refcoco_testA', 'refcoco_testB'] +metrics = [ + val_evaluator_all_val, val_evaluator_refcoco_testA, + val_evaluator_refcoco_testB +] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco_plus.py b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco_plus.py new file mode 100644 index 0000000000000000000000000000000000000000..871adc8efb48532fb5e0fbfa07e6019c37911712 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcoco_plus.py @@ -0,0 +1,167 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + # change this + dict(type='RandomFlip', prob=0.0), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + 
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ODVGDataset', + data_root=data_root, + ann_file='mdetr_annotations/finetune_refcoco+_train_vg.json', + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testA.json' +val_dataset_refcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testB.json' +val_dataset_refcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +datasets = [ + val_dataset_all_val, val_dataset_refcoco_testA, val_dataset_refcoco_testB +] +dataset_prefixes = ['refcoco+_val', 'refcoco+_testA', 'refcoco+_testB'] +metrics = [ + val_evaluator_all_val, val_evaluator_refcoco_testA, + val_evaluator_refcoco_testB +] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] +train_cfg = 
dict(max_epochs=max_epochs, val_interval=1) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcocog.py b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcocog.py new file mode 100644 index 0000000000000000000000000000000000000000..a351d6f9d123fc8f2000990a5e6d02adbb3eb2fa --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_finetune_8xb4_5e_refcocog.py @@ -0,0 +1,145 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + # change this + dict(type='RandomFlip', prob=0.0), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict( + type='RandomSamplingNegPos', + tokenizer_name=_base_.lang_model_name, + num_sample_negative=85, + max_tokens=256), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities', 'tokens_positive', 'dataset_mode')) +] + +train_dataloader = dict( + dataset=dict( + _delete_=True, + type='ODVGDataset', + data_root=data_root, + ann_file='mdetr_annotations/finetune_refcocog_train_vg.json', + data_prefix=dict(img='train2014/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + return_classes=True, + pipeline=train_pipeline)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcocog_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcocog_test.json' +val_dataset_refcoco_test = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=_base_.test_pipeline, + backend_args=None) + +val_evaluator_refcoco_test = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# 
-------------------------------------------------# +datasets = [val_dataset_all_val, val_dataset_refcoco_test] +dataset_prefixes = ['refcocog_val', 'refcocog_test'] +metrics = [val_evaluator_all_val, val_evaluator_refcoco_test] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1), + # 'language_model': dict(lr_mult=0), + })) + +# learning policy +max_epochs = 5 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py new file mode 100644 index 0000000000000000000000000000000000000000..437d71c6b357eda85d13b5efd4c81d4d32f91120 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py @@ -0,0 +1,228 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +# 30 is an empirical value, just set it to the maximum value +# without affecting the evaluation result +model = dict(test_cfg=dict(max_per_img=30)) + +data_root = 'data/coco/' + +test_pipeline = [ + dict( + type='LoadImageFromFile', backend_args=None, + imdecode_backend='pillow'), + dict( + type='FixScaleResize', + scale=(800, 1333), + keep_ratio=True, + backend='pillow'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'text', 'custom_entities', + 'tokens_positive')) +] + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/final_refexp_val.json' +val_dataset_all_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) +val_evaluator_all_val = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco_testA.json' +val_dataset_refcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 
'mdetr_annotations/finetune_refcoco_testB.json' +val_dataset_refcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testA.json' +val_dataset_refcoco_plus_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_plus_testA = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcoco+_testB.json' +val_dataset_refcoco_plus_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcoco_plus_testB = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_refcocog_test.json' +val_dataset_refcocog_test = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_refcocog_test = dict( + type='RefExpMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + topk=(1, 5, 10)) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_val.json' +val_dataset_grefcoco_val = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_val = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testA.json' +val_dataset_grefcoco_testA = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_testA = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# -------------------------------------------------# +ann_file = 'mdetr_annotations/finetune_grefcoco_testB.json' +val_dataset_grefcoco_testB = dict( + type='MDETRStyleRefCocoDataset', + data_root=data_root, + ann_file=ann_file, + data_prefix=dict(img='train2014/'), + test_mode=True, + return_classes=True, + pipeline=test_pipeline, + backend_args=None) + +val_evaluator_grefcoco_testB = dict( + type='gRefCOCOMetric', + ann_file=data_root + ann_file, + metric='bbox', + iou_thrs=0.5, + thresh_score=0.7, + thresh_f1=1.0) + +# 
-------------------------------------------------# +datasets = [ + val_dataset_all_val, val_dataset_refcoco_testA, val_dataset_refcoco_testB, + val_dataset_refcoco_plus_testA, val_dataset_refcoco_plus_testB, + val_dataset_refcocog_test, val_dataset_grefcoco_val, + val_dataset_grefcoco_testA, val_dataset_grefcoco_testB +] +dataset_prefixes = [ + 'val', 'refcoco_testA', 'refcoco_testB', 'refcoco+_testA', + 'refcoco+_testB', 'refcocog_test', 'grefcoco_val', 'grefcoco_testA', + 'grefcoco_testB' +] +metrics = [ + val_evaluator_all_val, val_evaluator_refcoco_testA, + val_evaluator_refcoco_testB, val_evaluator_refcoco_plus_testA, + val_evaluator_refcoco_plus_testB, val_evaluator_refcocog_test, + val_evaluator_grefcoco_val, val_evaluator_grefcoco_testA, + val_evaluator_grefcoco_testB +] + +val_dataloader = dict( + dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='MultiDatasetsEvaluator', + metrics=metrics, + dataset_prefixes=dataset_prefixes) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/mm_grounding_dino/rtts/grounding_dino_swin-t_finetune_8xb4_1x_rtts.py b/mmpose/configs/mmdet/mm_grounding_dino/rtts/grounding_dino_swin-t_finetune_8xb4_1x_rtts.py new file mode 100644 index 0000000000000000000000000000000000000000..95c2be058e2c407fc92de93f4b79ec8b36e25c18 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/rtts/grounding_dino_swin-t_finetune_8xb4_1x_rtts.py @@ -0,0 +1,106 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/RTTS/' +class_name = ('bicycle', 'bus', 'car', 'motorbike', 'person') +palette = [(255, 97, 0), (0, 201, 87), (176, 23, 31), (138, 43, 226), + (30, 144, 255)] + +metainfo = dict(classes=class_name, palette=palette) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + _delete_=True, + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + return_classes=True, + ann_file='annotations_json/rtts_train.json', + data_prefix=dict(img=''))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + return_classes=True, + ann_file='annotations_json/rtts_val.json', + data_prefix=dict(img=''))) +test_dataloader = 
val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations_json/rtts_val.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'backbone': dict(lr_mult=0.1) + })) + +# learning policy +max_epochs = 12 +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[11], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto')) + +load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa diff --git a/mmpose/configs/mmdet/mm_grounding_dino/ruod/grounding_dino_swin-t_finetune_8xb4_1x_ruod.py b/mmpose/configs/mmdet/mm_grounding_dino/ruod/grounding_dino_swin-t_finetune_8xb4_1x_ruod.py new file mode 100644 index 0000000000000000000000000000000000000000..f57682b29d970fb6d46c2f459f773b03e803695d --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/ruod/grounding_dino_swin-t_finetune_8xb4_1x_ruod.py @@ -0,0 +1,108 @@ +_base_ = '../grounding_dino_swin-t_pretrain_obj365.py' + +data_root = 'data/RUOD/' +class_name = ('holothurian', 'echinus', 'scallop', 'starfish', 'fish', + 'corals', 'diver', 'cuttlefish', 'turtle', 'jellyfish') +palette = [(235, 211, 70), (106, 90, 205), (160, 32, 240), (176, 23, 31), + (142, 0, 0), (230, 0, 0), (106, 0, 228), (60, 100, 0), (80, 100, 0), + (70, 0, 0)] + +metainfo = dict(classes=class_name, palette=palette) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', 'text', + 'custom_entities')) +] + +train_dataloader = dict( + sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + _delete_=True, + type='CocoDataset', + data_root=data_root, + metainfo=metainfo, + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline, + return_classes=True, + ann_file='RUOD_ANN/instances_train.json', + data_prefix=dict(img='RUOD_pic/train/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + return_classes=True, + 
ann_file='RUOD_ANN/instances_test.json',
+        data_prefix=dict(img='RUOD_pic/test/')))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(
+    type='CocoMetric',
+    ann_file=data_root + 'RUOD_ANN/instances_test.json',
+    metric='bbox',
+    format_only=False)
+test_evaluator = val_evaluator
+
+optim_wrapper = dict(
+    _delete_=True,
+    type='OptimWrapper',
+    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
+    clip_grad=dict(max_norm=0.1, norm_type=2),
+    paramwise_cfg=dict(custom_keys={
+        'absolute_pos_embed': dict(decay_mult=0.),
+        'backbone': dict(lr_mult=0.1)
+    }))
+
+# learning policy
+max_epochs = 12
+param_scheduler = [
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=max_epochs,
+        by_epoch=True,
+        milestones=[11],
+        gamma=0.1)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
+
+load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
diff --git a/mmpose/configs/mmdet/mm_grounding_dino/usage.md b/mmpose/configs/mmdet/mm_grounding_dino/usage.md
new file mode 100644
index 0000000000000000000000000000000000000000..123c6638cbea2cad01d935994f08eab252f35cbf
--- /dev/null
+++ b/mmpose/configs/mmdet/mm_grounding_dino/usage.md
@@ -0,0 +1,491 @@
+# Usage
+
+## Install
+
+After installing MMDet according to the instructions in the [get_started](../../docs/zh_cn/get_started.md) section, you need to install additional dependency packages:
+
+```shell
+cd $MMDETROOT
+
+pip install -r requirements/multimodal.txt
+pip install emoji ddd-dataset
+pip install git+https://github.com/lvis-dataset/lvis-api.git
+```
+
+Please note that since the LVIS third-party library does not currently support numpy 1.24, ensure that your numpy version meets the requirements. It is recommended to install numpy version 1.23.
+
+## Instructions
+
+### Download BERT Weight
+
+MM Grounding DINO uses BERT as its language model and requires access to https://huggingface.co/. If you encounter connection errors due to network access issues, you can download the necessary files on a computer with network access and save them locally. Finally, modify the `lang_model_name` field in the configuration file to the local path. For specific instructions, please refer to the following code:
+
+```python
+from transformers import BertConfig, BertModel
+from transformers import AutoTokenizer
+
+config = BertConfig.from_pretrained("bert-base-uncased")
+model = BertModel.from_pretrained("bert-base-uncased", add_pooling_layer=False, config=config)
+tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+
+config.save_pretrained("your path/bert-base-uncased")
+model.save_pretrained("your path/bert-base-uncased")
+tokenizer.save_pretrained("your path/bert-base-uncased")
+```
+
+### Download NLTK Weight
+
+When MM Grounding DINO performs Phrase Grounding inference, it may extract noun phrases. Although it downloads specific models at runtime, considering that some users' running environments cannot connect to the internet, it is possible to download them in advance to the `~/nltk_data` path.
+
+```python
+import nltk
+nltk.download('punkt', download_dir='~/nltk_data')
+nltk.download('averaged_perceptron_tagger', download_dir='~/nltk_data')
+```
+
+### Download MM Grounding DINO-T Weight
+
+For convenience in demonstration, you can download the MM Grounding DINO-T model weights in advance to the current path.
+
+```shell
+wget https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth
+```
+
+## Inference
+
+Before inference, for a better experience of the inference effects on different images, it is recommended that you first download [these images](https://github.com/microsoft/X-Decoder/tree/main/inference_demo/images) to the current path.
+
+MM Grounding DINO supports four types of inference methods: Closed-Set Object Detection, Open Vocabulary Object Detection, Phrase Grounding, and Referential Expression Comprehension. The details are explained below.
+
+**(1) Closed-Set Object Detection**
+
+Since MM Grounding DINO is a pretrained model, it can theoretically be applied to any closed-set detection dataset. Currently, we support commonly used datasets such as coco/voc/cityscapes/objects365v1/lvis, etc. Below, we will use coco as an example.
+
+```shell
+python demo/image_demo.py images/animals.png \
+        configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \
+        --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \
+        --texts '$: coco'
+```
+
+The predictions for `outputs/vis/animals.png` will be generated in the current directory, as shown in the following image.
+
+ +
+ +Since ostrich is not one of the 80 classes in COCO, it will not be detected. + +It's important to note that Objects365v1 and LVIS have a large number of categories. If you try to input all category names directly into the network, it may exceed 256 tokens, leading to poor model predictions. In such cases, you can use the `--chunked-size` parameter to perform chunked predictions. However, please be aware that chunked predictions may take longer to complete due to the large number of categories. + +```shell +python demo/image_demo.py images/animals.png \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts '$: lvis' --chunked-size 70 \ + --palette random +``` + +
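+
+As a rough illustration of what chunked prediction does, the sketch below splits a long category list into groups of at most `--chunked-size` names so that each forward pass stays within the token budget. This is only a sketch of the idea; the actual splitting is handled inside the inference code, and the category count of 1203 corresponds to LVIS v1.0.
+
+```python
+# Illustrative only: how a large vocabulary is split into chunks for prediction.
+classes = [f'class_{i}' for i in range(1203)]  # LVIS v1.0 defines 1203 categories
+chunked_size = 70
+chunks = [classes[i:i + chunked_size] for i in range(0, len(classes), chunked_size)]
+print(f'{len(chunks)} forward passes, at most {chunked_size} category names each')
+```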
+ +
+ +Different `--chunked-size` values can lead to different prediction results. You can experiment with different chunked sizes to find the one that works best for your specific task and dataset. + +**(2) Open Vocabulary Object Detection** + +Open vocabulary object detection refers to the ability to input arbitrary class names during inference. + +```shell +python demo/image_demo.py images/animals.png \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'zebra. giraffe' -c +``` + +
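+
+If you would rather call the model from Python than through the demo script, the sketch below performs roughly the same open vocabulary query with `DetInferencer`. Treat it as a sketch only: the `texts` and `custom_entities` arguments mirror what `demo/image_demo.py` forwards from the command line, and the exact keyword names may differ between MMDet versions.
+
+```python
+from mmdet.apis import DetInferencer
+
+# Build an inferencer from the pretrained config and checkpoint used above.
+inferencer = DetInferencer(
+    model='configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py',
+    weights='grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth')
+
+# Arbitrary class names, separated the same way as on the command line.
+results = inferencer(
+    'images/animals.png', texts='zebra. giraffe', custom_entities=True, out_dir='outputs')
+print(results['predictions'][0]['labels'][:5])
+```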
+ +
+ +**(3) Phrase Grounding** + +Phrase Grounding refers to the process where a user inputs a natural language description, and the model automatically detects the corresponding bounding boxes for the mentioned noun phrases. It can be used in two ways: + +1. Automatically extracting noun phrases using the NLTK library and then performing detection. + +```shell +python demo/image_demo.py images/apples.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'There are many apples here.' +``` + +
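+
+If you are curious what the automatic extraction step works with, the standalone snippet below tokenizes and POS-tags the caption using the NLTK resources downloaded earlier (`punkt` and `averaged_perceptron_tagger`). It only illustrates the information that noun-phrase extraction relies on; the exact chunking rules used internally may differ.
+
+```python
+import nltk
+
+caption = 'There are many apples here.'
+tokens = nltk.word_tokenize(caption)  # ['There', 'are', 'many', 'apples', 'here', '.']
+print(nltk.pos_tag(tokens))  # 'many' (JJ) and 'apples' (NNS) together form the noun phrase 'many apples'
+```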
+ +
+ +The program will automatically split `many apples` as a noun phrase and then detect the corresponding objects. Different input descriptions can have a significant impact on the prediction results. + +2. Users can manually specify which parts of the sentence are noun phrases to avoid errors in NLTK extraction. + +```shell +python demo/image_demo.py images/fruit.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'The picture contains watermelon, flower, and a white bottle.' \ + --tokens-positive "[[[21,31]], [[45,59]]]" --pred-score-thr 0.12 +``` + +The noun phrase corresponding to positions 21-31 is `watermelon`, and the noun phrase corresponding to positions 45-59 is `a white bottle`. + +
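+
+A quick way to double-check the character offsets passed to `--tokens-positive` is to slice the caption directly; as the example above shows, each span is interpreted like a Python slice over the caption string:
+
+```python
+caption = 'The picture contains watermelon, flower, and a white bottle.'
+print(caption[21:31])  # watermelon
+print(caption[45:59])  # a white bottle
+```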
+ +
+ +**(4) Referential Expression Comprehension** + +Referential expression understanding refers to the model automatically comprehending the referential expressions involved in a user's language description without the need for noun phrase extraction. + +```shell +python demo/image_demo.py images/apples.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'red apple.' \ + --tokens-positive -1 +``` + +
+ +
+
+## Evaluation
+
+Our provided evaluation scripts are unified, and you only need to prepare the data in advance and then run the relevant configuration.
+
+(1) Zero-Shot COCO2017 val
+
+```shell
+# single GPU
+python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \
+        grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth
+
+# 8 GPUs
+./tools/dist_test.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \
+        grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth 8
+```
+
+(2) Zero-Shot ODinW13
+
+```shell
+# single GPU
+python tools/test.py configs/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py \
+        grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth
+
+# 8 GPUs
+./tools/dist_test.sh configs/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py \
+        grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth 8
+```
+
+## Visualization of Evaluation Results
+
+To make it easier to analyze model predictions, we also support visualizing the prediction results on evaluation datasets. Taking referential expression understanding as an example, the usage is as follows:
+
+```shell
+python tools/test.py configs/mm_grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp.py \
+        grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth --work-dir refcoco_result --show-dir save_path
+```
+
+During the inference process, it will save the visualization results to the `refcoco_result/{current_timestamp}/save_path` directory. For other evaluation dataset visualizations, you only need to replace the configuration file.
+
+Here are some visualization results for various datasets. The left image shows the Ground Truth (GT), and the right image shows the predicted result.
+
+1. COCO2017 val Results:
+
+ +
+ +2. Flickr30k Entities Results: + +
+ +
+ +3. DOD Results: + +
+ +
+ +4. RefCOCO val Results: + +
+ +
+ +5. RefCOCO testA Results: + +
+ +
+ +6. gRefCOCO val Results: + +
+ +
+ +## Training + +If you want to reproduce our results, you can train the model by using the following command after preparing the dataset: + +```shell +# Training on a single machine with 8 GPUs for obj365v1 dataset +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py 8 +# Training on a single machine with 8 GPUs for datasets like obj365v1, goldg, grit, v3det, and other datasets is similar. +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py 8 +``` + +For multi-machine training, please refer to [train.md](../../docs/zh_cn/user_guides/train.md). The MM-Grounding-DINO T model is designed to work with 32 GPUs (specifically, 3090Ti GPUs). If your total batch size is not 32x4=128, you will need to manually adjust the learning rate accordingly. + +### Pretraining Custom Format Explanation + +In order to standardize the pretraining formats for different datasets, we refer to the format design proposed by [Open-GroundingDino](https://github.com/longzw1997/Open-GroundingDino). Specifically, it is divided into two formats. + +**(1) Object Detection Format (OD)** + +```text +{"filename": "obj365_train_000000734304.jpg", + "height": 512, + "width": 769, + "detection": { + "instances": [ + {"bbox": [109.4768676992, 346.0190429696, 135.1918335098, 365.3641967616], "label": 2, "category": "chair"}, + {"bbox": [58.612365705900004, 323.2281494016, 242.6005859067, 451.4166870016], "label": 8, "category": "car"} + ] + } +} +``` + +The numerical values corresponding to labels in the label dictionary should match the respective label_map. Each item in the instances list corresponds to a bounding box (in the format x1y1x2y2). + +**(2) Phrase Grounding Format (VG)** + +```text +{"filename": "2405116.jpg", + "height": 375, + "width": 500, + "grounding": + {"caption": "Two surfers walking down the shore. sand on the beach.", + "regions": [ + {"bbox": [206, 156, 282, 248], "phrase": "Two surfers", "tokens_positive": [[0, 3], [4, 11]]}, + {"bbox": [303, 338, 443, 343], "phrase": "sand", "tokens_positive": [[36, 40]]}, + {"bbox": [[327, 223, 421, 282], [300, 200, 400, 210]], "phrase": "beach", "tokens_positive": [[48, 53]]} + ] + } +``` + +The `tokens_positive` field indicates the character positions of the current phrase within the caption. + +## Example of Fine-tuning Custom Dataset + +In order to facilitate downstream fine-tuning on custom datasets, we have provided a fine-tuning example using the simple "cat" dataset as an illustration. + +### 1 Data Preparation + +```shell +cd mmdetection +wget https://download.openmmlab.com/mmyolo/data/cat_dataset.zip +unzip cat_dataset.zip -d data/cat/ +``` + +The "cat" dataset is a single-category dataset consisting of 144 images, already converted to the COCO format. + +
+cat dataset +
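Before preparing the configuration, it can be worth sanity-checking the downloaded annotations. The snippet below is a small sketch that assumes the usual layout of this dataset, with a COCO-style `annotations/trainval.json` next to the `images/` folder; if your extracted archive names the annotation file differently, adjust the path accordingly.

```python
import json

# Assumed annotation path; change it if your copy of the dataset differs.
ann_file = 'data/cat/annotations/trainval.json'

with open(ann_file) as f:
    coco = json.load(f)

print('images:     ', len(coco['images']))
print('annotations:', len(coco['annotations']))
print('categories: ', [c['name'] for c in coco['categories']])  # expect ['cat']
```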
+ +### 2 Configuration Preparation + +Due to the simplicity and small size of the "cat" dataset, we trained it for 20 epochs using 8 GPUs, with corresponding learning rate scaling. We did not train the language model, only the visual model. + +Detailed configuration information can be found in [grounding_dino_swin-t_finetune_8xb4_20e_cat](grounding_dino_swin-t_finetune_8xb4_20e_cat.py). + +### 3 Visualization and Evaluation of Zero-Shot Results + +Due to MM Grounding DINO being an open-set detection model, you can perform detection and evaluation even if it was not trained on the cat dataset. + +Visualization of a single image: + +```shell +cd mmdetection +python demo/image_demo.py data/cat/images/IMG_20211205_120756.jpg configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth --texts cat. +``` + +Evaluation results of Zero-shot on test dataset: + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.881 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.929 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.881 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.913 +``` + +### 4 Fine-tuning + +```shell +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py 8 --work-dir cat_work_dir +``` + +The model will save the best-performing checkpoint. It achieved its best performance at the 16th epoch, with the following results: + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.901 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.930 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.901 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.967 +``` + +We can observe that after fine-tuning, the training performance on the cat dataset improved from 88.1 to 90.1. 
However, due to the small dataset size, the evaluation metrics show some fluctuations. + +## Iterative Generation and Optimization Pipeline for Model Self-training Pseudo-Labels + +For users who want to build their own dataset from scratch, or who want to leverage the model's inference capability to iteratively generate and refine pseudo-labels and thereby keep improving model performance, we provide the corresponding pipelines. + +Since we have defined two data formats, they are demonstrated separately below. + +### 1 Object Detection Format + +Here, we continue to use the aforementioned cat dataset as an example. Let's assume that we currently have a series of images and predefined categories but no annotations. + +1. Generate initial `odvg` format file + +```python +import os +import cv2 +import json +import jsonlines + +data_root = 'data/cat' +images_path = os.path.join(data_root, 'images') +out_path = os.path.join(data_root, 'cat_train_od.json') +metas = [] +for files in os.listdir(images_path): + img = cv2.imread(os.path.join(images_path, files)) + height, width, _ = img.shape + metas.append({"filename": files, "height": height, "width": width}) + +with jsonlines.open(out_path, mode='w') as writer: + writer.write_all(metas) + +# Generate label_map.json; since there is only one category, a single 'cat' entry is enough +label_map_path = os.path.join(data_root, 'cat_label_map.json') +with open(label_map_path, 'w') as f: + json.dump({'0': 'cat'}, f) +``` + +Two files, `cat_train_od.json` and `cat_label_map.json`, will be generated in the `data/cat` directory. + +2. Inference with pre-trained model and save the results + +We provide a readily usable [configuration](grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py). If you are using a different dataset, you can refer to this configuration for modifications. + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +A new file `cat_train_od_v1.json` will be generated in the `data/cat` directory. You can manually open it to confirm or use the provided [script](../../tools/analysis_tools/browse_grounding_raw.py) to visualize the results. + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/cat/ cat_train_od_v1.json images --label-map-file cat_label_map.json -o your_output_dir --not-show +``` + +The visualization results will be generated in the `your_output_dir` directory. + +3. Continue training to boost performance + +After obtaining pseudo-labels, you can mix them with some pre-training data for further pre-training to improve the model's performance on the current dataset. Then, you can repeat step 2 to obtain more accurate pseudo-labels, and continue this iterative process. + +### 2 Phrase Grounding Format + +1. Generate initial `odvg` format file + +The bootstrapping process of Phrase Grounding requires that, at the start, each image comes with its caption and pre-segmented phrase information.
Taking flickr30k entities images as an example, a typical generated file should look like this: + +```text +[ +{"filename": "3028766968.jpg", + "height": 375, + "width": 500, + "grounding": + {"caption": "Man with a black shirt on sit behind a desk sorting threw a giant stack of people work with a smirk on his face .", + "regions": [ + {"bbox": [0, 0, 1, 1], "phrase": "a giant stack of people", "tokens_positive": [[58, 81]]}, + {"bbox": [0, 0, 1, 1], "phrase": "a black shirt", "tokens_positive": [[9, 22]]}, + {"bbox": [0, 0, 1, 1], "phrase": "a desk", "tokens_positive": [[37, 43]]}, + {"bbox": [0, 0, 1, 1], "phrase": "his face", "tokens_positive": [[103, 111]]}, + {"bbox": [0, 0, 1, 1], "phrase": "Man", "tokens_positive": [[0, 3]]}]}} +{"filename": "6944134083.jpg", + "height": 319, + "width": 500, + "grounding": + {"caption": "Two men are competing in a horse race .", + "regions": [ + {"bbox": [0, 0, 1, 1], "phrase": "Two men", "tokens_positive": [[0, 7]]}]}} +] +``` + +Each bbox must be initialized to `[0, 0, 1, 1]` so that the program can run; this placeholder value is never actually used. The same two samples in the actual single-line JSON Lines form are: + +```text +{"filename": "3028766968.jpg", "height": 375, "width": 500, "grounding": {"caption": "Man with a black shirt on sit behind a desk sorting threw a giant stack of people work with a smirk on his face .", "regions": [{"bbox": [0, 0, 1, 1], "phrase": "a giant stack of people", "tokens_positive": [[58, 81]]}, {"bbox": [0, 0, 1, 1], "phrase": "a black shirt", "tokens_positive": [[9, 22]]}, {"bbox": [0, 0, 1, 1], "phrase": "a desk", "tokens_positive": [[37, 43]]}, {"bbox": [0, 0, 1, 1], "phrase": "his face", "tokens_positive": [[103, 111]]}, {"bbox": [0, 0, 1, 1], "phrase": "Man", "tokens_positive": [[0, 3]]}]}} +{"filename": "6944134083.jpg", "height": 319, "width": 500, "grounding": {"caption": "Two men are competing in a horse race .", "regions": [{"bbox": [0, 0, 1, 1], "phrase": "Two men", "tokens_positive": [[0, 7]]}]}} +``` + +You can copy the text above directly and paste it into a file named `flickr_simple_train_vg.json`, placed in the prepared `data/flickr30k_entities` dataset directory (see the data preparation document for details). + +2. Inference with pre-trained model and save the results + +We provide a directly usable [configuration](grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py). If you are using a different dataset, you can refer to this configuration for modifications. + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +A new file `flickr_simple_train_vg_v1.json` will be generated in the `data/flickr30k_entities` directory. You can manually open it to confirm or use the [script](../../tools/analysis_tools/browse_grounding_raw.py) to visualize the results. + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/flickr30k_entities/ flickr_simple_train_vg_v1.json flickr30k_images -o your_output_dir --not-show +``` + +The visualization results will be generated in the `your_output_dir` directory, as shown in the following image:
+ +
+ +3. Continue training to boost performance + +After obtaining the pseudo-labels, you can mix some pre-training data to continue pre-training jointly, which enhances the model's performance on the current dataset. Then, rerun step 2 to obtain more accurate pseudo-labels, and repeat this cycle iteratively. diff --git a/mmpose/configs/mmdet/mm_grounding_dino/usage_zh-CN.md b/mmpose/configs/mmdet/mm_grounding_dino/usage_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..5f625ea6ca8dc09225aebbe00c424fc0128cf736 --- /dev/null +++ b/mmpose/configs/mmdet/mm_grounding_dino/usage_zh-CN.md @@ -0,0 +1,491 @@ +# 用法说明 + +## 安装 + +在按照 [get_started](../../docs/zh_cn/get_started.md) 一节的说明安装好 MMDet 之后,需要安装额外的依赖包: + +```shell +cd $MMDETROOT + +pip install -r requirements/multimodal.txt +pip install emoji ddd-dataset +pip install git+https://github.com/lvis-dataset/lvis-api.git" +``` + +请注意由于 LVIS 第三方库暂时不支持 numpy 1.24,因此请确保您的 numpy 版本符合要求。建议安装 numpy 1.23 版本。 + +## 说明 + +### BERT 权重下载 + +MM Grounding DINO 采用了 BERT 作为语言模型,需要访问 https://huggingface.co/, 如果您因为网络访问问题遇到连接错误,可以在有网络访问权限的电脑上下载所需文件并保存在本地。最后,修改配置文件中的 `lang_model_name` 字段为本地路径即可。具体请参考以下代码: + +```python +from transformers import BertConfig, BertModel +from transformers import AutoTokenizer + +config = BertConfig.from_pretrained("bert-base-uncased") +model = BertModel.from_pretrained("bert-base-uncased", add_pooling_layer=False, config=config) +tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + +config.save_pretrained("your path/bert-base-uncased") +model.save_pretrained("your path/bert-base-uncased") +tokenizer.save_pretrained("your path/bert-base-uncased") +``` + +### NLTK 权重下载 + +MM Grounding DINO 在进行 Phrase Grounding 推理时候可能会进行名词短语提取,虽然会在运行时候下载特定的模型,但是考虑到有些用户运行环境无法联网,因此可以提前下载到 `~/nltk_data` 路径下 + +```python +import nltk +nltk.download('punkt', download_dir='~/nltk_data') +nltk.download('averaged_perceptron_tagger', download_dir='~/nltk_data') +``` + +### MM Grounding DINO-T 模型权重下载 + +为了方便演示,您可以提前下载 MM Grounding DINO-T 模型权重到当前路径下 + +```shell +wget load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa +``` + +## 推理 + +在推理前,为了更好的体验不同图片的推理效果,建议您先下载 [这些图片](https://github.com/microsoft/X-Decoder/tree/main/inference_demo/images) 到当前路径下 + +MM Grounding DINO 支持了闭集目标检测,开放词汇目标检测,Phrase Grounding 和指代性表达式理解 4 种推理方式,下面详细说明。 + +**(1) 闭集目标检测** + +由于 MM Grounding DINO 是预训练模型,理论上可以应用于任何闭集检测数据集,目前我们支持了常用的 coco/voc/cityscapes/objects365v1/lvis 等,下面以 coco 为例 + +```shell +python demo/image_demo.py images/animals.png \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts '$: coco' +``` + +会在当前路径下生成 `outputs/vis/animals.png` 的预测结果,如下图所示 + +
+ +
+ +由于鸵鸟并不在 COCO 80 类中, 因此不会检测出来。 + +需要注意,由于 objects365v1 和 lvis 类别很多,如果直接将类别名全部输入到网络中,会超过 256 个 token 导致模型预测效果极差,此时我们需要通过 `--chunked-size` 参数进行截断预测, 同时预测时间会比较长。 + +```shell +python demo/image_demo.py images/animals.png \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts '$: lvis' --chunked-size 70 \ + --palette random +``` + +
+ +
+ +不同的 `--chunked-size` 会导致不同的预测效果,您可以自行尝试。 + +**(2) 开放词汇目标检测** + +开放词汇目标检测是指在推理时候,可以输入任意的类别名 + +```shell +python demo/image_demo.py images/animals.png \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'zebra. giraffe' -c +``` + +
+ +
+ +**(3) Phrase Grounding** + +Phrase Grounding 是指的用户输入一句语言描述,模型自动对其涉及到的名词短语想对应的 bbox 进行检测,有两种用法 + +1. 通过 NLTK 库自动提取名词短语,然后进行检测 + +```shell +python demo/image_demo.py images/apples.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'There are many apples here.' +``` + +
+ +
+ +程序内部会自动切分出 `many apples` 作为名词短语,然后检测出对应物体。不同的输入描述对预测结果影响很大。 + +2. 用户自己指定句子中哪些为名词短语,避免 NLTK 提取错误的情况 + +```shell +python demo/image_demo.py images/fruit.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'The picture contains watermelon, flower, and a white bottle.' \ + --tokens-positive "[[[21,31]], [[45,59]]]" --pred-score-thr 0.12 +``` + +21,31 对应的名词短语为 `watermelon`,45,59 对应的名词短语为 `a white bottle`。 + +
+ +
+ +**(4) 指代性表达式理解** + +指代性表达式理解是指的用户输入一句语言描述,模型自动对其涉及到的指代性表达式进行理解, 不需要进行名词短语提取。 + +```shell +python demo/image_demo.py images/apples.jpg \ + configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth \ + --texts 'red apple.' \ + --tokens-positive -1 +``` + +
+ +
+ +## 评测 + +我们所提供的评测脚本都是统一的,你只需要提前准备好数据,然后运行相关配置就可以了 + +(1) Zero-Shot COCO2017 val + +```shell +# 单卡 +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth + +# 8 卡 +./tools/dist_test.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth 8 +``` + +(2) Zero-Shot ODinW13 + +```shell +# 单卡 +python tools/test.py configs/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth + +# 8 卡 +./tools/dist_test.sh configs/mm_grounding_dino/odinw/grounding_dino_swin-t_pretrain_odinw13.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth 8 +``` + +## 评测数据集结果可视化 + +为了方便大家对模型预测结果进行可视化和分析,我们支持了评测数据集预测结果可视化,以指代性表达式理解为例用法如下: + +```shell +python tools/test.py configs/mm_grounding_dino/refcoco/grounding_dino_swin-t_pretrain_zeroshot_refexp \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth --work-dir refcoco_result --show-dir save_path +``` + +模型在推理过程中会将可视化结果保存到 `refcoco_result/{当前时间戳}/save_path` 路径下。其余评测数据集可视化只需要替换配置文件即可。 + +下面展示一些数据集的可视化结果: 左图为 GT,右图为预测结果 + +1. COCO2017 val 结果: + +
+ +
+ +2. Flickr30k Entities 结果: + +
+ +
+ +3. DOD 结果: + +
+ +
+ +4. RefCOCO val 结果: + +
+ +
+ +5. RefCOCO testA 结果: + +
+ +
+ +6. gRefCOCO val 结果: + +
+ +
+ +## 模型训练 + +如果想复现我们的结果,你可以在准备好数据集后,直接通过如下命令进行训练 + +```shell +# 单机 8 卡训练仅包括 obj365v1 数据集 +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365.py 8 +# 单机 8 卡训练包括 obj365v1/goldg/grit/v3det 数据集,其余数据集类似 +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det.py 8 +``` + +多机训练的用法请参考 [train.md](../../docs/zh_cn/user_guides/train.md)。MM-Grounding-DINO T 模型默认采用的是 32 张 3090Ti,如果你的总 bs 数不是 32x4=128,那么你需要手动的线性调整学习率。 + +### 预训练自定义格式说明 + +为了统一不同数据集的预训练格式,我们参考 [Open-GroundingDino](https://github.com/longzw1997/Open-GroundingDino) 所设计的格式。具体来说分成 2 种格式 + +**(1) 目标检测数据格式 OD** + +```text +{"filename": "obj365_train_000000734304.jpg", + "height": 512, + "width": 769, + "detection": { + "instances": [ + {"bbox": [109.4768676992, 346.0190429696, 135.1918335098, 365.3641967616], "label": 2, "category": "chair"}, + {"bbox": [58.612365705900004, 323.2281494016, 242.6005859067, 451.4166870016], "label": 8, "category": "car"} + ] + } +} +``` + +label字典中所对应的数值需要和相应的 label_map 一致。 instances 列表中的每一项都对应一个 bbox (x1y1x2y2 格式)。 + +**(2) phrase grounding 数据格式 VG** + +```text +{"filename": "2405116.jpg", + "height": 375, + "width": 500, + "grounding": + {"caption": "Two surfers walking down the shore. sand on the beach.", + "regions": [ + {"bbox": [206, 156, 282, 248], "phrase": "Two surfers", "tokens_positive": [[0, 3], [4, 11]]}, + {"bbox": [303, 338, 443, 343], "phrase": "sand", "tokens_positive": [[36, 40]]}, + {"bbox": [[327, 223, 421, 282], [300, 200, 400, 210]], "phrase": "beach", "tokens_positive": [[48, 53]]} + ] + } +``` + +tokens_positive 表示当前 phrase 在 caption 中的字符位置。 + +## 自定义数据集微调训练案例 + +为了方便用户针对自定义数据集进行下游微调,我们特意提供了以简单的 cat 数据集为例的微调训练案例。 + +### 1 数据准备 + +```shell +cd mmdetection +wget https://download.openmmlab.com/mmyolo/data/cat_dataset.zip +unzip cat_dataset.zip -d data/cat/ +``` + +cat 数据集是一个单类别数据集,包含 144 张图片,已经转换为 coco 格式。 + +
+cat dataset +
+ +### 2 配置准备 + +由于 cat 数据集的简单性和数量较少,我们使用 8 卡训练 20 个 epoch,相应的缩放学习率,不训练语言模型,只训练视觉模型。 + +详细的配置信息可以在 [grounding_dino_swin-t_finetune_8xb4_20e_cat](grounding_dino_swin-t_finetune_8xb4_20e_cat.py) 中找到。 + +### 3 可视化和 Zero-Shot 评估 + +由于 MM Grounding DINO 是一个开放的检测模型,所以即使没有在 cat 数据集上训练,也可以进行检测和评估。 + +单张图片的可视化结果如下: + +```shell +cd mmdetection +python demo/image_demo.py data/cat/images/IMG_20211205_120756.jpg configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py --weights grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth --texts cat. +``` + +测试集上的 Zero-Shot 评估结果如下: + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.881 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.929 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.881 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.913 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.913 +``` + +### 4 模型训练 + +```shell +./tools/dist_train.sh configs/mm_grounding_dino/grounding_dino_swin-t_finetune_8xb4_20e_cat.py 8 --work-dir cat_work_dir +``` + +模型将会保存性能最佳的模型。在第 16 epoch 时候达到最佳,性能如下所示: + +```text + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.901 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 1.000 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.930 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.901 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.967 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.967 +``` + +我们可以发现,经过微调训练后,cat 数据集的训练性能从 88.1 提升到了 90.1。同时由于数据集比较小,评估指标波动比较大。 + +## 模型自训练伪标签迭代生成和优化 pipeline + +为了方便用户从头构建自己的数据集或者希望利用模型推理能力进行自举式伪标签迭代生成和优化,不断修改伪标签来提升模型性能,我们特意提供了相关的 pipeline。 + +由于我们定义了两种数据格式,为了演示我们也将分别进行说明。 + +### 1 目标检测格式 + +此处我们依然采用上述的 cat 数据集为例,假设我们目前只有一系列图片和预定义的类别,并不存在标注。 + +1. 
生成初始 odvg 格式文件 + +```python +import os +import cv2 +import json +import jsonlines + +data_root = 'data/cat' +images_path = os.path.join(data_root, 'images') +out_path = os.path.join(data_root, 'cat_train_od.json') +metas = [] +for files in os.listdir(images_path): + img = cv2.imread(os.path.join(images_path, files)) + height, width, _ = img.shape + metas.append({"filename": files, "height": height, "width": width}) + +with jsonlines.open(out_path, mode='w') as writer: + writer.write_all(metas) + +# 生成 label_map.json,由于只有一个类别,所以只需要写一个 cat 即可 +label_map_path = os.path.join(data_root, 'cat_label_map.json') +with open(label_map_path, 'w') as f: + json.dump({'0': 'cat'}, f) +``` + +会在 `data/cat` 目录下生成 `cat_train_od.json` 和 `cat_label_map.json` 两个文件。 + +2. 使用预训练模型进行推理,并保存结果 + +我们提供了直接可用的 [配置](grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py), 如果你是其他数据集可以参考这个配置进行修改。 + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_cat.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +会在 `data/cat` 目录下新生成 `cat_train_od_v1.json` 文件,你可以手动打开确认或者使用 [脚本](../../tools/analysis_tools/browse_grounding_raw.py) 可视化效果 + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/cat/ cat_train_od_v1.json images --label-map-file cat_label_map.json -o your_output_dir --not-show +``` + +会在 your_output_dir 目录下生成可视化结果 + +3. 继续训练提高性能 + +在得到伪标签后,你可以混合一些预训练数据联合进行继续预训练,提升模型在当前数据集上的性能,然后重新运行 2 步骤,得到更准确的伪标签,如此循环迭代即可。 + +### 2 Phrase Grounding 格式 + +1. 生成初始 odvg 格式文件 + +Phrase Grounding 的自举流程要求初始时候提供每张图片对应的 caption 和提前切割好的 phrase 信息。以 flickr30k entities 图片为例,生成的典型的文件应该如下所示: + +```text +[ +{"filename": "3028766968.jpg", + "height": 375, + "width": 500, + "grounding": + {"caption": "Man with a black shirt on sit behind a desk sorting threw a giant stack of people work with a smirk on his face .", + "regions": [ + {"bbox": [0, 0, 1, 1], "phrase": "a giant stack of people", "tokens_positive": [[58, 81]]}, + {"bbox": [0, 0, 1, 1], "phrase": "a black shirt", "tokens_positive": [[9, 22]]}, + {"bbox": [0, 0, 1, 1], "phrase": "a desk", "tokens_positive": [[37, 43]]}, + {"bbox": [0, 0, 1, 1], "phrase": "his face", "tokens_positive": [[103, 111]]}, + {"bbox": [0, 0, 1, 1], "phrase": "Man", "tokens_positive": [[0, 3]]}]}} +{"filename": "6944134083.jpg", + "height": 319, + "width": 500, + "grounding": + {"caption": "Two men are competing in a horse race .", + "regions": [ + {"bbox": [0, 0, 1, 1], "phrase": "Two men", "tokens_positive": [[0, 7]]}]}} +] +``` + +初始时候 bbox 必须要设置为 `[0, 0, 1, 1]`,因为这能确保程序正常运行,但是 bbox 的值并不会被使用。 + +```text +{"filename": "3028766968.jpg", "height": 375, "width": 500, "grounding": {"caption": "Man with a black shirt on sit behind a desk sorting threw a giant stack of people work with a smirk on his face .", "regions": [{"bbox": [0, 0, 1, 1], "phrase": "a giant stack of people", "tokens_positive": [[58, 81]]}, {"bbox": [0, 0, 1, 1], "phrase": "a black shirt", "tokens_positive": [[9, 22]]}, {"bbox": [0, 0, 1, 1], "phrase": "a desk", "tokens_positive": [[37, 43]]}, {"bbox": [0, 0, 1, 1], "phrase": "his face", "tokens_positive": [[103, 111]]}, {"bbox": [0, 0, 1, 1], "phrase": "Man", "tokens_positive": [[0, 3]]}]}} +{"filename": "6944134083.jpg", "height": 319, "width": 500, "grounding": {"caption": "Two men are competing in a horse race .", "regions": [{"bbox": [0, 0, 1, 1], "phrase": "Two men", "tokens_positive": [[0, 7]]}]}} +``` + +你可直接复制上面的文本,并假设将文本内容粘贴到命名为 `flickr_simple_train_vg.json` 文件中,并放置于提前准备好的 
`data/flickr30k_entities` 数据集目录下,具体见数据准备文档。 + +2. 使用预训练模型进行推理,并保存结果 + +我们提供了直接可用的 [配置](grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py), 如果你是其他数据集可以参考这个配置进行修改。 + +```shell +python tools/test.py configs/mm_grounding_dino/grounding_dino_swin-t_pretrain_pseudo-labeling_flickr30k.py \ + grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth +``` + +会在 `data/flickr30k_entities` 目录下新生成 `flickr_simple_train_vg_v1.json` 文件,你可以手动打开确认或者使用 [脚本](../../tools/analysis_tools/browse_grounding_raw.py) 可视化效果 + +```shell +python tools/analysis_tools/browse_grounding_raw.py data/flickr30k_entities/ flickr_simple_train_vg_v1.json flickr30k_images -o your_output_dir --not-show +``` + +会在 `your_output_dir` 目录下生成可视化结果,如下图所示: + +
+ +
+ +3. 继续训练提高性能 + +在得到伪标签后,你可以混合一些预训练数据联合进行继续预训练,提升模型在当前数据集上的性能,然后重新运行 2 步骤,得到更准确的伪标签,如此循环迭代即可。 diff --git a/mmpose/configs/mmdet/ms_rcnn/README.md b/mmpose/configs/mmdet/ms_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..abbec9b6851ee135f61a82b82a7a58423b204b97 --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/README.md @@ -0,0 +1,36 @@ +# MS R-CNN + +> [Mask Scoring R-CNN](https://arxiv.org/abs/1903.00241) + + + +## Abstract + +Letting a deep network be aware of the quality of its own predictions is an interesting yet important problem. In the task of instance segmentation, the confidence of instance classification is used as mask quality score in most instance segmentation frameworks. However, the mask quality, quantified as the IoU between the instance mask and its ground truth, is usually not well correlated with classification score. In this paper, we study this problem and propose Mask Scoring R-CNN which contains a network block to learn the quality of the predicted instance masks. The proposed network block takes the instance feature and the corresponding predicted mask together to regress the mask IoU. The mask scoring strategy calibrates the misalignment between mask quality and mask score, and improves instance segmentation performance by prioritizing more accurate mask predictions during COCO AP evaluation. By extensive evaluations on the COCO dataset, Mask Scoring R-CNN brings consistent and noticeable gain with different models, and outperforms the state-of-the-art Mask R-CNN. We hope our simple and effective approach will provide a new direction for improving instance segmentation. + +
+ +
+ +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.5 | | 38.2 | 36.0 | [config](./ms-rcnn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848.log.json) | +| R-50-FPN | caffe | 2x | - | - | 38.8 | 36.3 | [config](./ms-rcnn_r50-caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_20200506_004738.log.json) | +| R-101-FPN | caffe | 1x | 6.5 | | 40.4 | 37.6 | [config](./ms-rcnn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_20200506_004755.log.json) | +| R-101-FPN | caffe | 2x | - | - | 41.1 | 38.1 | [config](./ms-rcnn_r101-caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_20200506_011134.log.json) | +| R-X101-32x4d | pytorch | 2x | 7.9 | 11.0 | 41.8 | 38.7 | [config](./ms-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206_100113.log.json) | +| R-X101-64x4d | pytorch | 1x | 11.0 | 8.0 | 43.0 | 39.5 | [config](./ms-rcnn_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206_091744.log.json) | +| R-X101-64x4d | pytorch | 2x | 11.0 | 8.0 | 42.6 | 39.5 | [config](./ms-rcnn_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308_012247.log.json) | 
+ +## Citation + +```latex +@inproceedings{huang2019msrcnn, + title={Mask Scoring R-CNN}, + author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019}, +} +``` diff --git a/mmpose/configs/mmdet/ms_rcnn/metafile.yml b/mmpose/configs/mmdet/ms_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..290f05436949c68d226d8bc2f107e480acbd6b4c --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/metafile.yml @@ -0,0 +1,159 @@ +Collections: + - Name: Mask Scoring R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1903.00241 + Title: 'Mask Scoring R-CNN' + README: configs/ms_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_scoring_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: ms-rcnn_r50-caffe_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth + + - Name: ms-rcnn_r50-caffe_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth + + - Name: ms-rcnn_r101-caffe_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth + + - Name: ms-rcnn_r101-caffe_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_r101-caffe_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth + + - Name: ms-rcnn_x101-32x4d_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + - Task: Instance Segmentation + 
Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth + + - Name: ms-rcnn_x101-64x4d_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 11.0 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth + + - Name: ms-rcnn_x101-64x4d_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms-rcnn_x101-64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 11.0 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff4f2d66ae6de88ba9d5d8fb5cf31abaa4cb3c5 --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_2x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..54b29e4f7aea547e2b26782b71ada8053930d325 --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r101-caffe_fpn_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './ms-rcnn_r101-caffe_fpn_1x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e7fbc51f1ba431ca7c22ff3d2c74cfc9e1263ffb --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py' +model = dict( + type='MaskScoringRCNN', + roi_head=dict( + type='MaskScoringRoIHead', + mask_iou_head=dict( + type='MaskIoUHead', + num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80)), + # model training and testing settings + train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git 
a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_2x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..033488229220e5b044c30c43f5e72f8468f68224 --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50-caffe_fpn_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0ae47d1c38daa4430de4b4264bbb2aef0eb7f7ea --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +model = dict( + type='MaskScoringRCNN', + roi_head=dict( + type='MaskScoringRoIHead', + mask_iou_head=dict( + type='MaskIoUHead', + num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80)), + # model training and testing settings + train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a5d0d0f3188e8e661cc9ab7a731fc631dd950ac --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ms-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..16290076c07d7a97108b89e4a41b5ff51cbbcdc1 --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ms-rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7aec1874394692a63dc8caeef2609cf01b7bfd7c --- /dev/null +++ b/mmpose/configs/mmdet/ms_rcnn/ms-rcnn_x101-64x4d_fpn_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './ms-rcnn_x101-64x4d_fpn_1x_coco.py' +# learning policy +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + 
by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/nas_fcos/README.md b/mmpose/configs/mmdet/nas_fcos/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a0ec77c8f118f8aeb47ef4cb0efb0022790fa270 --- /dev/null +++ b/mmpose/configs/mmdet/nas_fcos/README.md @@ -0,0 +1,35 @@ +# NAS-FCOS + +> [NAS-FCOS: Fast Neural Architecture Search for Object Detection](https://arxiv.org/abs/1906.04423) + + + +## Abstract + +The success of deep neural networks relies on significant architecture engineering. Recently neural architecture search (NAS) has emerged as a promise to greatly reduce manual effort in network design by automatically searching for optimal architectures, although typically such algorithms need an excessive amount of computational resources, e.g., a few thousand GPU-days. To date, on challenging vision tasks such as object detection, NAS, especially fast versions of NAS, is less studied. Here we propose to search for the decoder structure of object detectors with search efficiency being taken into consideration. To be more specific, we aim to efficiently search for the feature pyramid network (FPN) as well as the prediction head of a simple anchor-free object detector, namely FCOS, using a tailored reinforcement learning paradigm. With carefully designed search space, search algorithms and strategies for evaluating network quality, we are able to efficiently search a top-performing detection architecture within 4 days using 8 V100 GPUs. The discovered architecture surpasses state-of-the-art object detection models (such as Faster R-CNN, RetinaNet and FCOS) by 1.5 to 3.5 points in AP on the COCO dataset, with comparable computation complexity and memory footprint, demonstrating the efficacy of the proposed NAS for object detection. + +
+ +
+ +## Results and Models + +| Head | Backbone | Style | GN-head | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| NAS-FCOSHead | R-50 | caffe | Y | 1x | | | 39.4 | [config](./nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520.log.json) | +| FCOSHead | R-50 | caffe | Y | 1x | | | 38.5 | [config](./nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521.log.json) | + +**Notes:** + +- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU. 
+ +## Citation + +```latex +@article{wang2019fcos, + title={Nas-fcos: Fast neural architecture search for object detection}, + author={Wang, Ning and Gao, Yang and Chen, Hao and Wang, Peng and Tian, Zhi and Shen, Chunhua}, + journal={arXiv preprint arXiv:1906.04423}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/nas_fcos/metafile.yml b/mmpose/configs/mmdet/nas_fcos/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..02292a41516b6b2d5ab87e629f2bd2672e61e0fb --- /dev/null +++ b/mmpose/configs/mmdet/nas_fcos/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: NAS-FCOS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x V100 GPUs + Architecture: + - FPN + - NAS-FCOS + - ResNet + Paper: + URL: https://arxiv.org/abs/1906.04423 + Title: 'NAS-FCOS: Fast Neural Architecture Search for Object Detection' + README: configs/nas_fcos/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/nasfcos.py#L6 + Version: v2.1.0 + +Models: + - Name: nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco + In Collection: NAS-FCOS + Config: configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth + + - Name: nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco + In Collection: NAS-FCOS + Config: configs/nas_fcos/nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth diff --git a/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py b/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ba207c9fbdddc5cd30e4d4d86add2c98664e7ffb --- /dev/null +++ b/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='NASFCOS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False, eps=0), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='NASFCOS_FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + norm_cfg=dict(type='BN'), + conv_cfg=dict(type='DCNv2', deform_groups=2)), + bbox_head=dict( + type='FCOSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_cfg=dict(type='GN', num_groups=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + 
alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# dataset settings +train_dataloader = dict(batch_size=4, num_workers=2) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py b/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..329f34c45ca0ea3f95e8da8505717df86b7c79c0 --- /dev/null +++ b/mmpose/configs/mmdet/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='NASFCOS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False, eps=0), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='NASFCOS_FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + norm_cfg=dict(type='BN'), + conv_cfg=dict(type='DCNv2', deform_groups=2)), + bbox_head=dict( + type='NASFCOSHead', + num_classes=80, + in_channels=256, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_cfg=dict(type='GN', num_groups=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# dataset settings +train_dataloader = dict(batch_size=4, num_workers=2) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/mmpose/configs/mmdet/nas_fpn/README.md b/mmpose/configs/mmdet/nas_fpn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..260ec470fda46ae8d41dd768c5924da59803eb94 --- /dev/null +++ b/mmpose/configs/mmdet/nas_fpn/README.md @@ -0,0 +1,36 @@ +# NAS-FPN + +> [NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection](https://arxiv.org/abs/1904.07392) + + + +## Abstract + +Current state-of-the-art convolutional architectures for object detection are manually designed. Here we aim to learn a better architecture of feature pyramid network for object detection. 
We adopt Neural Architecture Search and discover a new feature pyramid architecture in a novel scalable search space covering all cross-scale connections. The discovered architecture, named NAS-FPN, consists of a combination of top-down and bottom-up connections to fuse features across scales. NAS-FPN, combined with various backbone models in the RetinaNet framework, achieves better accuracy and latency tradeoff compared to state-of-the-art object detection models. NAS-FPN improves mobile detection accuracy by 2 AP compared to state-of-the-art SSDLite with MobileNetV2 model in \[32\] and achieves 48.3 AP which surpasses Mask R-CNN \[10\] detection accuracy with less computation time. + +
+ +
+ +## Results and Models + +We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | 50e | 12.9 | 22.9 | 37.9 | [config](./retinanet_r50_fpn_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco_20200529_095329.log.json) | +| R-50-NASFPN | 50e | 13.2 | 23.0 | 40.5 | [config](./retinanet_r50_nasfpn_crop640-50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco_20200528_230008.log.json) | + +**Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. + +## Citation + +```latex +@inproceedings{ghiasi2019fpn, + title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, + author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={7036--7045}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/nas_fpn/metafile.yml b/mmpose/configs/mmdet/nas_fpn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..aef0df6d7f38c71d691526004c0f1d19d66744b0 --- /dev/null +++ b/mmpose/configs/mmdet/nas_fpn/metafile.yml @@ -0,0 +1,59 @@ +Collections: + - Name: NAS-FPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - NAS-FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.07392 + Title: 'NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection' + README: configs/nas_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/nas_fpn.py#L67 + Version: v2.0.0 + +Models: + - Name: retinanet_r50_fpn_crop640-50e_coco + In Collection: NAS-FPN + Config: configs/nas_fpn/retinanet_r50_fpn_crop640-50e_coco.py + Metadata: + Training Memory (GB): 12.9 + inference time (ms/im): + - value: 43.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth + + - Name: retinanet_r50_nasfpn_crop640-50e_coco + In Collection: NAS-FPN + Config: configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py + Metadata: + Training Memory (GB): 13.2 + inference time (ms/im): + - 
value: 43.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth diff --git a/mmpose/configs/mmdet/nas_fpn/retinanet_r50_fpn_crop640-50e_coco.py b/mmpose/configs/mmdet/nas_fpn/retinanet_r50_fpn_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..11c34f6758a4862571e3f840424341c3964115be --- /dev/null +++ b/mmpose/configs/mmdet/nas_fpn/retinanet_r50_fpn_crop640-50e_coco.py @@ -0,0 +1,78 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=64, + batch_augments=[dict(type='BatchFixedSizePad', size=(640, 640))]), + backbone=dict(norm_eval=False), + neck=dict( + relu_before_extra_convs=True, + no_norm_on_lateral=True, + norm_cfg=norm_cfg), + bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), + # training and testing settings + train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# training schedule for 50e +max_epochs = 50 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[30, 40], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) + +env_cfg = dict(cudnn_benchmark=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py b/mmpose/configs/mmdet/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a851b745defb72aa05df289a3002c1534655d118 --- /dev/null +++ b/mmpose/configs/mmdet/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py @@ -0,0 +1,16 @@ +_base_ = './retinanet_r50_fpn_crop640-50e_coco.py' + +# model settings +model = dict( + # `pad_size_divisor=128` ensures the feature maps sizes + # in `NAS_FPN` won't mismatch. + data_preprocessor=dict(pad_size_divisor=128), + neck=dict( + _delete_=True, + type='NASFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + stack_times=7, + start_level=1, + norm_cfg=dict(type='BN', requires_grad=True))) diff --git a/mmpose/configs/mmdet/objects365/README.md b/mmpose/configs/mmdet/objects365/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fca0dbfc94505b437598a02ba9e2c6cf10778834 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/README.md @@ -0,0 +1,102 @@ +# Objects365 Dataset + +> [Objects365 Dataset](https://openaccess.thecvf.com/content_ICCV_2019/papers/Shao_Objects365_A_Large-Scale_High-Quality_Dataset_for_Object_Detection_ICCV_2019_paper.pdf) + + + +## Abstract + + + +#### Objects365 Dataset V1 + +[Objects365 Dataset V1](http://www.objects365.org/overview.html) is a brand new dataset, +designed to spur object detection research with a focus on diverse objects in the Wild. +It has 365 object categories over 600K training images. More than 10 million, high-quality bounding boxes are manually labeled through a three-step, carefully designed annotation pipeline. It is the largest object detection dataset (with full annotation) so far and establishes a more challenging benchmark for the community. Objects365 can serve as a better feature learning dataset for localization-sensitive tasks like object detection +and semantic segmentation. + + + +
+ +
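+Supporting Objects365 in this repo mainly amounts to swapping in an Objects365 dataset base config and setting 365 classes. The sketch below mirrors `faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py` further down and is meant only as an illustration.
+
+```python
+# Illustrative sketch: adapting Faster R-CNN to Objects365 v1.
+_base_ = [
+    '../_base_/models/faster-rcnn_r50_fpn.py',
+    '../_base_/datasets/objects365v1_detection.py',  # dataset paths and pipelines
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+
+# The only model-side change is the number of classes (365 instead of COCO's 80).
+model = dict(roi_head=dict(bbox_head=dict(num_classes=365)))
+```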
+
+#### Objects365 Dataset V2
+
+[Objects365 Dataset V2](http://www.objects365.org/overview.html) is based on the V1 release of the Objects365 dataset.
+Objects365 V2 annotates 365 object classes on more than 1,800k images, with more than 29 million bounding boxes in the training set, surpassing the PASCAL VOC, ImageNet, and COCO datasets.
+It covers 11 super-categories (people, clothing, living room, bathroom, kitchen, office/medical, electrical appliances, transportation, food, animals, and sports/musical instruments), and each category has dozens of subcategories.
+
+## Citation
+
+```
+@inproceedings{shao2019objects365,
+  title={Objects365: A large-scale, high-quality dataset for object detection},
+  author={Shao, Shuai and Li, Zeming and Zhang, Tianyuan and Peng, Chao and Yu, Gang and Zhang, Xiangyu and Li, Jing and Sun, Jian},
+  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
+  pages={8430--8439},
+  year={2019}
+}
+```
+
+## Prepare Dataset
+
+1. You need to download and extract the Objects365 dataset. Users can download Objects365 V2 with `tools/misc/download_dataset.py`.
+
+   **Usage**
+
+   ```shell
+   python tools/misc/download_dataset.py --dataset-name objects365v2 \
+   --save-dir ${SAVING PATH} \
+   --unzip \
+   --delete  # Optional, delete the downloaded zip files
+   ```
+
+   **Note:** There is no download link for Objects365 V1 right now. If you would like to download Objects365 V1, please visit the [official website](http://www.objects365.org/) to contact the authors.
+
+2. The directory structure should look like this:
+
+   ```none
+   mmdetection
+   ├── mmdet
+   ├── tools
+   ├── configs
+   ├── data
+   │   ├── Objects365
+   │   │   ├── Obj365_v1
+   │   │   │   ├── annotations
+   │   │   │   │   ├── objects365_train.json
+   │   │   │   │   ├── objects365_val.json
+   │   │   │   ├── train  # training images
+   │   │   │   ├── val  # validation images
+   │   │   ├── Obj365_v2
+   │   │   │   ├── annotations
+   │   │   │   │   ├── zhiyuan_objv2_train.json
+   │   │   │   │   ├── zhiyuan_objv2_val.json
+   │   │   │   ├── train  # training images
+   │   │   │   │   ├── patch0
+   │   │   │   │   ├── patch1
+   │   │   │   │   ├── ...
+   │   │   │   ├── val  # validation images
+   │   │   │   │   ├── patch0
+   │   │   │   │   ├── patch1
+   │   │   │   │   ├── ...
+ ``` + +## Results and Models + +### Objects365 V1 + +| Architecture | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :----------: | :------: | :-----: | :-----: | :------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 | pytorch | 1x | - | 19.6 | [config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226-9ff10f95.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226.log.json) | +| Faster R-CNN | R-50 | pytorch | 1350K | - | 22.3 | [config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/faster-rcnn_r50-syncbn_fpn_1350k_objects365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457-337d8965.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457.log.json) | +| Retinanet | R-50 | pytorch | 1x | - | 14.8 | [config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/retinanet_r50_fpn_1x_objects365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859-ba3e3dd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859.log.json) | +| Retinanet | R-50 | pytorch | 1350K | - | 18.0 | [config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237-7517c576.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237.log.json) | + +### Objects365 V2 + +| Architecture | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :----------: | :------: | :-----: | :-----: | :------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 | pytorch | 1x | - | 19.8 | 
[config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v2.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040-5910b015.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040.log.json) | +| Retinanet | R-50 | pytorch | 1x | - | 16.7 | [config](https://github.com/open-mmlab/mmdetection/tree/main/configs/objects365/retinanet_r50_fpn_1x_objects365v2.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105-d9b191f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105.log.json) | diff --git a/mmpose/configs/mmdet/objects365/faster-rcnn_r50-syncbn_fpn_1350k_objects365v1.py b/mmpose/configs/mmdet/objects365/faster-rcnn_r50-syncbn_fpn_1350k_objects365v1.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7d0a360b95b1a72f779a8f7ad22a7e03235720 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/faster-rcnn_r50-syncbn_fpn_1350k_objects365v1.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/objects365v2_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)), + roi_head=dict(bbox_head=dict(num_classes=365))) + +# training schedule for 1350K +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=1350000, # 36 epochs + val_interval=150000) + +# Using 8 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=1350000, + by_epoch=False, + milestones=[900000, 1200000], + gamma=0.1) +] + +train_dataloader = dict(sampler=dict(type='InfiniteSampler')) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=150000)) + +log_processor = dict(by_epoch=False) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py b/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0d96fa22920a34f9ab9437a0f15cc93f46d0fa --- /dev/null +++ b/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/objects365v1_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=365))) + +train_dataloader = dict( + batch_size=4, # using 16 GPUS while training. 
total batch size is 16 x 4) +) + +# Using 32 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v2.py b/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v2.py new file mode 100644 index 0000000000000000000000000000000000000000..1090678f652444c82a627fbf8bdda39fe0077f1e --- /dev/null +++ b/mmpose/configs/mmdet/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v2.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/objects365v2_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=365))) + +train_dataloader = dict( + batch_size=4, # using 16 GPUS while training. total batch size is 16 x 4) +) + +# Using 32 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/objects365/metafile.yml b/mmpose/configs/mmdet/objects365/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..d43e8bde9d2aad9516f5383cd4152faf8f097660 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/metafile.yml @@ -0,0 +1,101 @@ +- Name: retinanet_r50_fpn_1x_objects365v1 + In Collection: RetinaNet + Config: configs/objects365/retinanet_r50_fpn_1x_objects365v1.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Training Data: Objects365 v1 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Objects365 v1 + Metrics: + box AP: 14.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859-ba3e3dd5.pth + +- Name: retinanet_r50-syncbn_fpn_1350k_objects365v1 + In Collection: RetinaNet + Config: configs/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py + Metadata: + Training Memory (GB): 7.6 + Iterations: 1350000 + Training Data: Objects365 v1 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Objects365 v1 + Metrics: + box AP: 18.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237-7517c576.pth + +- Name: retinanet_r50_fpn_1x_objects365v2 + In Collection: RetinaNet + Config: configs/objects365/retinanet_r50_fpn_1x_objects365v2.py + Metadata: + Training Memory (GB): 7.2 + Epochs: 12 + Training Data: Objects365 v2 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Objects365 v2 + Metrics: + box AP: 16.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105-d9b191f1.pth + +- Name: faster-rcnn_r50_fpn_16xb4-1x_objects365v1 + In Collection: Faster R-CNN + Config: configs/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v1.py + Metadata: + Training Memory (GB): 11.4 + Epochs: 12 + Training Data: Objects365 v1 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Objects365 v1 + Metrics: + box AP: 19.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226-9ff10f95.pth + +- Name: faster-rcnn_r50-syncbn_fpn_1350k_objects365v1 + In Collection: Faster R-CNN + Config: configs/objects365/faster-rcnn_r50-syncbn_fpn_1350k_objects365v1.py + Metadata: + Training Memory (GB): 8.6 + Iterations: 1350000 + Training Data: Objects365 v1 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Objects365 v1 + Metrics: + box AP: 22.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457-337d8965.pth + +- Name: faster-rcnn_r50_fpn_16xb4-1x_objects365v2 + In Collection: Faster R-CNN + Config: configs/objects365/faster-rcnn_r50_fpn_16xb4-1x_objects365v2.py + Metadata: + Training Memory (GB): 10.8 + Epochs: 12 + Training Data: Objects365 v1 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: 
Objects365 v2 + Metrics: + box AP: 19.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040-5910b015.pth diff --git a/mmpose/configs/mmdet/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py b/mmpose/configs/mmdet/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py new file mode 100644 index 0000000000000000000000000000000000000000..c41dfce8bc67e7f4d18434a2c10a33c66da403c1 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/objects365v2_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict(num_classes=365)) + +# training schedule for 1350K +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=1350000, # 36 epochs + val_interval=150000) + +# Using 8 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=10000), + dict( + type='MultiStepLR', + begin=0, + end=1350000, + by_epoch=False, + milestones=[900000, 1200000], + gamma=0.1) +] + +train_dataloader = dict(sampler=dict(type='InfiniteSampler')) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=150000)) + +log_processor = dict(by_epoch=False) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v1.py b/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v1.py new file mode 100644 index 0000000000000000000000000000000000000000..72144192aaa36d757053a982ed7ad2a886916b75 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v1.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/objects365v1_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(bbox_head=dict(num_classes=365)) + +# Using 8 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=10000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v2.py b/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v2.py new file mode 100644 index 0000000000000000000000000000000000000000..219544126ab0ab6e93d50f1962ffaf40f25b14f0 --- /dev/null +++ b/mmpose/configs/mmdet/objects365/retinanet_r50_fpn_1x_objects365v2.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/objects365v2_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(bbox_head=dict(num_classes=365)) + +# Using 8 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=10000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/ocsort/README.md b/mmpose/configs/mmdet/ocsort/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e9b86c6c6c1ca167b875c5d3241af28dc4919358 --- /dev/null +++ b/mmpose/configs/mmdet/ocsort/README.md @@ -0,0 +1,56 @@ +# Observation-Centric SORT: Rethinking SORT for Robust Multi-Object Tracking + +## Abstract + + + +Multi-Object Tracking (MOT) has rapidly progressed with the development of object detection and re-identification. However, motion modeling, which facilitates object association by forecasting short-term trajec- tories with past observations, has been relatively under-explored in recent years. Current motion models in MOT typically assume that the object motion is linear in a small time window and needs continuous observations, so these methods are sensitive to occlusions and non-linear motion and require high frame-rate videos. In this work, we show that a simple motion model can obtain state-of-the-art tracking performance without other cues like appearance. We emphasize the role of “observation” when recovering tracks from being lost and reducing the error accumulated by linear motion models during the lost period. We thus name the proposed method as Observation-Centric SORT, OC-SORT for short. It remains simple, online, and real-time but improves robustness over occlusion and non-linear motion. It achieves 63.2 and 62.1 HOTA on MOT17 and MOT20, respectively, surpassing all published methods. It also sets new states of the art on KITTI Pedestrian Tracking and DanceTrack where the object motion is highly non-linear + + + +
+ +
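+In this repo, OC-SORT reuses the ByteTrack detector and only overrides the tracker. The sketch below mirrors the config in this folder (the thresholds are the values shipped here, not tuning advice) and adds explanatory comments.
+
+```python
+# Illustrative sketch: turning the ByteTrack config into OC-SORT by swapping the tracker.
+_base_ = [
+    '../bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py',  # noqa: E501
+]
+
+model = dict(
+    type='OCSORT',
+    tracker=dict(
+        _delete_=True,                     # drop the inherited ByteTrack tracker settings
+        type='OCSORTTracker',
+        motion=dict(type='KalmanFilter'),  # simple linear motion model
+        obj_score_thr=0.3,                 # detections below this score are ignored
+        init_track_thr=0.7,                # score required to start a new track
+        weight_iou_with_det_scores=True,
+        match_iou_thr=0.3,
+        num_tentatives=3,
+        vel_consist_weight=0.2,            # observation-centric velocity consistency term
+        vel_delta_t=3,
+        num_frames_retain=30))
+```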
+
+## Citation
+
+
+
+```latex
+@article{cao2022observation,
+  title={Observation-Centric SORT: Rethinking SORT for Robust Multi-Object Tracking},
+  author={Cao, Jinkun and Weng, Xinshuo and Khirodkar, Rawal and Pang, Jiangmiao and Kitani, Kris},
+  journal={arXiv preprint arXiv:2203.14360},
+  year={2022}
+}
+```
+
+## Results and models on MOT17
+
+The performance on `MOT17-half-val` is comparable with that of [the official OC-SORT implementation](https://github.com/noahcao/OC_SORT). We use the same YOLOX detector weights as in [ByteTrack](https://github.com/open-mmlab/mmtracking/tree/master/configs/mot/bytetrack).
+
+| Method | Detector | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download |
+| :-----: | :------: | :---------------------: | :------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :-------------------------------------------------------: | :------: |
+| OC-SORT | YOLOX-X | CrowdHuman + half-train | half-val | N | - | 67.5 | 77.5 | 78.2 | 15987 | 19590 | 855 | [config](ocsort_yolox_x_crowdhuman_mot17-private-half.py) | [model](https://download.openmmlab.com/mmtracking/mot/ocsort/mot_dataset/ocsort_yolox_x_crowdhuman_mot17-private-half_20220813_101618-fe150582.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/ocsort/mot_dataset/ocsort_yolox_x_crowdhuman_mot17-private-half_20220813_101618.log.json) |
+
+## Get started
+
+### 1. Development Environment Setup
+
+Please refer to this [document](../../docs/en/get_started.md) to set up the tracking development environment.
+
+### 2. Dataset Preparation
+
+Please refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md) to prepare the tracking datasets.
+
+### 3. Training
+
+OC-SORT training is the same as ByteTrack; please refer to this [document](../../configs/bytetrack/README.md).
+
+### 4. Testing and Evaluation
+
+OC-SORT testing and evaluation are the same as ByteTrack; please refer to this [document](../../configs/bytetrack/README.md).
+
+### 5. Inference
+
+OC-SORT inference is the same as ByteTrack; please refer to this [document](../../configs/bytetrack/README.md).
diff --git a/mmpose/configs/mmdet/ocsort/metafile.yml b/mmpose/configs/mmdet/ocsort/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..0a31ef108ea7c594d3566970763ff704234d4e0c --- /dev/null +++ b/mmpose/configs/mmdet/ocsort/metafile.yml @@ -0,0 +1,27 @@ +Collections: + - Name: OCSORT + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x V100 GPUs + Architecture: + - YOLOX + Paper: + URL: https://arxiv.org/abs/2203.14360 + Title: Observation-Centric SORT Rethinking SORT for Robust Multi-Object Tracking + README: configs/ocsort/README.md + +Models: + - Name: ocsort_yolox_x_crowdhuman_mot17-private-half + In Collection: OCSORT + Config: configs/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: CrowdHuman + MOT17-half-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT17-half-val + Metrics: + HOTA: 67.5 + MOTA: 77.5 + IDF1: 78.2 + Weights: https://download.openmmlab.com/mmtracking/mot/ocsort/mot_dataset/ocsort_yolox_x_crowdhuman_mot17-private-half_20220813_101618-fe150582.pth diff --git a/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..ea04923d6aec237c51b7e23d0348c487cb9d697b --- /dev/null +++ b/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,18 @@ +_base_ = [ + '../bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501 +] + +model = dict( + type='OCSORT', + tracker=dict( + _delete_=True, + type='OCSORTTracker', + motion=dict(type='KalmanFilter'), + obj_score_thr=0.3, + init_track_thr=0.7, + weight_iou_with_det_scores=True, + match_iou_thr=0.3, + num_tentatives=3, + vel_consist_weight=0.2, + vel_delta_t=3, + num_frames_retain=30)) diff --git a/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py b/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py new file mode 100644 index 0000000000000000000000000000000000000000..ea04923d6aec237c51b7e23d0348c487cb9d697b --- /dev/null +++ b/mmpose/configs/mmdet/ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot20train_test-mot20test.py @@ -0,0 +1,18 @@ +_base_ = [ + '../bytetrack/bytetrack_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501 +] + +model = dict( + type='OCSORT', + tracker=dict( + _delete_=True, + type='OCSORTTracker', + motion=dict(type='KalmanFilter'), + obj_score_thr=0.3, + init_track_thr=0.7, + weight_iou_with_det_scores=True, + match_iou_thr=0.3, + num_tentatives=3, + vel_consist_weight=0.2, + vel_delta_t=3, + num_frames_retain=30)) diff --git a/mmpose/configs/mmdet/openimages/README.md b/mmpose/configs/mmdet/openimages/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ccfc721da568833222038000ac1a5ea12e9bb732 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/README.md @@ -0,0 +1,149 @@ +# Open Images Dataset + +> [Open Images Dataset](https://arxiv.org/abs/1811.00982) + + + +## Abstract + + + +#### Open Images v6 + +[Open Images](https://storage.googleapis.com/openimages/web/index.html) is a dataset of ~9M images annotated with image-level labels, +object bounding boxes, object segmentation masks, visual relationships, +and 
localized narratives: + +- It contains a total of 16M bounding boxes for 600 object classes on + 1.9M images, making it the largest existing dataset with object location + annotations. The boxes have been largely manually drawn by professional + annotators to ensure accuracy and consistency. The images are very diverse + and often contain complex scenes with several objects (8.3 per image on + average). + +- Open Images also offers visual relationship annotations, indicating pairs + of objects in particular relations (e.g. "woman playing guitar", "beer on + table"), object properties (e.g. "table is wooden"), and human actions (e.g. + "woman is jumping"). In total it has 3.3M annotations from 1,466 distinct + relationship triplets. + +- In V5 we added segmentation masks for 2.8M object instances in 350 classes. + Segmentation masks mark the outline of objects, which characterizes their + spatial extent to a much higher level of detail. + +- In V6 we added 675k localized narratives: multimodal descriptions of images + consisting of synchronized voice, text, and mouse traces over the objects being + described. (Note we originally launched localized narratives only on train in V6, + but since July 2020 we also have validation and test covered.) + +- Finally, the dataset is annotated with 59.9M image-level labels spanning 19,957 + classes. + +We believe that having a single dataset with unified annotations for image +classification, object detection, visual relationship detection, instance +segmentation, and multimodal image descriptions will enable to study these +tasks jointly and stimulate progress towards genuine scene understanding. + + + +
+ +
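+In this repo, those annotations (boxes, the class hierarchy, and image-level labels) are read through the `OpenImagesDataset` class. The sketch below mirrors the training dataset entry of the SSD config later in this folder; the paths follow the directory layout described under *Prepare Dataset*.
+
+```python
+# Illustrative sketch of an Open Images training dataset entry.
+dataset = dict(
+    type='OpenImagesDataset',
+    data_root='data/OpenImages/',
+    ann_file='annotations/oidv6-train-annotations-bbox.csv',      # box annotations
+    data_prefix=dict(img='OpenImages/train/'),                    # image folder
+    label_file='annotations/class-descriptions-boxable.csv',      # class id -> name
+    hierarchy_file='annotations/bbox_labels_600_hierarchy.json',  # class hierarchy
+    meta_file='annotations/train-image-metas.pkl',                # generated by get_image_metas.py
+    pipeline=[])  # the training pipeline is omitted in this sketch
+```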
+
+#### Open Images Challenge 2019
+
+[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) is based on the V5 release of the Open Images dataset. The images of the dataset are very varied and often contain complex scenes with several objects (explore the dataset).
+
+## Citation
+
+```
+@article{OpenImages,
+  author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Alexander Kolesnikov and Tom Duerig and Vittorio Ferrari},
+  title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale},
+  year = {2020},
+  journal = {IJCV}
+}
+```
+
+## Prepare Dataset
+
+1. You need to download and extract the Open Images dataset.
+
+2. The Open Images dataset does not provide image metas (the width and height of each image), which are needed during training and testing (evaluation). We suggest generating the image metas before training/testing with `tools/misc/get_image_metas.py`.
+
+   **Usage**
+
+   ```shell
+   python tools/misc/get_image_metas.py ${CONFIG} \
+   --dataset ${DATASET TYPE} \  # train or val or test
+   --out ${OUTPUT FILE NAME}
+   ```
+
+3. The directory structure should look like this:
+
+   ```none
+   mmdetection
+   ├── mmdet
+   ├── tools
+   ├── configs
+   ├── data
+   │   ├── OpenImages
+   │   │   ├── annotations
+   │   │   │   ├── bbox_labels_600_hierarchy.json
+   │   │   │   ├── class-descriptions-boxable.csv
+   │   │   │   ├── oidv6-train-annotations-bbox.csv
+   │   │   │   ├── validation-annotations-bbox.csv
+   │   │   │   ├── validation-annotations-human-imagelabels-boxable.csv
+   │   │   │   ├── validation-image-metas.pkl  # get from script
+   │   │   ├── challenge2019
+   │   │   │   ├── challenge-2019-train-detection-bbox.txt
+   │   │   │   ├── challenge-2019-validation-detection-bbox.txt
+   │   │   │   ├── class_label_tree.np
+   │   │   │   ├── class_sample_train.pkl
+   │   │   │   ├── challenge-2019-validation-detection-human-imagelabels.csv  # download from official website
+   │   │   │   ├── challenge-2019-validation-metas.pkl  # get from script
+   │   │   ├── OpenImages
+   │   │   │   ├── train  # training images
+   │   │   │   ├── test  # testing images
+   │   │   │   ├── validation  # validation images
+   ```
+
+**Note**:
+
+1. The training and validation images of the Open Images Challenge dataset are based on
+   Open Images v6, but the test images are different.
+2. The Open Images Challenge annotations are obtained from [TSD](https://github.com/Sense-X/TSD).
+   You can also download the annotations from the [official website](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html)
+   and set `data.train.type=OpenImagesDataset`, `data.val.type=OpenImagesDataset`, and `data.test.type=OpenImagesDataset` in the config.
+3. If users do not want to use `validation-annotations-human-imagelabels-boxable.csv` and `challenge-2019-validation-detection-human-imagelabels.csv`,
+   they can set `val_dataloader.dataset.image_level_ann_file=None` and `test_dataloader.dataset.image_level_ann_file=None` in the config.
+   Please note that loading image-level labels is the default for the Open Images evaluation metric.
+ More details please refer to the [official website](https://storage.googleapis.com/openimages/web/evaluation.html) + +## Results and Models + +| Architecture | Backbone | Style | Lr schd | Sampler | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------------------------: | :------: | :-----: | :-----: | :-----------------: | :------: | :------------: | :----: | :------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 51.6 | [config](./faster-rcnn_r50_fpn_32xb2-1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159.log.json) | +| Faster R-CNN | R-50 | pytorch | 1x | Class Aware Sampler | 7.7 | - | 60.0 | [config](./faster-rcnn_r50_fpn_32xb2-cas-1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424.log.json) | +| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 54.9 | [config](./faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100.log.json) | +| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Class Aware Sampler | 7.1 | - | 65.0 | [config](./faster-rcnn_r50_fpn_32xb2-cas-1x_openimages-challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021.log.json) | +| Retinanet | R-50 | pytorch | 1x | Group Sampler | 6.6 | - | 61.5 | [config](./retinanet_r50_fpn_32xb2-1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954.log.json) | +| SSD | VGG16 | pytorch | 36e | Group Sampler | 10.8 | - | 35.4 | [config](./ssd300_32xb8-36e_openimages.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232.log.json) | + +**Notes:** + +- 'cas' is short for 'Class Aware Sampler' + +### Results of consider image level labels + +| Architecture | Sampler | Consider Image Level Labels | box AP | +| :-------------------------------: | :-----------------: | :-------------------------: | :----: | +| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/o | 62.19 | +| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/ | 54.87 | +| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/o | 71.77 | +| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/ | 64.98 | diff --git a/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py new file mode 100644 index 0000000000000000000000000000000000000000..e79a92cccb2e432e5dd60bc080dab76781eb32bc --- /dev/null +++ b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py @@ -0,0 +1,39 @@ +_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages.py'] + +model = dict( + roi_head=dict(bbox_head=dict(num_classes=500)), + test_cfg=dict(rcnn=dict(score_thr=0.01))) + +# dataset settings +dataset_type = 'OpenImagesChallengeDataset' +train_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='challenge2019/challenge-2019-train-detection-bbox.txt', + label_file='challenge2019/cls-label-description.csv', + hierarchy_file='challenge2019/class_label_tree.np', + meta_file='challenge2019/challenge-2019-train-metas.pkl')) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt', + data_prefix=dict(img='OpenImages/'), + label_file='challenge2019/cls-label-description.csv', + hierarchy_file='challenge2019/class_label_tree.np', + meta_file='challenge2019/challenge-2019-validation-metas.pkl', + image_level_ann_file='challenge2019/challenge-2019-validation-' + 'detection-human-imagelabels.csv')) +test_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt', + label_file='challenge2019/cls-label-description.csv', + hierarchy_file='challenge2019/class_label_tree.np', + meta_file='challenge2019/challenge-2019-validation-metas.pkl', + image_level_ann_file='challenge2019/challenge-2019-validation-' + 'detection-human-imagelabels.csv')) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages.py b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages.py new file mode 100644 index 0000000000000000000000000000000000000000..f3f0aa0a0ff0ef16cd6e55543a72b5fe405ec5a8 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/openimages_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=601))) + +# Using 32 GPUS while training +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 64, + by_epoch=False, + begin=0, + end=26000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages-challenge.py b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages-challenge.py new file mode 100644 index 0000000000000000000000000000000000000000..9e428725bcc39d2c009a2382c191fa53fe5ce284 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages-challenge.py @@ -0,0 +1,5 @@ +_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py'] + +# Use ClassAwareSampler +train_dataloader = dict( + sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1)) diff --git a/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages.py b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages.py new file mode 100644 index 0000000000000000000000000000000000000000..803190abfee63ea87e70dfe1b0fddca02f3556b8 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages.py @@ -0,0 +1,5 @@ +_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages.py'] + +# Use ClassAwareSampler +train_dataloader = dict( + sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1)) diff --git a/mmpose/configs/mmdet/openimages/metafile.yml b/mmpose/configs/mmdet/openimages/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..76c1209471921610f791a074ed7a6863cd0709c0 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/metafile.yml @@ -0,0 +1,102 @@ +Models: + - Name: faster-rcnn_r50_fpn_32x2_1x_openimages + In Collection: Faster R-CNN + Config: configs/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 51.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth + + - Name: retinanet_r50_fpn_32xb2-1x_openimages + In Collection: RetinaNet + Config: 
configs/openimages/retinanet_r50_fpn_32xb2-1x_openimages.py + Metadata: + Training Memory (GB): 6.6 + Epochs: 12 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 61.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth + + - Name: ssd300_32xb8-36e_openimages + In Collection: SSD + Config: configs/openimages/ssd300_32xb8-36e_openimages.py + Metadata: + Training Memory (GB): 10.8 + Epochs: 36 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 35.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth + + - Name: faster-rcnn_r50_fpn_32x2_1x_openimages_challenge + In Collection: Faster R-CNN + Config: configs/openimages/faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 54.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth + + - Name: faster-rcnn_r50_fpn_32x2_cas_1x_openimages + In Collection: Faster R-CNN + Config: configs/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 60.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth + + - Name: faster-rcnn_r50_fpn_32x2_cas_1x_openimages_challenge + In Collection: Faster R-CNN + Config: configs/openimages/faster-rcnn_r50_fpn_32xb2-cas-1x_openimages-challenge.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 65.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth diff --git a/mmpose/configs/mmdet/openimages/retinanet_r50_fpn_32xb2-1x_openimages.py b/mmpose/configs/mmdet/openimages/retinanet_r50_fpn_32xb2-1x_openimages.py new file mode 100644 index 0000000000000000000000000000000000000000..97a0eb075c730ceeaa494190e0b8369706c7d7c3 --- /dev/null +++ b/mmpose/configs/mmdet/openimages/retinanet_r50_fpn_32xb2-1x_openimages.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/openimages_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(bbox_head=dict(num_classes=601)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + 
start_factor=1.0 / 64, + by_epoch=False, + begin=0, + end=26000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/openimages/ssd300_32xb8-36e_openimages.py b/mmpose/configs/mmdet/openimages/ssd300_32xb8-36e_openimages.py new file mode 100644 index 0000000000000000000000000000000000000000..9cb51cae00a8707c0a901b99620851132e9eaccf --- /dev/null +++ b/mmpose/configs/mmdet/openimages/ssd300_32xb8-36e_openimages.py @@ -0,0 +1,88 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' +] +model = dict( + bbox_head=dict( + num_classes=601, + anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) +# dataset settings +dataset_type = 'OpenImagesDataset' +data_root = 'data/OpenImages/' +input_size = 300 +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'instances')) +] + +train_dataloader = dict( + batch_size=8, # using 32 GPUS while training. total batch size is 32 x 8 + batch_sampler=None, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=3, # repeat 3 times, total epochs are 12 x 3 + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/oidv6-train-annotations-bbox.csv', + data_prefix=dict(img='OpenImages/train/'), + label_file='annotations/class-descriptions-boxable.csv', + hierarchy_file='annotations/bbox_labels_600_hierarchy.json', + meta_file='annotations/train-image-metas.pkl', + pipeline=train_pipeline))) +val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)) +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.001, + by_epoch=False, + begin=0, + end=20000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=256) diff --git a/mmpose/configs/mmdet/paa/README.md b/mmpose/configs/mmdet/paa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..625aacf24516087cefe1082271f25baf536bc03d --- /dev/null +++ b/mmpose/configs/mmdet/paa/README.md @@ -0,0 +1,47 @@ +# PAA + +> [Probabilistic Anchor Assignment with IoU Prediction for Object Detection](https://arxiv.org/abs/2007.08103) + + + +## Abstract + +In object detection, determining which anchors to assign as positive or negative samples, known as anchor assignment, has been revealed as a core procedure that can significantly affect a model's performance. In this paper we propose a novel anchor assignment strategy that adaptively separates anchors into positive and negative samples for a ground truth bounding box according to the model's learning status such that it is able to reason about the separation in a probabilistic manner. To do so we first calculate the scores of anchors conditioned on the model and fit a probability distribution to these scores. The model is then trained with anchors separated into positive and negative samples according to their probabilities. Moreover, we investigate the gap between the training and testing objectives and propose to predict the Intersection-over-Unions of detected boxes as a measure of localization quality to reduce the discrepancy. The combined score of classification and localization qualities serving as a box selection metric in non-maximum suppression well aligns with the proposed anchor assignment strategy and leads significant performance improvements. The proposed methods only add a single convolutional layer to RetinaNet baseline and does not require multiple anchors per location, so are efficient. Experimental results verify the effectiveness of the proposed methods. Especially, our models set new records for single-stage detectors on MS COCO test-dev dataset with various backbones. + +
+ +
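+The assignment step described above can be illustrated with a toy example: collect the per-anchor scores for one ground-truth box, fit a two-component Gaussian mixture to them, and treat the anchors claimed by the higher-scoring component as positives. The sketch below is only a self-contained illustration of that idea (using scikit-learn for the mixture fit); it is not the `PAAHead` implementation used by these configs.
+
+```python
+# Toy sketch of probabilistic anchor assignment for a single ground-truth box.
+# `anchor_scores` stands in for the combined classification/localization score
+# described in the abstract; this is NOT the PAAHead code.
+import numpy as np
+from sklearn.mixture import GaussianMixture
+
+
+def split_pos_neg(anchor_scores, min_prob=0.5):
+    """Fit a 2-component GMM to anchor scores and split them into pos/neg."""
+    scores = np.asarray(anchor_scores, dtype=np.float64).reshape(-1, 1)
+    gmm = GaussianMixture(n_components=2, random_state=0).fit(scores)
+    pos_comp = int(np.argmax(gmm.means_))          # component with the higher mean
+    prob = gmm.predict_proba(scores)[:, pos_comp]  # membership probability
+    pos_mask = prob > min_prob
+    return pos_mask, ~pos_mask
+
+
+# Example: anchors with clearly separable combined scores.
+pos, neg = split_pos_neg([0.05, 0.10, 0.12, 0.70, 0.80, 0.85])
+print(pos)  # the three high-scoring anchors are assigned as positives
+```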
+ +## Results and Models + +We provide config files to reproduce the object detection results in the +ECCV 2020 paper for Probabilistic Anchor Assignment with IoU +Prediction for Object Detection. + +| Backbone | Lr schd | Mem (GB) | Score voting | box AP | Config | Download | +| :-------: | :-----: | :------: | :----------: | :----: | :------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | 12e | 3.7 | True | 40.4 | [config](./paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | +| R-50-FPN | 12e | 3.7 | False | 40.2 | - | | +| R-50-FPN | 18e | 3.7 | True | 41.4 | [config](./paa_r50_fpn_1.5x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.log.json) | +| R-50-FPN | 18e | 3.7 | False | 41.2 | - | | +| R-50-FPN | 24e | 3.7 | True | 41.6 | [config](./paa_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.log.json) | +| R-50-FPN | 36e | 3.7 | True | 43.3 | [config](./paa_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722.log.json) | +| R-101-FPN | 12e | 6.2 | True | 42.6 | [config](./paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | +| R-101-FPN | 12e | 6.2 | False | 42.4 | - | | +| R-101-FPN | 24e | 6.2 | True | 43.5 | [config](./paa_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.log.json) | +| R-101-FPN | 36e | 6.2 | True | 45.1 | [config](./paa_r101_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202.log.json) | + +**Note**: + +1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.2 mAP. We report the best results. 
+ +## Citation + +```latex +@inproceedings{paa-eccv2020, + title={Probabilistic Anchor Assignment with IoU Prediction for Object Detection}, + author={Kim, Kang and Lee, Hee Seok}, + booktitle = {ECCV}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/paa/metafile.yml b/mmpose/configs/mmdet/paa/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..078b974971d3a3faf537cc52937278488923667e --- /dev/null +++ b/mmpose/configs/mmdet/paa/metafile.yml @@ -0,0 +1,111 @@ +Collections: + - Name: PAA + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Probabilistic Anchor Assignment + - ResNet + Paper: + URL: https://arxiv.org/abs/2007.08103 + Title: 'Probabilistic Anchor Assignment with IoU Prediction for Object Detection' + README: configs/paa/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/paa.py#L6 + Version: v2.4.0 + +Models: + - Name: paa_r50_fpn_1x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth + + - Name: paa_r50_fpn_1.5x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_1.5x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 18 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth + + - Name: paa_r50_fpn_2x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth + + - Name: paa_r50_fpn_mstrain_3x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth + + - Name: paa_r101_fpn_1x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth + + - Name: paa_r101_fpn_2x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth + + - Name: paa_r101_fpn_mstrain_3x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth diff --git a/mmpose/configs/mmdet/paa/paa_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/paa/paa_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..94f1c278dc16c1befbca510ca0ac5ba407969f6d --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/paa/paa_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/paa/paa_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c6136f3bb404df6a6fc18536e6770116738af6c7 --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r101_fpn_2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './paa_r101_fpn_1x_coco.py' +max_epochs = 24 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# training schedule for 2x +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/paa/paa_r101_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/paa/paa_r101_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8529dcdb90adb2b02162f4d2268088f5f376fcb0 --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r101_fpn_ms-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './paa_r50_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/paa/paa_r50_fpn_1.5x_coco.py b/mmpose/configs/mmdet/paa/paa_r50_fpn_1.5x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ae993b5c4370c8fc3e450f84fb7058528b853727 --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r50_fpn_1.5x_coco.py @@ -0,0 +1,18 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +max_epochs = 18 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[12, 16], + gamma=0.1) +] + +# training schedule for 1.5x +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/paa/paa_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/paa/paa_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f806a3ea65ffb9ee8b898122fb678b94ef212637 --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r50_fpn_1x_coco.py @@ -0,0 +1,80 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='PAA', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='PAAHead', + 
reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/paa/paa_r50_fpn_2x_coco.py b/mmpose/configs/mmdet/paa/paa_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6908e4eb97fcfa92a20d486ceab9a7ddfaf480b7 --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r50_fpn_2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +max_epochs = 24 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# training schedule for 2x +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/paa/paa_r50_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/paa/paa_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fed8b90a0fde7a1d344160a6658be04d1f9c654e --- /dev/null +++ b/mmpose/configs/mmdet/paa/paa_r50_fpn_ms-3x_coco.py @@ -0,0 +1,29 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +max_epochs = 36 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] + +# training schedule for 3x +train_cfg = dict(max_epochs=max_epochs) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/pafpn/README.md b/mmpose/configs/mmdet/pafpn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36cd6e9fd5d6e31ac94e59c06cc1055be8480d21 --- /dev/null +++ b/mmpose/configs/mmdet/pafpn/README.md @@ -0,0 +1,34 @@ +# PAFPN + +> [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534) + + + +## Abstract + +The way that information propagates in neural networks is of great importance. In this paper, we propose Path Aggregation Network (PANet) aiming at boosting information flow in proposal-based instance segmentation framework. 
Specifically, we enhance the entire feature hierarchy with accurate localization signals in lower layers by bottom-up path augmentation, which shortens the information path between lower layers and topmost feature. We present adaptive feature pooling, which links feature grid and all feature levels to make useful information in each feature level propagate directly to following proposal subnetworks. A complementary branch capturing different views for each proposal is created to further improve mask prediction. These improvements are simple to implement, with subtle extra computational overhead. Our PANet reaches the 1st place in the COCO 2017 Challenge Instance Segmentation task and the 2nd place in Object Detection task without large-batch training. It is also state-of-the-art on MVD and Cityscapes. + +
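The bottom-up path augmentation that distinguishes PAFPN from a plain FPN can be summarized in a short sketch. The module below illustrates that idea only; the channel count, the number of levels and the plain-conv fusion are assumptions, and it is not the `PAFPN` neck class that the config in this diff plugs into Faster R-CNN.

```python
# Illustrative bottom-up path augmentation (the PANet idea), not mmdet's PAFPN class.
import torch
import torch.nn as nn

class BottomUpAugmentation(nn.Module):
    """Adds a bottom-up path N2..N5 on top of FPN outputs P2..P5."""

    def __init__(self, channels: int = 256):
        super().__init__()
        # One stride-2 conv per downward step, plus a 3x3 fusion conv.
        self.down_convs = nn.ModuleList(
            [nn.Conv2d(channels, channels, 3, stride=2, padding=1) for _ in range(3)])
        self.fuse_convs = nn.ModuleList(
            [nn.Conv2d(channels, channels, 3, padding=1) for _ in range(3)])

    def forward(self, fpn_outs):              # [P2, P3, P4, P5], finest level first
        outs = [fpn_outs[0]]                   # N2 = P2
        for i, (down, fuse) in enumerate(zip(self.down_convs, self.fuse_convs)):
            # N_{k+1} = fuse(downsample(N_k) + P_{k+1}): the shortened bottom-up path.
            outs.append(fuse(down(outs[-1]) + fpn_outs[i + 1]))
        return outs                            # [N2, N3, N4, N5]

feats = [torch.randn(1, 256, s, s) for s in (64, 32, 16, 8)]
print([tuple(o.shape) for o in BottomUpAugmentation()(feats)])
```

Because the augmented neck keeps the same multi-level output interface as FPN, the config in this diff only swaps the neck `type` to `'PAFPN'` while keeping the FPN-style channel settings.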
+ +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | [config](./faster-rcnn_r50_pafpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) | + +## Citation + +```latex +@inproceedings{liu2018path, + author = {Shu Liu and + Lu Qi and + Haifang Qin and + Jianping Shi and + Jiaya Jia}, + title = {Path Aggregation Network for Instance Segmentation}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2018} +} +``` diff --git a/mmpose/configs/mmdet/pafpn/faster-rcnn_r50_pafpn_1x_coco.py b/mmpose/configs/mmdet/pafpn/faster-rcnn_r50_pafpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1452baeca7e680b11f9b2ec654abe689d3e53042 --- /dev/null +++ b/mmpose/configs/mmdet/pafpn/faster-rcnn_r50_pafpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' + +model = dict( + neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/pafpn/metafile.yml b/mmpose/configs/mmdet/pafpn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..7772d276ab6f0da685ed8ea5e58efd8fc5164529 --- /dev/null +++ b/mmpose/configs/mmdet/pafpn/metafile.yml @@ -0,0 +1,38 @@ +Collections: + - Name: PAFPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PAFPN + Paper: + URL: https://arxiv.org/abs/1803.01534 + Title: 'Path Aggregation Network for Instance Segmentation' + README: configs/pafpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/pafpn.py#L11 + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_pafpn_1x_coco + In Collection: PAFPN + Config: configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 58.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth diff --git a/mmpose/configs/mmdet/panoptic_fpn/README.md b/mmpose/configs/mmdet/panoptic_fpn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0321fb7ce1db42868c7753ce56fb330fef7e4764 --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/README.md @@ -0,0 +1,62 @@ +# Panoptic FPN + +> [Panoptic feature pyramid networks](https://arxiv.org/abs/1901.02446) + + + +## Abstract + +The recently 
introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. + +
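The semantic branch described above, configured later in this diff as `PanopticFPNHead` with `inner_channels=128` and 53 stuff classes, essentially projects each FPN level, upsamples everything to the finest resolution, sums the results, and predicts per-pixel logits. The sketch below illustrates that flow under simplifying assumptions (a single conv per level, `num_classes=54` standing for 53 stuff classes plus one catch-all class); it is not the mmdet implementation.

```python
# Simplified semantic-FPN branch in the spirit of Panoptic FPN (not mmdet's PanopticFPNHead).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SemanticBranch(nn.Module):
    def __init__(self, in_channels: int = 256, inner_channels: int = 128,
                 num_classes: int = 54):
        super().__init__()
        # One lightweight conv per FPN level before merging (assumption of this sketch).
        self.lateral = nn.ModuleList(
            [nn.Conv2d(in_channels, inner_channels, 3, padding=1) for _ in range(4)])
        self.predictor = nn.Conv2d(inner_channels, num_classes, 1)

    def forward(self, fpn_outs):               # [P2, P3, P4, P5], finest level first
        target = fpn_outs[0].shape[-2:]
        merged = 0
        for conv, feat in zip(self.lateral, fpn_outs):
            x = F.relu(conv(feat))
            # Upsample every level to the P2 resolution and sum.
            merged = merged + F.interpolate(
                x, size=target, mode='bilinear', align_corners=False)
        return self.predictor(merged)           # per-pixel logits at 1/4 input resolution

feats = [torch.randn(1, 256, s, s) for s in (100, 50, 25, 13)]
print(SemanticBranch()(feats).shape)            # torch.Size([1, 54, 100, 100])
```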
+ +## Dataset + +PanopticFPN requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :---: | :---: | :---: | :---------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.7 | | 40.2 | 77.8 | 49.3 | 47.8 | 80.9 | 57.5 | 28.9 | 73.1 | 37.0 | [config](./panoptic-fpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153.log.json) | +| R-50-FPN | pytorch | 3x | - | - | 42.5 | 78.1 | 51.7 | 50.3 | 81.5 | 60.3 | 30.7 | 73.0 | 38.8 | [config](./panoptic-fpn_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155.log.json) | +| R-101-FPN | pytorch | 1x | 6.7 | | 42.2 | 78.3 | 51.4 | 50.1 | 81.4 | 59.9 | 30.3 | 73.6 | 38.5 | [config](./panoptic-fpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950.log.json) | +| R-101-FPN | pytorch | 3x | - | - | 44.1 | 78.9 | 53.6 | 52.1 | 81.7 | 62.3 | 32.0 | 74.6 | 40.3 | [config](./panoptic-fpn_r101_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712.log.json) | + +## Citation + +The base method for panoptic segmentation task. 
+ +```latex +@inproceedings{kirillov2018panopticfpn, + author = { + Alexander Kirillov, + Ross Girshick, + Kaiming He, + Piotr Dollar, + }, + title = {Panoptic Feature Pyramid Networks}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2019} +} +``` diff --git a/mmpose/configs/mmdet/panoptic_fpn/metafile.yml b/mmpose/configs/mmdet/panoptic_fpn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c99275ec3f37f47db756b96a4603c466d5fbd946 --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: PanopticFPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PanopticFPN + Paper: + URL: https://arxiv.org/pdf/1901.02446 + Title: 'Panoptic feature pyramid networks' + README: configs/panoptic_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/detectors/panoptic_fpn.py#L7 + Version: v2.16.0 + +Models: + - Name: panoptic_fpn_r50_fpn_1x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 12 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth + + - Name: panoptic_fpn_r50_fpn_mstrain_3x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic-fpn_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 36 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth + + - Name: panoptic_fpn_r101_fpn_1x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic-fpn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 12 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 42.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth + + - Name: panoptic_fpn_r101_fpn_mstrain_3x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic-fpn_r101_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 36 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 44.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth diff --git a/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b960254ef5ecfac1de790a66a5378535114e9ba3 --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './panoptic-fpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_ms-3x_coco.py new file mode 
100644 index 0000000000000000000000000000000000000000..268782ee2cca31796e43423300319176556cfef7 --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r101_fpn_ms-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './panoptic-fpn_r50_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c89ef520124a43c910b35a4808153e4c455d3a --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_panoptic.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='PanopticFPN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + seg_pad_value=255), + semantic_head=dict( + type='PanopticFPNHead', + num_things_classes=80, + num_stuff_classes=53, + in_channels=256, + inner_channels=128, + start_level=0, + end_level=4, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=None, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)), + panoptic_fusion_head=dict( + type='HeuristicFusionHead', + num_things_classes=80, + num_stuff_classes=53), + test_cfg=dict( + rcnn=dict( + score_thr=0.6, + nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True), + max_per_img=100, + mask_thr_binary=0.5), + # used in HeuristicFusionHead + panoptic=dict(mask_overlap=0.5, stuff_area_limit=4096))) + +# Forced to remove NumClassCheckHook +custom_hooks = [] diff --git a/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b18a8f8dd7eb6c49e277346ffe71c6e36c9d3b68 --- /dev/null +++ b/mmpose/configs/mmdet/panoptic_fpn/panoptic-fpn_r50_fpn_ms-3x_coco.py @@ -0,0 +1,35 @@ +_base_ = './panoptic-fpn_r50_fpn_1x_coco.py' + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# TODO: Use RepeatDataset to speed up training +# training schedule for 3x +train_cfg = dict(max_epochs=36, val_interval=3) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[24, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/pascal_voc/README.md b/mmpose/configs/mmdet/pascal_voc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2ead3add79ec914d9720562ce4c4c121fac15a7e --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/README.md @@ -0,0 +1,40 @@ +# Pascal VOC + +> [The Pascal Visual Object Classes (VOC) 
Challenge](https://link.springer.com/article/10.1007/s11263-009-0275-4) + + + +## Abstract + +The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection. + +This paper describes the dataset and evaluation procedure. We review the state-of-the-art in evaluated methods for both classification and detection, analyse whether the methods are statistically different, what they are learning from the images (e.g. the object or its context), and what the methods find easy or confuse. The paper concludes with lessons learnt in the three year history of the challenge, and proposes directions for future improvement and extension. + +
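The table below reports PASCAL-VOC-style box AP. As a reference for how such a number is derived from a ranked list of detections, here is the generic area-under-the-interpolated-precision/recall-curve calculation; it is shown for illustration only and may differ in details (for example the 11-point variant traditionally used for VOC2007) from the evaluator these configs actually use.

```python
# Reference sketch: PASCAL-VOC-style average precision from ranked detections.
import numpy as np

def voc_average_precision(scores, is_true_positive, num_gt):
    """scores: detection confidences; is_true_positive: 1/0 flag per detection."""
    order = np.argsort(-np.asarray(scores))
    tp = np.asarray(is_true_positive, dtype=float)[order]
    fp = 1.0 - tp
    recall = np.cumsum(tp) / max(num_gt, 1)
    precision = np.cumsum(tp) / (np.cumsum(tp) + np.cumsum(fp))
    # Make precision monotonically decreasing, then integrate over recall.
    precision = np.maximum.accumulate(precision[::-1])[::-1]
    r = np.concatenate(([0.0], recall, [1.0]))
    p = np.concatenate(([0.0], precision, [0.0]))
    return float(np.sum((r[1:] - r[:-1]) * p[1:]))

# toy example: 4 detections ranked by score, 3 ground-truth boxes
print(voc_average_precision([0.9, 0.8, 0.7, 0.6], [1, 1, 0, 1], num_gt=3))
```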
+ +## Results and Models + +| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN C4 | R-50 | caffe | 18k | | - | 80.9 | [config](./faster-rcnn_r50-caffe-c4_ms-18k_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712//home/dong/code_sensetime/2022Q1/mmdetection/work_dirs/prepare_voc/gather/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327-847a14d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327.log.json) | +| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 80.4 | [config](./faster-rcnn_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712-54bef0f3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712.log.json) | +| Retinanet | R-50 | pytorch | 1x | 2.1 | - | 77.3 | [config](./retinanet_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) | +| SSD300 | VGG16 | - | 120e | - | - | 76.5 | [config](./ssd300_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658-17edda1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658.log.json) | +| SSD512 | VGG16 | - | 120e | - | - | 79.5 | [config](./ssd512_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717-03cefefe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717.log.json) | + +## Citation + +```latex +@Article{Everingham10, + author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. 
and Zisserman, A.", + title = "The Pascal Visual Object Classes (VOC) Challenge", + journal = "International Journal of Computer Vision", + volume = "88", + year = "2010", + number = "2", + month = jun, + pages = "303--338", +} +``` diff --git a/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50-caffe-c4_ms-18k_voc0712.py b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50-caffe-c4_ms-18k_voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..dddc0bbdf33948478e11bb701f844a8473ddf165 --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50-caffe-c4_ms-18k_voc0712.py @@ -0,0 +1,86 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50-caffe-c4.py', + '../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576), + (1333, 608), (1333, 640), (1333, 672), (1333, 704), + (1333, 736), (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + _delete_=True, + type='ConcatDataset', + datasets=[ + dict( + type='VOCDataset', + data_root={{_base_.data_root}}, + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}), + dict( + type='VOCDataset', + data_root={{_base_.data_root}}, + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}) + ])) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# training schedule for 18k +max_iter = 18000 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=3000) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[12000, 16000], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000)) +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712-cocofmt.py b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712-cocofmt.py new file mode 100644 index 0000000000000000000000000000000000000000..0b0aa41d67fc4edfde6d534e2e54a135f5de6e44 --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712-cocofmt.py @@ -0,0 +1,100 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', 
'../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +METAINFO = { + 'classes': + ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', + 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', + 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'), + # palette is a list of color tuples, which is used for visualization. + 'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), + (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), + (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), + (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), + (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)] +} + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/VOCdevkit/' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1000, 600), keep_ratio=True), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + _delete_=True, + type=dataset_type, + data_root=data_root, + ann_file='annotations/voc0712_trainval.json', + data_prefix=dict(img=''), + metainfo=METAINFO, + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}))) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file='annotations/voc07_test.json', + data_prefix=dict(img=''), + metainfo=METAINFO, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/voc07_test.json', + metric='bbox', + format_only=False, + backend_args={{_base_.backend_args}}) +test_evaluator = val_evaluator + +# training schedule, the dataset is repeated 3 times, so the +# actual epoch = 4 * 3 = 12 +max_epochs = 4 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..07391667b35c9db9e352a03624411bb568f5396a --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +# training schedule, voc dataset is repeated 3 times, in +# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 +max_epochs = 4 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/pascal_voc/retinanet_r50_fpn_1x_voc0712.py b/mmpose/configs/mmdet/pascal_voc/retinanet_r50_fpn_1x_voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..c86a6f199c9317804692189975f3abaff24f6aff --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/retinanet_r50_fpn_1x_voc0712.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(bbox_head=dict(num_classes=20)) + +# training schedule, voc dataset is repeated 3 times, in +# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 +max_epochs = 4 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[3], + gamma=0.1) +] +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/mmpose/configs/mmdet/pascal_voc/ssd300_voc0712.py b/mmpose/configs/mmdet/pascal_voc/ssd300_voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7a1368b76aa53700bd81a912b54e84ab58e53a --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/ssd300_voc0712.py @@ -0,0 +1,102 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + bbox_head=dict( + num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2, + 0.9)))) +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +input_size = 300 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=3, + dataset=dict( # RepeatDataset + # the dataset is repeated 10 times, and the training schedule is 2x, + # so the actual epoch = 12 * 10 = 120. + times=10, + dataset=dict( # ConcatDataset + # VOCDataset will add different `dataset_type` in dataset.metainfo, + # which will get error if using ConcatDataset. Adding + # `ignore_keys` can avoid this error. + ignore_keys=['dataset_type'], + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + ]))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 20], + gamma=0.1) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/pascal_voc/ssd512_voc0712.py b/mmpose/configs/mmdet/pascal_voc/ssd512_voc0712.py new file mode 100644 index 0000000000000000000000000000000000000000..6c4dc8a3eec86ccced7d44120b254463d18c00f5 --- /dev/null +++ b/mmpose/configs/mmdet/pascal_voc/ssd512_voc0712.py @@ -0,0 +1,82 @@ +_base_ = 'ssd300_voc0712.py' + +input_size = 512 +model = dict( + neck=dict( + out_channels=(512, 1024, 512, 256, 256, 256, 256), + level_strides=(2, 2, 2, 2, 1), + level_paddings=(1, 1, 1, 1, 1), + last_kernel_size=4), + bbox_head=dict( + in_channels=(512, 1024, 512, 256, 256, 256, 256), + anchor_generator=dict( + input_size=input_size, + strides=[8, 16, 32, 64, 128, 256, 512], + basesize_ratio_range=(0.15, 0.9), + ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) + +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + # avoid bboxes being resized + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=3, + dataset=dict( # RepeatDataset + # the dataset is repeated 10 times, and the training schedule is 2x, + # so the actual epoch = 12 * 10 = 120. + times=10, + dataset=dict( # ConcatDataset + # VOCDataset will add different `dataset_type` in dataset.metainfo, + # which will get error if using ConcatDataset. Adding + # `ignore_keys` can avoid this error. + ignore_keys=['dataset_type'], + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + ]))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/pisa/README.md b/mmpose/configs/mmdet/pisa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..39f79ecd1b9b007b6bbf1417e6fd809d47141470 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/README.md @@ -0,0 +1,50 @@ +# PISA + +> [Prime Sample Attention in Object Detection](https://arxiv.org/abs/1904.04821) + + + +## Abstract + +It is a common paradigm in object detection frameworks to treat all samples equally and target at maximizing the performance on average. 
In this work, we revisit this paradigm through a careful study on how different samples contribute to the overall performance measured in terms of mAP. Our study suggests that the samples in each mini-batch are neither independent nor equally important, and therefore a better classifier on average does not necessarily mean higher mAP. Motivated by this study, we propose the notion of Prime Samples, those that play a key role in driving the detection performance. We further develop a simple yet effective sampling and learning strategy called PrIme Sample Attention (PISA) that directs the focus of the training process towards such samples. Our experiments demonstrate that it is often more effective to focus on prime samples than hard samples when training a detector. Particularly, On the MSCOCO dataset, PISA outperforms the random sampling baseline and hard mining schemes, e.g., OHEM and Focal Loss, consistently by around 2% on both single-stage and two-stage detectors, even with a strong backbone ResNeXt-101. + +
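The "prime sample" idea above amounts to re-weighting training samples by how much they matter for final mAP rather than treating them uniformly. The sketch below shows a heavily simplified, rank-based re-weighting in that spirit; the real ISR used by the configs in this diff ranks samples hierarchically per class and normalizes differently, so the exact formula here is an assumption for illustration. The `k` and `bias` names mirror the `isr=dict(k=2, bias=0)` entries in the configs.

```python
# Simplified, illustrative sample re-weighting in the spirit of PISA's ISR
# (not the mmdet implementation; formula is an assumption for illustration).
import torch

def rank_based_weights(ious: torch.Tensor, k: float = 2.0, bias: float = 0.0):
    """Give larger loss weights to positive samples with higher IoU rank."""
    n = ious.numel()
    ranks = torch.argsort(torch.argsort(ious, descending=True))  # 0 = best sample
    importance = (n - ranks.float()) / n                          # in (0, 1]
    weights = importance.pow(k) + bias
    # Rescale so the average loss magnitude is preserved.
    return weights * n / weights.sum()

ious = torch.tensor([0.55, 0.90, 0.62, 0.75])
print(rank_based_weights(ious))   # highest-IoU sample gets the largest weight
```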
+ +## Results and Models + +| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download | +| :--: | :----------: | :------------: | :-----: | :----: | :-----: | :----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | | +| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](./faster-rcnn_r50_fpn_pisa_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) | +| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | | +| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](./faster-rcnn_x101-32x4d_fpn_pisa_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) | +| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | | +| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](./mask-rcnn_r50_fpn_pisa_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) | +| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | | +| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | | +| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | | +| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](./retinanet-r50_fpn_pisa_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) | +| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | | +| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](./retinanet_x101-32x4d_fpn_pisa_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) | +| × | SSD300 | VGG16 | 1x | 25.6 | | - | | +| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](./ssd300_pisa_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) | +| × | SSD512 | VGG16 | 1x | 29.3 | | - | | +| √ | SSD512 | VGG16 | 1x | 31.8 | | [config](./ssd512_pisa_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) | + +**Notes:** + +- In the original paper, all models are trained and tested on mmdet v1.x, thus results may not be exactly the same with this release on v2.0. +- It is noted PISA only modifies the training pipeline so the inference time remains the same with the baseline. + +## Citation + +```latex +@inproceedings{cao2019prime, + title={Prime sample attention in object detection}, + author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..237a3b13aa5e61f04579670af01df8f481d80dd1 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/pisa/faster-rcnn_x101-32x4d_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/faster-rcnn_x101-32x4d_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2c8d9a20ac7adf1965bb3d98e868c785cb23c3 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/faster-rcnn_x101-32x4d_fpn_pisa_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d6a6823591b1d7780c7f9d49029579afede239aa --- /dev/null +++ b/mmpose/configs/mmdet/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + 
add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/pisa/mask-rcnn_x101-32x4d_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/mask-rcnn_x101-32x4d_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ac19fe75ba8c5b2440772eced16397e2273735 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/mask-rcnn_x101-32x4d_fpn_pisa_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/mmpose/configs/mmdet/pisa/metafile.yml b/mmpose/configs/mmdet/pisa/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..3be5c3baf6d386d246b8fdc39035245d7dbbaad5 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/metafile.yml @@ -0,0 +1,110 @@ +Collections: + - Name: PISA + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - PISA + - RPN + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1904.04821 + Title: 'Prime Sample Attention in Object Detection' + README: configs/pisa/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/pisa_roi_head.py#L8 + Version: v2.1.0 + +Models: + - Name: pisa_faster_rcnn_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth + + - Name: pisa_faster_rcnn_x101_32x4d_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/faster-rcnn_x101-32x4d_fpn_pisa_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth + + - Name: pisa_mask_rcnn_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth + + - Name: pisa_retinanet_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/retinanet-r50_fpn_pisa_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth + + - Name: pisa_retinanet_x101_32x4d_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/retinanet_x101-32x4d_fpn_pisa_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth + + - Name: pisa_ssd300_coco + In Collection: PISA + Config: configs/pisa/ssd300_pisa_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 27.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth + + - Name: pisa_ssd512_coco + In Collection: PISA + Config: configs/pisa/ssd512_pisa_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth diff --git a/mmpose/configs/mmdet/pisa/retinanet-r50_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/retinanet-r50_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..70f89e227ec64b5c7224375aac0cf7ae3a10a29e --- /dev/null +++ b/mmpose/configs/mmdet/pisa/retinanet-r50_fpn_pisa_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' + +model = dict( + bbox_head=dict( + type='PISARetinaHead', + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/mmpose/configs/mmdet/pisa/retinanet_x101-32x4d_fpn_pisa_1x_coco.py b/mmpose/configs/mmdet/pisa/retinanet_x101-32x4d_fpn_pisa_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9caad45d34a9cde84a3c29ad45e3080bb831bb76 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/retinanet_x101-32x4d_fpn_pisa_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../retinanet/retinanet_x101-32x4d_fpn_1x_coco.py' + +model = dict( + bbox_head=dict( + type='PISARetinaHead', + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/mmpose/configs/mmdet/pisa/ssd300_pisa_coco.py b/mmpose/configs/mmdet/pisa/ssd300_pisa_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b10236baeb1925483c2fdb025d86c45d51ba0276 --- /dev/null +++ b/mmpose/configs/mmdet/pisa/ssd300_pisa_coco.py @@ -0,0 +1,7 @@ +_base_ = '../ssd/ssd300_coco.py' + +model = dict( + bbox_head=dict(type='PISASSDHead'), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) + +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/pisa/ssd512_pisa_coco.py b/mmpose/configs/mmdet/pisa/ssd512_pisa_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..939c7f453d4d881324c3b0443b0696eb96b3df4f --- /dev/null +++ b/mmpose/configs/mmdet/pisa/ssd512_pisa_coco.py @@ -0,0 +1,7 @@ +_base_ = '../ssd/ssd512_coco.py' + +model = dict( + bbox_head=dict(type='PISASSDHead'), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) + +optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/point_rend/README.md b/mmpose/configs/mmdet/point_rend/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..efa1dcac214adafb6f7a9b9c6aba97e9ecd7b51c --- /dev/null +++ b/mmpose/configs/mmdet/point_rend/README.md @@ -0,0 +1,33 @@ +# PointRend + +> [PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193) + + + +## Abstract + +We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. + +
+ +
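As a rough illustration of the adaptive subdivision described in the abstract above, the sketch below shows the uncertainty-based point selection that drives PointRend-style inference: upsample the coarse mask, pick the points whose logits are closest to the decision boundary, and re-predict only those points. This is a simplified stand-in, not mmdet's `PointRendRoIHead`; the `point_head` callable and the tensor shapes are placeholders (the real point head also consumes fine-grained backbone features), and the step count / point budget correspond to the `subdivision_steps` and `subdivision_num_points` fields in the configs added later in this diff.

```python
import torch
import torch.nn.functional as F


def uncertainty(mask_logits: torch.Tensor) -> torch.Tensor:
    """Points whose logits are closest to 0 (i.e. probability ~0.5) are the
    most ambiguous, so uncertainty is the negative absolute logit."""
    return -mask_logits.abs()


def subdivision_step(mask_logits, point_head, num_points=28 * 28):
    """One inference-time subdivision step (illustrative only).

    mask_logits: (N, 1, H, W) coarse per-instance mask logits.
    point_head:  placeholder callable that maps the selected point indices
                 to refined logits of shape (N, K).
    """
    # 1) Upsample the coarse prediction 2x, as a renderer would.
    mask_logits = F.interpolate(
        mask_logits, scale_factor=2, mode='bilinear', align_corners=False)
    n, _, h, w = mask_logits.shape

    # 2) Pick the most uncertain points on the upsampled grid.
    scores = uncertainty(mask_logits).view(n, h * w)
    _, idx = scores.topk(min(num_points, h * w), dim=1)

    # 3) Re-predict only those points and scatter them back into the mask.
    refined = point_head(idx)                       # (N, K) refined logits
    flat = mask_logits.view(n, h * w)
    flat.scatter_(1, idx, refined)
    return flat.view(n, 1, h, w)
```

Running this loop `subdivision_steps` times on a 7x7 coarse mask reaches a 224x224 output while only ever re-evaluating `subdivision_num_points` locations per step.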
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.6 | | 38.4 | 36.3 | [config](./point-rend_r50-caffe_fpn_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco_20200612_161407.log.json) | +| R-50-FPN | caffe | 3x | 4.6 | | 41.0 | 38.0 | [config](./point-rend_r50-caffe_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco_20200614_002632.log.json) | + +Note: All models are trained with multi-scale, the input image shorter side is randomly scaled to one of (640, 672, 704, 736, 768, 800). + +## Citation + +```latex +@InProceedings{kirillov2019pointrend, + title={{PointRend}: Image Segmentation as Rendering}, + author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick}, + journal={ArXiv:1912.08193}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/point_rend/metafile.yml b/mmpose/configs/mmdet/point_rend/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f54f8a860b7951c1e99471b1f10e69c4685d998b --- /dev/null +++ b/mmpose/configs/mmdet/point_rend/metafile.yml @@ -0,0 +1,54 @@ +Collections: + - Name: PointRend + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PointRend + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1912.08193 + Title: 'PointRend: Image Segmentation as Rendering' + README: configs/point_rend/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/point_rend.py#L6 + Version: v2.2.0 + +Models: + - Name: point_rend_r50_caffe_fpn_mstrain_1x_coco + In Collection: PointRend + Config: configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth + + - Name: point_rend_r50_caffe_fpn_mstrain_3x_coco + In Collection: PointRend + Config: configs/point_rend/point-rend_r50-caffe_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth diff --git a/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py b/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8b17f5a340bad54a8fe9b366ccc7d5574f687b17 --- /dev/null +++ b/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py @@ -0,0 +1,44 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-1x_coco.py' +# model settings +model = dict( + type='PointRend', + roi_head=dict( + type='PointRendRoIHead', + mask_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='concat', + roi_layer=dict( + _delete_=True, type='SimpleRoIAlign', output_size=14), + out_channels=256, + featmap_strides=[4]), + mask_head=dict( + _delete_=True, + type='CoarseMaskHead', + num_fcs=2, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + point_head=dict( + type='MaskPointHead', + num_fcs=3, + in_channels=256, + fc_channels=256, + num_classes=80, + coarse_pred_each_layer=True, + loss_point=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + mask_size=7, + num_points=14 * 14, + oversample_ratio=3, + importance_sample_ratio=0.75)), + test_cfg=dict( + rcnn=dict( + subdivision_steps=5, + subdivision_num_points=28 * 28, + scale_factor=2))) diff --git a/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b11faaa98ebc5b61f086a2297debda6769dc6270 --- /dev/null +++ b/mmpose/configs/mmdet/point_rend/point-rend_r50-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,18 @@ +_base_ = './point-rend_r50-caffe_fpn_ms-1x_coco.py' + +max_epochs = 36 + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/pvt/README.md b/mmpose/configs/mmdet/pvt/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fccad4f6b8b7e6ac89e937fac6d7858ecbfa881b --- /dev/null +++ b/mmpose/configs/mmdet/pvt/README.md @@ -0,0 +1,57 @@ +# PVT + +> [Pyramid vision transformer: A versatile backbone for dense prediction without convolutions](https://arxiv.org/abs/2102.12122) + + + +## Abstract + +Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. 
(1) Different from ViT, which typically has low-resolution outputs and high computational and memory cost, PVT can not only be trained on dense partitions of the image to achieve high output resolution, which is important for dense prediction, but also uses a progressive shrinking pyramid to reduce the computation on large feature maps. (2) PVT inherits the advantages of both CNN and Transformer, making it a unified backbone for various vision tasks without convolutions, simply by replacing CNN backbones. (3) We validate PVT through extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic segmentation, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinaNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT can serve as an alternative and useful backbone for pixel-level prediction and facilitate future research. + +Transformers have recently shown encouraging progress in computer vision. In this work, we present new baselines that improve the original Pyramid Vision Transformer (abbreviated as PVTv1) by adding three designs: (1) overlapping patch embedding, (2) convolutional feed-forward networks, and (3) linear-complexity attention layers. +With these modifications, our PVTv2 significantly improves PVTv1 on three tasks, e.g., classification, detection, and segmentation. Moreover, PVTv2 achieves comparable or better performance than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer research in computer vision. + +
+ +
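The "progressive shrinking pyramid" and reduced attention cost mentioned in the abstracts above come from computing keys and values on a spatially reduced copy of the feature map, which cuts the attention cost roughly by the square of the reduction ratio. The snippet below is a minimal sketch of that spatial-reduction idea built from plain PyTorch modules; it is not mmdet's `PyramidVisionTransformer` implementation, and the class name, defaults, and `sr_ratio` value are illustrative only.

```python
import torch
import torch.nn as nn


class SpatialReductionAttention(nn.Module):
    """Minimal sketch of PVT-style attention: keys/values are computed on a
    feature map downsampled by `sr_ratio`, so the attention matrix shrinks
    from (H*W, H*W) to (H*W, H*W / sr_ratio**2)."""

    def __init__(self, dim=64, num_heads=1, sr_ratio=8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        # Strided conv that shrinks the K/V resolution.
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, hw):
        h, w = hw                                     # x: (B, H*W, C) tokens
        b, n, c = x.shape
        kv = x.transpose(1, 2).reshape(b, c, h, w)
        kv = self.sr(kv).flatten(2).transpose(1, 2)   # (B, H*W/R^2, C)
        kv = self.norm(kv)
        out, _ = self.attn(query=x, key=kv, value=kv)
        return out


feat = torch.randn(2, 56 * 56, 64)   # stage-1 tokens of a 224x224 image
attn = SpatialReductionAttention(dim=64, num_heads=1, sr_ratio=8)
print(attn(feat, (56, 56)).shape)    # torch.Size([2, 3136, 64])
```

PVTv2's "linear complexity attention" replaces the strided convolution with pooling to a fixed size, but the overall structure is the same: only the queries stay at full resolution.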
+ +## Results and Models + +### RetinaNet (PVTv1) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +| :--------: | :-----: | :------: | :----: | :----------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| PVT-Tiny | 12e | 8.5 | 36.6 | [config](./retinanet_pvt-t_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110.log.json) | +| PVT-Small | 12e | 14.5 | 40.4 | [config](./retinanet_pvt-s_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921.log.json) | +| PVT-Medium | 12e | 20.9 | 41.7 | [config](./retinanet_pvt-m_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243.log.json) | + +### RetinaNet (PVTv2) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :-----: | :------: | :----: | :-------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| PVTv2-B0 | 12e | 7.4 | 37.1 | [config](./retinanet_pvtv2-b0_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157.log.json) | +| PVTv2-B1 | 12e | 9.5 | 41.2 | [config](./retinanet_pvtv2-b1_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318.log.json) | +| PVTv2-B2 | 12e | 16.2 | 44.6 | [config](./retinanet_pvtv2-b2_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843.log.json) | +| PVTv2-B3 | 12e | 23.0 | 46.0 | [config](./retinanet_pvtv2-b3_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512.log.json) | +| PVTv2-B4 | 12e | 17.0 | 46.3 | [config](./retinanet_pvtv2-b4_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151.log.json) | +| PVTv2-B5 | 12e | 18.7 | 46.1 | [config](./retinanet_pvtv2-b5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800.log.json) | + +## Citation + +```latex +@article{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2102.12122}, + year={2021} +} +``` + +```latex +@article{wang2021pvtv2, + title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2106.13797}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/pvt/metafile.yml b/mmpose/configs/mmdet/pvt/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..58843784955f3f4be7aeebf7caa9b50b7891f4c5 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/metafile.yml @@ -0,0 +1,243 @@ +Models: + - Name: retinanet_pvt-t_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvt-s_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvt-s_fpn_1x_coco.py + Metadata: + Training Memory (GB): 14.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvt-m_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py + Metadata: + Training Memory (GB): 20.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b0_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b1_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b2_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py + Metadata: + Training Memory (GB): 16.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + 
Version: 2.17.0 + + - Name: retinanet_pvtv2-b3_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py + Metadata: + Training Memory (GB): 23.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b4_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py + Metadata: + Training Memory (GB): 17.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b5_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 18.7 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvt-l_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvt-l_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1a6f604bdb367106bc75680808ce6fabc2740ed1 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvt-l_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_large.pth'))) +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
+optim_wrapper = dict(type='AmpOptimWrapper') diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvt-m_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvt-m_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b888f788b6c7310491751774238451bb7107dccc --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvt-m_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_medium.pth'))) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvt-s_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvt-s_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..46603488bb3ceb4fc1052139da53340a3d595256 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvt-s_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_small.pth'))) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvt-t_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvt-t_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5f67c444f262613d615b8b7331991ca7e2f57935 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvt-t_fpn_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformer', + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_tiny.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optim_wrapper = dict( + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cbebf90fb89d81bd2f4c0874dc2c82cf7c7393d0 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformerV2', + embed_dims=32, + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b0.pth')), + neck=dict(in_channels=[32, 64, 160, 256])) +# optimizer +optim_wrapper = dict( + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5374c50925f5c7ed8a761eda40dc4bf374df3aeb --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b1.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git 
a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9a18debbe5f8b9918e0d086ad6d54d203ef310 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b2.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7a47f820324af7fecf773640d7d1829b0c115471 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b3.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5faf4c507ba89ffe614b2b9d34d452e4c106b0fe --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py @@ -0,0 +1,20 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b4.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optim_wrapper = dict( + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)) + +# dataset settings +train_dataloader = dict(batch_size=1, num_workers=1) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..afff8719ece41dbfbbe23e2259b9973bb29871f6 --- /dev/null +++ b/mmpose/configs/mmdet/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py @@ -0,0 +1,21 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 6, 40, 3], + mlp_ratios=(4, 4, 4, 4), + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b5.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optim_wrapper = dict( + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)) + +# dataset settings +train_dataloader = dict(batch_size=1, num_workers=1) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (1 sample per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/qdtrack/README.md b/mmpose/configs/mmdet/qdtrack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5a6efe7d3fd62d61b328d8ce248e1fd9132f5792 --- /dev/null +++ b/mmpose/configs/mmdet/qdtrack/README.md @@ -0,0 +1,89 @@ +# Quasi-Dense Similarity Learning for Multiple Object Tracking + +## Abstract + + + +Similarity learning has been recognized as a crucial step for object tracking. However, existing multiple object tracking methods only use sparse ground-truth matching as the training objective, while ignoring the majority of the informative regions in the images. In this paper, we present Quasi-Dense Similarity Learning, which densely samples hundreds of region proposals on a pair of images for contrastive learning. We can directly combine this similarity learning with existing detection methods to build Quasi-Dense Tracking (QDTrack) without resorting to displacement regression or motion priors. We also find that the resulting distinctive feature space admits a simple nearest neighbor search at inference time. Despite its simplicity, QDTrack outperforms all existing methods on the MOT, BDD100K, Waymo, and TAO tracking benchmarks. It achieves 68.7 MOTA at 20.3 FPS on MOT17 without using external training data. Compared to methods with similar detectors, it boosts MOTA by almost 10 points and significantly decreases the number of ID switches on the BDD100K and Waymo datasets. + +
+ + +
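The "simple nearest neighbor search" mentioned in the abstract is realised in the released tracker as a bi-directional softmax over embedding similarities (`match_metric='bisoftmax'` in the tracker config added later in this diff). The sketch below only conveys the general idea of that association step; the thresholds, backdrop handling, and exact normalisation of the real `QuasiDenseTracker` differ, and the function name here is illustrative.

```python
import torch


def bisoftmax_match(track_embeds, det_embeds, match_score_thr=0.5):
    """Sketch of bi-directional softmax association (QDTrack-style).

    track_embeds: (T, C) embeddings of existing tracklets.
    det_embeds:   (D, C) embeddings of current-frame detections.
    Returns, for each detection, the index of the matched tracklet or -1.
    """
    sims = torch.mm(det_embeds, track_embeds.t())           # (D, T)
    # Softmax over tracklets and over detections, then average: a pair only
    # scores highly if it is a strong match in both directions.
    scores = (sims.softmax(dim=1) + sims.softmax(dim=0)) / 2
    best, idx = scores.max(dim=1)
    idx[best < match_score_thr] = -1                         # unmatched
    return idx
```

The `match_score_thr` default mirrors the value in the tracker config below; in practice the tracker also gates matches by detection score and IoU-based NMS of "backdrop" boxes.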
+ +## Results and models on MOT17 + +| Method | Detector | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download | +| :-----: | :----------: | :--------: | :------: | :----: | :------------: | :--: | :--: | :--: | :--: | :---: | :---: | :--------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| QDTrack | Faster R-CNN | half-train | half-val | N | - | 57.1 | 68.1 | 68.6 | 7707 | 42732 | 1083 | [config](qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py) | [model](https://download.openmmlab.com/mmtracking/mot/qdtrack/mot_dataset/qdtrack_faster-rcnn_r50_fpn_4e_mot17_20220315_145635-76f295ef.pth) \| [log](https://download.openmmlab.com/mmtracking/mot/qdtrack/mot_dataset/qdtrack_faster-rcnn_r50_fpn_4e_mot17_20220315_145635.log.json) | + +## Get started + +### 1. Development Environment Setup + +For the tracking development environment setup, please refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Preparation + +For tracking dataset preparation, please refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md). + +### 3. Training + +Because parameters such as the learning rate in the default configuration file assume 8 GPUs, we recommend training with 8 GPUs to reproduce the reported accuracy. You can start training with the following command. + +```shell +# Train QDTrack on the mot17-half-train dataset with the following command. +# The number after the config file is the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8 +``` + +For more detailed usage of `train.py/dist_train.sh/slurm_train.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 4. Testing and evaluation + +**4.1 Example on the MOTxx-halfval dataset** + +```shell +# Example 1: Test on the motXX-half-val set +# The number after the config file is the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_test_tracking.sh configs/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8 --checkpoint ${CHECKPOINT_PATH} +``` + +**4.2 Video-based evaluation and testing** + +We provide two ways of evaluating and testing: image-based and video-based. +If you want to use video-based evaluation and testing, modify the config as follows: + +``` +val_dataloader = dict( + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False)) +``` + +For more detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5. Inference + +Use a single GPU to run prediction on a video and save the result as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py --checkpoint ${CHECKPOINT_PATH} --out mot.mp4 +``` + +For more detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md).
+ +## Citation + + + +```latex +@inproceedings{pang2021quasi, + title={Quasi-dense similarity learning for multiple object tracking}, + author={Pang, Jiangmiao and Qiu, Linlu and Li, Xia and Chen, Haofeng and Li, Qi and Darrell, Trevor and Yu, Fisher}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={164--173}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/qdtrack/metafile.yml b/mmpose/configs/mmdet/qdtrack/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..e5c5504d1bd00e43bdba7f28efcbf9dd23555342 --- /dev/null +++ b/mmpose/configs/mmdet/qdtrack/metafile.yml @@ -0,0 +1,30 @@ +Collections: + - Name: QDTrack + Metadata: + Training Data: MOT17, crowdhuman + Training Techniques: + - SGD + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Paper: + URL: https://arxiv.org/pdf/2006.06664.pdf + Title: Quasi-Dense Similarity Learning for Multiple Object Tracking + README: configs/qdtrack/README.md + +Models: + - Name: qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval + In Collection: QDTrack + Config: configs/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: MOT17 + Training Memory (GB): 5.83 + Epochs: 4 + Results: + - Task: Multi-object Tracking + Dataset: MOT17 + Metrics: + HOTA: 57.1 + MOTA: 68.1 + IDF1: 68.6 + Weights: https://download.openmmlab.com/mmtracking/mot/qdtrack/mot_dataset/qdtrack_faster-rcnn_r50_fpn_4e_mot17_20220315_145635-76f295ef.pth diff --git a/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_4e_base.py b/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_4e_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c17c3eb97eedef88949c841364b858a3a1d6e9 --- /dev/null +++ b/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_4e_base.py @@ -0,0 +1,118 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/default_runtime.py' +] + +detector = _base_.model +detector.pop('data_preprocessor') + +detector['backbone'].update( + dict( + norm_cfg=dict(type='BN', requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +detector.rpn_head.loss_bbox.update( + dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)) +detector.rpn_head.bbox_coder.update(dict(clip_border=False)) +detector.roi_head.bbox_head.update(dict(num_classes=1)) +detector.roi_head.bbox_head.bbox_coder.update(dict(clip_border=False)) +detector['init_cfg'] = dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/' + 'faster_rcnn_r50_fpn_1x_coco-person/' + 'faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth' + # noqa: E501 +) +del _base_.model + +model = dict( + type='QDTrack', + data_preprocessor=dict( + type='TrackDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + detector=detector, + track_head=dict( + type='QuasiDenseTrackHead', + roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + embed_head=dict( + type='QuasiDenseEmbedHead', + num_convs=4, + num_fcs=1, + embed_channels=256, + norm_cfg=dict(type='GN', num_groups=32), + loss_track=dict(type='MultiPosCrossEntropyLoss', loss_weight=0.25), + loss_track_aux=dict( + type='MarginL2Loss', + 
neg_pos_ub=3, + pos_margin=0, + neg_margin=0.1, + hard_mining=True, + loss_weight=1.0)), + loss_bbox=dict(type='L1Loss', loss_weight=1.0), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='CombinedSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=3, + add_gt_as_proposals=True, + pos_sampler=dict(type='InstanceBalancedPosSampler'), + neg_sampler=dict(type='RandomSampler')))), + tracker=dict( + type='QuasiDenseTracker', + init_score_thr=0.9, + obj_score_thr=0.5, + match_score_thr=0.5, + memo_tracklet_frames=30, + memo_backdrop_frames=1, + memo_momentum=0.8, + nms_conf_thr=0.5, + nms_backdrop_iou_thr=0.3, + nms_class_iou_thr=0.7, + with_cats=True, + match_metric='bisoftmax')) +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=35, norm_type=2)) +# learning policy +param_scheduler = [ + dict(type='MultiStepLR', begin=0, end=4, by_epoch=True, milestones=[3]) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=4, val_interval=4) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=50), + visualization=dict(type='TrackVisualizationHook', draw=False)) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] diff --git a/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..d87604dad6bf39028a8111708307482186118b19 --- /dev/null +++ b/mmpose/configs/mmdet/qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py @@ -0,0 +1,14 @@ +_base_ = [ + './qdtrack_faster-rcnn_r50_fpn_4e_base.py', + '../_base_/datasets/mot_challenge.py', +] + +# evaluator +val_evaluator = [ + dict(type='CocoVideoMetric', metric=['bbox'], classwise=True), + dict(type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity']) +] + +test_evaluator = val_evaluator +# The fluctuation of HOTA is about +-1. +randomness = dict(seed=6) diff --git a/mmpose/configs/mmdet/queryinst/README.md b/mmpose/configs/mmdet/queryinst/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ee62ccbf8a3b77a6ddbb62c8ba3740bc509d8ae8 --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/README.md @@ -0,0 +1,36 @@ +# QueryInst + +> [Instances as Queries](https://openaccess.thecvf.com/content/ICCV2021/html/Fang_Instances_As_Queries_ICCV_2021_paper.html) + + + +## Abstract + +We present QueryInst, a new perspective for instance segmentation. QueryInst is a multi-stage end-to-end system that treats instances of interest as learnable queries, enabling query based object detectors, e.g., Sparse R-CNN, to have strong instance segmentation performance. The attributes of instances such as categories, bounding boxes, instance masks, and instance association embeddings are represented by queries in a unified manner. 
In QueryInst, a query is shared by both detection and segmentation via dynamic convolutions and driven by parallelly-supervised multi-stage learning. We conduct extensive experiments on three challenging benchmarks, i.e., COCO, CityScapes, and YouTube-VIS to evaluate the effectiveness of QueryInst in object detection, instance segmentation, and video instance segmentation tasks. For the first time, we demonstrate that a simple end-to-end query based framework can achieve the state-of-the-art performance in various instance-level recognition tasks. + +
+ +
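The "dynamic convolutions" through which a single query drives both the box and mask heads boil down to generating per-query weights from the query embedding and applying them to that instance's RoI features. The sketch below mirrors the channel settings of the `DynamicConv` blocks in the configs later in this diff (256 -> 64 -> 256 with a 7x7 or 14x14 RoI), but it is an illustrative simplification, not mmdet's `DIIHead`/`DynamicMaskHead` code, and the class name is hypothetical.

```python
import torch
import torch.nn as nn


class TinyDynamicConv(nn.Module):
    """Illustrative per-query dynamic 1x1 interaction (QueryInst/Sparse R-CNN
    style): the query embedding is turned into two weight matrices that are
    applied to the flattened RoI features of that instance."""

    def __init__(self, in_channels=256, feat_channels=64):
        super().__init__()
        self.feat_channels = feat_channels
        self.param_gen = nn.Linear(in_channels, 2 * in_channels * feat_channels)
        self.norm1 = nn.LayerNorm(feat_channels)
        self.norm2 = nn.LayerNorm(in_channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, query, roi_feat):
        # query: (N, C) one embedding per instance, roi_feat: (N, C, S, S).
        n, c, s, _ = roi_feat.shape
        feat = roi_feat.flatten(2).permute(0, 2, 1)           # (N, S*S, C)
        params = self.param_gen(query)
        w1 = params[:, :c * self.feat_channels].view(n, c, self.feat_channels)
        w2 = params[:, c * self.feat_channels:].view(n, self.feat_channels, c)
        feat = self.act(self.norm1(torch.bmm(feat, w1)))      # (N, S*S, 64)
        feat = self.act(self.norm2(torch.bmm(feat, w2)))      # (N, S*S, 256)
        return feat


queries = torch.randn(100, 256)       # one learnable query per proposal
rois = torch.randn(100, 256, 7, 7)    # pooled RoI features for those proposals
print(TinyDynamicConv()(queries, rois).shape)   # torch.Size([100, 49, 256])
```

Because the same query produces the parameters for both the box branch and the mask branch at every stage, detection and segmentation stay tied to the same instance hypothesis, which is the core idea the abstract describes.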
+ +## Results and Models + +| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | mask AP | Config | Download | +| :-------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :-----: | :---------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| QueryInst | R-50-FPN | pytorch | 1x | 100 | False | False | 42.0 | 37.5 | [config](./queryinst_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 100 | True | False | 44.8 | 39.8 | [config](./queryinst_r50_fpn_ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 300 | True | True | 47.5 | 41.7 | [config](./queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 100 | True | False | 46.4 | 41.0 | [config](./queryinst_r101_fpn_ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 300 | True | True | 49.0 | 42.9 | [config](./queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621.log.json) | + +## Citation + +```latex +@InProceedings{Fang_2021_ICCV, + author = {Fang, Yuxin and Yang, Shusheng and Wang, Xinggang and Li, Yu and Fang, Chen and Shan, Ying and Feng, Bin and Liu, Wenyu}, + title 
= {Instances As Queries}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2021}, + pages = {6910-6919} +} +``` diff --git a/mmpose/configs/mmdet/queryinst/metafile.yml b/mmpose/configs/mmdet/queryinst/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ea3b00a945c8856b8c63f68a0ec6a48c70a933f --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/metafile.yml @@ -0,0 +1,100 @@ +Collections: + - Name: QueryInst + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - QueryInst + Paper: + URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf + Title: 'Instances as Queries' + README: configs/queryinst/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/detectors/queryinst.py + Version: v2.18.0 + +Models: + - Name: queryinst_r50_fpn_1x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth + + - Name: queryinst_r50_fpn_ms-480-800-3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth + + - Name: queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth + + - Name: queryinst_r101_fpn_ms-480-800-3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth + + - Name: queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth diff --git a/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py b/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1692c134698a98da33612487a9fb703117fdb8b6 --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_ms-480-800-3x_coco.py b/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dd5b7f452e583eb362e0bb05f272a771d68b6e48 --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/queryinst_r101_fpn_ms-480-800-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..63d61d78872b452bdd8d2607fc03181b169ea845 --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_1x_coco.py @@ -0,0 +1,155 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +num_stages = 6 +num_proposals = 100 +model = dict( + type='QueryInst', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + add_extra_convs='on_input', + num_outs=4), + rpn_head=dict( + type='EmbeddingRPNHead', + num_proposals=num_proposals, + proposal_feature_channel=256), + roi_head=dict( + type='SparseRoIHead', + num_stages=num_stages, + stage_loss_weights=[1] * num_stages, + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='DIIHead', + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + 
norm_cfg=dict(type='LN')), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=False, + target_means=[0., 0., 0., 0.], + target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) + ], + mask_head=[ + dict( + type='DynamicMaskHead', + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=14, + with_proj=False, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + num_convs=4, + num_classes=80, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + class_agnostic=False, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(type='deconv', scale_factor=2), + loss_mask=dict( + type='DiceLoss', + loss_weight=8.0, + use_sigmoid=True, + activate=False, + eps=1e-5)) for _ in range(num_stages) + ]), + # training and testing settings + train_cfg=dict( + rpn=None, + rcnn=[ + dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ]), + sampler=dict(type='PseudoSampler'), + pos_weight=1, + mask_size=28, + ) for _ in range(num_stages) + ]), + test_cfg=dict( + rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}), + clip_grad=dict(max_norm=0.1, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..33ab061267bc9753f490acc57ed8d4193f1250b4 --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py @@ -0,0 +1,45 @@ +_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py' +num_proposals = 300 +model = dict( + rpn_head=dict(num_proposals=num_proposals), + test_cfg=dict( + _delete_=True, + rpn=None, + rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) + +# augmentation strategy originates from DETR. 
+train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_ms-480-800-3x_coco.py b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6b99374ef4364dc76a60c2dd74377f92c15780ed --- /dev/null +++ b/mmpose/configs/mmdet/queryinst/queryinst_r50_fpn_ms-480-800-3x_coco.py @@ -0,0 +1,32 @@ +_base_ = './queryinst_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning policy +max_epochs = 36 +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/regnet/README.md b/mmpose/configs/mmdet/regnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0bfcec1891ccb468bcccf975b9bd26bca53e0a7f --- /dev/null +++ b/mmpose/configs/mmdet/regnet/README.md @@ -0,0 +1,121 @@ +# RegNet + +> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) + + + +## Abstract + +In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. 
The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + +
+ +
+ +## Introduction + +We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet. + +The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). + +## Usage + +Using a RegNet model involves two steps: + +1. Convert the model to the ResNet-style format supported by MMDetection +2. Modify the backbone and neck in the config accordingly + +### Convert model + +We already provide models ranging from 400MF to 12GF in our model zoo. + +For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained with [pycls](https://github.com/facebookresearch/pycls/) to +ResNet-style checkpoints used in MMDetection. + +```bash +python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +### Modify config + +Users can modify the backbone's `depth` and the corresponding keys in `arch` according to the configs in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). +The FPN parameter `in_channels` can be found in Figures 15 & 16 of the paper (`wi` in the legend). +This directory already provides configs, together with their performance, for RegNetX models from the 800MF to 12GF level. +For other pre-trained or self-implemented RegNet models, users are responsible for checking these parameters themselves. + +**Note**: Although Figs. 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, these values are quantized and thus inaccurate; using them sometimes produces a backbone whose keys do not match those of the pre-trained model.
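+
+For reference, the sketch below mirrors the `mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py` config added in this change; for other RegNetX variants, only `arch`, the pre-trained checkpoint name, and the FPN `in_channels` need to change.
+
+```python
+# Minimal sketch: swap the ResNet-50 backbone of Mask R-CNN for RegNetX-3.2GF.
+# The `arch` name and the FPN `in_channels` must stay consistent with the
+# chosen RegNet variant (see the `wi` values in Figs. 15 & 16 of the paper).
+_base_ = [
+    '../_base_/models/mask-rcnn_r50_fpn.py',
+    '../_base_/datasets/coco_instance.py',
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+model = dict(
+    data_preprocessor=dict(
+        # The mean and std are used in PyCls when training RegNets
+        mean=[103.53, 116.28, 123.675],
+        std=[57.375, 57.12, 58.395],
+        bgr_to_rgb=False),
+    backbone=dict(
+        _delete_=True,  # drop the ResNet settings inherited from the base
+        type='RegNet',
+        arch='regnetx_3.2gf',
+        out_indices=(0, 1, 2, 3),
+        frozen_stages=1,
+        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_eval=True,
+        style='pytorch',
+        init_cfg=dict(
+            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
+    neck=dict(
+        type='FPN',
+        in_channels=[96, 192, 432, 1008],  # stage widths of RegNetX-3.2GF
+        out_channels=256,
+        num_outs=5))
+```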
+ +## Results and Models + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------------------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.4 | 12.0 | 38.2 | 34.7 | [config](../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| [RegNetX-3.2GF-FPN](./mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](./mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | +| [RegNetX-4.0GF-FPN](./mask-rcnn_regnetx-4GF_fpn_1x_coco.py) | pytorch | 1x | 5.5 | | 41.5 | 37.4 | [config](./mask-rcnn_regnetx-4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217.log.json) | +| [R-101-FPN](../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py) | pytorch | 1x | 6.4 | 10.3 | 40.0 | 36.1 | [config](../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | +| [RegNetX-6.4GF-FPN](./mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py) | pytorch | 1x | 6.1 | | 41.0 | 37.1 | [config](./mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439.log.json) | +| [X-101-32x4d-FPN](../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py) | pytorch | 1x | 7.6 | 9.4 | 41.9 | 37.5 | [config](../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | +| [RegNetX-8.0GF-FPN](./mask-rcnn_regnetx-8GF_fpn_1x_coco.py) | pytorch | 1x | 6.4 | | 41.7 | 37.5 | [config](./mask-rcnn_regnetx-8GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515.log.json) | +| [RegNetX-12GF-FPN](./mask-rcnn_regnetx-12GF_fpn_1x_coco.py) | pytorch | 1x | 7.4 | | 42.2 | 38 | [config](./mask-rcnn_regnetx-12GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552.log.json) | +| [RegNetX-3.2GF-FPN-DCN-C3-C5](./mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](./mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726.log.json) | + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.0 | 18.2 | 37.4 | [config](../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| [RegNetX-3.2GF-FPN](./faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.5 | | 39.9 | [config](./faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927.log.json) | +| [RegNetX-3.2GF-FPN](./faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py) | pytorch | 2x | 4.5 | | 41.1 | [config](./faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955.log.json) | + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-----------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../retinanet/retinanet_r50_fpn_1x_coco.py) | pytorch | 1x | 3.8 | 16.6 | 36.5 | [config](../retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | +| [RegNetX-800MF-FPN](./retinanet_regnetx-800MF_fpn_1x_coco.py) | pytorch | 1x | 2.5 | | 35.6 | [config](./retinanet_regnetx-800MF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403.log.json) | +| [RegNetX-1.6GF-FPN](./retinanet_regnetx-1.6GF_fpn_1x_coco.py) | pytorch | 1x | 3.3 | | 37.3 | [config](./retinanet_regnetx-1.6GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403.log.json) | +| [RegNetX-3.2GF-FPN](./retinanet_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.2 | | 39.1 | [config](./retinanet_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | + +### Pre-trained models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
+ +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :---------------: | :----------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster RCNN | [RegNetX-400MF-FPN](./faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py) | pytorch | 3x | 2.3 | | 37.1 | - | [config](./faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112.log.json) | +| Faster RCNN | [RegNetX-800MF-FPN](./faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py) | pytorch | 3x | 2.8 | | 38.8 | - | [config](./faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118.log.json) | +| Faster RCNN | [RegNetX-1.6GF-FPN](./faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py) | pytorch | 3x | 3.4 | | 40.5 | - | [config](./faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325.log.json) | +| Faster RCNN | [RegNetX-3.2GF-FPN](./faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | pytorch | 3x | 4.4 | | 42.3 | - | [config](./faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152.log.json) | +| Faster RCNN | [RegNetX-4GF-FPN](./faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py) | pytorch | 3x | 4.9 | | 42.8 | - | [config](./faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201.log.json) | +| Mask RCNN | [RegNetX-400MF-FPN](./mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 2.5 | | 37.6 | 34.4 | [config](./mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443.log.json) | +| Mask RCNN | [RegNetX-800MF-FPN](./mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 2.9 | | 39.5 | 36.1 | [config](./mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641.log.json) | +| Mask RCNN | [RegNetX-1.6GF-FPN](./mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 3.6 | | 40.9 | 37.5 | [config](./mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641.log.json) | +| Mask RCNN | [RegNetX-3.2GF-FPN](./mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | pytorch | 3x | 5.0 | | 43.1 | 38.7 | [config](./mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) | +| Mask RCNN | [RegNetX-4GF-FPN](./mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py) | pytorch | 3x | 5.1 | | 43.4 | 39.2 | [config](./mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621.log.json) | +| Cascade Mask RCNN | [RegNetX-400MF-FPN](./cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py) | pytorch | 3x | 4.3 | | 41.6 | 36.4 | [config](./cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619.log.json) | +| Cascade Mask RCNN | [RegNetX-800MF-FPN](./cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py) | pytorch | 3x | 4.8 | | 42.8 | 37.6 | [config](./cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-1.6GF-FPN](./cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py) | pytorch | 3x | 5.4 | | 44.5 | 39.0 | [config](./cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-3.2GF-FPN](./cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | pytorch | 3x | 6.4 | | 45.8 | 40.0 | [config](./cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-4GF-FPN](./cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py) | pytorch | 3x | 6.9 | | 45.8 | 40.0 | [config](./cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034.log.json) | + +### Notice + +1. The models are trained with a different weight decay, `weight_decay=5e-5`, following the setting used for ImageNet training of RegNets. This brings an improvement of at least 0.7 AP absolute but does not improve the model using ResNet-50. +2. RetinaNets using RegNets are trained with a learning rate of 0.02 and gradient clipping. We find that a learning rate of 0.02 improves the results by at least 0.7 AP absolute and that gradient clipping is necessary to stabilize training. However, this does not improve the performance of the ResNet-50-FPN RetinaNet. The corresponding optimizer override is sketched below.
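+
+In config terms, these settings correspond to the following override, a sketch taken from the ms-3x RegNet configs added in this change (the exact values live in each individual config file):
+
+```python
+# Sketch of the optimizer settings described in the notice above:
+# weight_decay=5e-5 as in the ImageNet training of RegNets, and
+# lr=0.02 with gradient clipping where it is needed for stability.
+optim_wrapper = dict(
+    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
+    clip_grad=dict(max_norm=35, norm_type=2))
+```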
+ +## Citation + +```latex +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..74e6adaba5c262d45aaec876d1225b0061bb290b --- /dev/null +++ b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ea219021260b6aa3a844eb6b4780e9669e50ed3b --- /dev/null +++ b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py @@ -0,0 +1,28 @@ +_base_ = [ + '../common/ms_3x_coco-instance.py', + '../_base_/models/cascade-mask-rcnn_r50_fpn.py' +] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict(optimizer=dict(weight_decay=0.00005)) diff --git a/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe47f837437163710ecd28f1bb217c643464965 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 384], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e22886a80f92ba4269477a307b2689c45468381c --- /dev/null +++ b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 
'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..655bdc60c772875e0a1ed871bd6bf02aab8e39cc --- /dev/null +++ b/mmpose/configs/mmdet/regnet/cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e8302bdd1537b825f36777e3211d27dec8fb0c --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..db49092e2fb7e1cf3dbcad2bb99aa08396ea35e7 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..be533603085a89b65556b47f5e333fdde734bbd1 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d5d5d689162d805c0cfb4d84f9a128faf90c25 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py @@ -0,0 +1,25 @@ +_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict(optimizer=dict(weight_decay=0.00005)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2edeff9c1f5a794ed14dc8723917986ac26e3d36 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 384], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..afcbb5d5d1a8aee47267d1f82fff8d40fa0d8e9b --- /dev/null +++ b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f659ec9689068afd94aa3bc545d4fed91ffb5eb4 --- /dev/null +++ 
b/mmpose/configs/mmdet/regnet/faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..60874c66dbc37df824a9c44bb8c28a441f7f84e4 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-12GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-12GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e82cecea010fb32143f809add198a052285a6897 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-12GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_12gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')), + neck=dict( + type='FPN', + in_channels=[224, 448, 896, 2240], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c1d1ac3a7bd87bd210b4cd2194dd7e430f8d96 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'))) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c52bf13ff6df5cda353c21ac32a950602620dbde --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', 
'../_base_/default_runtime.py' +] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..36482c939dc3e600171b98bc159440e5fb740ffa --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py @@ -0,0 +1,60 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning policy +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e1921f0dae8ad6656a7785d9d4655f9f349b3 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 
384], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ce9f8ef4ffbcce66ec0184b3ff06a92425231597 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f160ccf66700d98a6403ed736928e529368e800c --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e17a3d7695fa7ba9e135d7a436118aae29be4747 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_6.4gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')), + neck=dict( + type='FPN', + in_channels=[168, 392, 784, 1624], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..93851fdbb99e5d8e3a58062c7ad83d2acad14ac6 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/ms-poly_3x_coco-instance.py', + '../_base_/models/mask-rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + 
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-8GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-8GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..62a4c931512e6b46093b03fd4e80741a93151c6a --- /dev/null +++ b/mmpose/configs/mmdet/regnet/mask-rcnn_regnetx-8GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_8.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 720, 1920], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/metafile.yml b/mmpose/configs/mmdet/regnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..19fbba80f0396e1dad7a330ef769d98ad1a0c4d2 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/metafile.yml @@ -0,0 +1,797 @@ +Models: + - Name: mask-rcnn_regnetx-3.2GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-4GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-4GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-6.4GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-6.4GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.1 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-8GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-8GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-12GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-12GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-3.2GF-mdconv-c3-c5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-3.2GF_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Training 
Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-3.2GF_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-3.2GF_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-800MF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 2.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-1.6GF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.3 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-3.2GF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + 
Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-400MF_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-400MF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 2.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-800MF_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-800MF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 2.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 3.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 4.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster-rcnn_regnetx-4GF_fpn_ms-3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster-rcnn_regnetx-4GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 4.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-400MF_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 2.5 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-800MF_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 2.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + 
Dataset: COCO + Metrics: + box AP: 39.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-1.6GF_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 3.6 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask-rcnn_regnetx-4GF_fpn_ms-poly-3x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade-mask-rcnn_regnetx-400MF_fpn_ms-3x_coco.py + Metadata: + Training 
Memory (GB): 4.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade-mask-rcnn_regnetx-800MF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade-mask-rcnn_regnetx-1.6GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade-mask-rcnn_regnetx-4GF_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 6.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 diff --git a/mmpose/configs/mmdet/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7395c1bfbfa16670294c721f9f3135da9b9e69ae --- /dev/null +++ b/mmpose/configs/mmdet/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8a32cec195901e2f1326bf62f4fa4508e744d2 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + data_preprocessor=dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005), + clip_grad=dict(max_norm=35, norm_type=2)) diff --git a/mmpose/configs/mmdet/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py b/mmpose/configs/mmdet/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f6f8989320d6ffbcd55148471f62a962c52f9131 --- /dev/null +++ b/mmpose/configs/mmdet/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + 
backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) diff --git a/mmpose/configs/mmdet/reid/README.md b/mmpose/configs/mmdet/reid/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a5bfe5ec49947e939a3261fa9938d77cc04df44f --- /dev/null +++ b/mmpose/configs/mmdet/reid/README.md @@ -0,0 +1,135 @@ +# Training a ReID Model + +You may want to train a ReID model for multiple object tracking or other applications. We support ReID model training in MMDetection, which is built upon [MMPretrain](https://github.com/open-mmlab/mmpretrain). + +### 1. Development Environment Setup + +For the tracking development environment setup, please refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Preparation + +This section will show how to train a ReID model on standard datasets, i.e., MOT17. + +We need to download the datasets following the docs. We use [ReIDDataset](mmdet/datasets/reid_dataset.py) to maintain standard datasets. In this case, you need to convert the official dataset to this style. We provide a script and its usage as follows: + +```shell +python tools/dataset_converters/mot2reid.py -i ./data/MOT17/ -o ./data/MOT17/reid --val-split 0.2 --vis-threshold 0.3 +``` + +Arguments: + +- `--val-split`: Proportion of the validation dataset to the whole ReID dataset. +- `--vis-threshold`: Threshold of visibility for each person. + +The directory of the converted datasets is as follows: + +``` +MOT17 +├── train +├── test +├── reid +│ ├── imgs +│ │ ├── MOT17-02-FRCNN_000002 +│ │ │ ├── 000000.jpg +│ │ │ ├── 000001.jpg +│ │ │ ├── ... +│ │ ├── MOT17-02-FRCNN_000003 +│ │ │ ├── 000000.jpg +│ │ │ ├── 000001.jpg +│ │ │ ├── ... +│ ├── meta +│ │ ├── train_80.txt +│ │ ├── val_20.txt +``` + +Note: `80` in `train_80.txt` means the proportion of the training dataset to the whole ReID dataset is eighty percent, while the proportion of the validation dataset is twenty percent. + +For training, we provide an annotation list `train_80.txt`. Each line of the list contains a filename and its corresponding ground-truth label. The format is as follows: + +``` +MOT17-05-FRCNN_000110/000018.jpg 0 +MOT17-13-FRCNN_000146/000014.jpg 1 +MOT17-05-FRCNN_000088/000004.jpg 2 +MOT17-02-FRCNN_000009/000081.jpg 3 +``` + +For validation, the annotation list `val_20.txt` follows the same format as above. + +Note: Images in `MOT17/reid/imgs` are cropped from raw images in `MOT17/train` by the corresponding `gt.txt`. The value of ground-truth labels should fall in range `[0, num_classes - 1]`. + +### 3. Training + +#### Training on a single GPU + +```shell +python tools/train.py configs/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py +``` + +#### Training on multiple GPUs + +We provide `tools/dist_train.sh` to launch training on multiple GPUs. +The basic usage is as follows. + +```shell +bash tools/dist_train.sh configs/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py 8 +``` + +### 4. Customized Datasets + +This section will show how to train a ReID model on customized datasets. + +### 4.1 Dataset Preparation + +You need to convert your customized dataset to the existing dataset format.
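As a rough, illustrative sketch only (this helper is not part of MMDetection), a customized dataset laid out as one folder of image crops per identity can be turned into the `filename label` list format shown above with a few lines of Python. The directory layout and paths below are assumptions that mirror the `Filelist` example in the next subsection:

```python
import os


def write_reid_annotations(imgs_root: str, out_file: str) -> None:
    """Write one `relative/path.jpg label` line per image crop."""
    # One sub-folder per identity; labels are consecutive integers from 0,
    # so they stay in the required range [0, num_classes - 1].
    persons = sorted(
        d for d in os.listdir(imgs_root)
        if os.path.isdir(os.path.join(imgs_root, d)))
    with open(out_file, 'w') as f:
        for label, person in enumerate(persons):
            for name in sorted(os.listdir(os.path.join(imgs_root, person))):
                if name.endswith(('.jpg', '.png')):
                    f.write(f'{person}/{name} {label}\n')


# Hypothetical usage, matching the layout described below:
# write_reid_annotations('data/Filelist/imgs', 'data/Filelist/meta/train.txt')
```

Sorting the identity folders keeps the label assignment deterministic across runs.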
+ +#### An example of customized dataset + +Assume we are going to implement a `Filelist` dataset, which takes filelists for both training and testing. The directory of the dataset is as follows: + +``` +Filelist +├── imgs +│ ├── person1 +│ │ ├── 000000.jpg +│ │ ├── 000001.jpg +│ │ ├── ... +│ ├── person2 +│ │ ├── 000000.jpg +│ │ ├── 000001.jpg +│ │ ├── ... +├── meta +│ ├── train.txt +│ ├── val.txt +``` + +The format of annotation list is as follows: + +``` +person1/000000.jpg 0 +person1/000001.jpg 0 +person2/000000.jpg 1 +person2/000001.jpg 1 +``` + +You can directly use [ReIDDataset](mmdet/datasets/reid_dataset.py). In this case, you only need to modify the config as follows: + +```python +# modify the path of annotation files and the image path prefix +data = dict( + train=dict( + data_prefix='data/Filelist/imgs', + ann_file='data/Filelist/meta/train.txt'), + val=dict( + data_prefix='data/Filelist/imgs', + ann_file='data/Filelist/meta/val.txt'), + test=dict( + data_prefix='data/Filelist/imgs', + ann_file='data/Filelist/meta/val.txt'), +) +# modify the number of classes, assume your training set has 100 classes +model = dict(reid=dict(head=dict(num_classes=100))) +``` + +### 4.2 Training + +The training stage is the same as `Standard Dataset`. diff --git a/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot15train80_test-mot15val20.py b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot15train80_test-mot15val20.py new file mode 100644 index 0000000000000000000000000000000000000000..4e30b22964d0504771678dbd0a551bc16a0714ea --- /dev/null +++ b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot15train80_test-mot15val20.py @@ -0,0 +1,7 @@ +_base_ = ['./reid_r50_8xb32-6e_mot17train80_test-mot17val20.py'] +model = dict(head=dict(num_classes=368)) +# data +data_root = 'data/MOT15/' +train_dataloader = dict(dataset=dict(data_root=data_root)) +val_dataloader = dict(dataset=dict(data_root=data_root)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot16train80_test-mot16val20.py b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot16train80_test-mot16val20.py new file mode 100644 index 0000000000000000000000000000000000000000..468b9bfb2453f97c83282cc2f383c7592694269c --- /dev/null +++ b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot16train80_test-mot16val20.py @@ -0,0 +1,7 @@ +_base_ = ['./reid_r50_8xb32-6e_mot17train80_test-mot17val20.py'] +model = dict(head=dict(num_classes=371)) +# data +data_root = 'data/MOT16/' +train_dataloader = dict(dataset=dict(data_root=data_root)) +val_dataloader = dict(dataset=dict(data_root=data_root)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py new file mode 100644 index 0000000000000000000000000000000000000000..83669de7c170c5de0e2054808ef7a76878bc1f24 --- /dev/null +++ b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/mot_challenge_reid.py', '../_base_/default_runtime.py' +] +model = dict( + type='BaseReID', + data_preprocessor=dict( + type='ReIDDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + backbone=dict( + type='mmpretrain.ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + 
fc_channels=1024, + out_channels=128, + num_classes=380, + loss_cls=dict(type='mmpretrain.CrossEntropyLoss', loss_weight=1.0), + loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth' # noqa: E501 + )) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + clip_grad=None, + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=6, + by_epoch=True, + milestones=[5], + gamma=0.1) +] + +# train, val, test setting +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot20train80_test-mot20val20.py b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot20train80_test-mot20val20.py new file mode 100644 index 0000000000000000000000000000000000000000..8a807996186c35f91e23f6e0ec95a2191479c15b --- /dev/null +++ b/mmpose/configs/mmdet/reid/reid_r50_8xb32-6e_mot20train80_test-mot20val20.py @@ -0,0 +1,10 @@ +_base_ = ['./reid_r50_8xb32-6e_mot17train80_test-mot17val20.py'] +model = dict(head=dict(num_classes=1701)) +# data +data_root = 'data/MOT20/' +train_dataloader = dict(dataset=dict(data_root=data_root)) +val_dataloader = dict(dataset=dict(data_root=data_root)) +test_dataloader = val_dataloader + +# train, val, test setting +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6, val_interval=7) diff --git a/mmpose/configs/mmdet/reppoints/README.md b/mmpose/configs/mmdet/reppoints/README.md new file mode 100644 index 0000000000000000000000000000000000000000..03cb86bef4e24298075d67b5acb4a2e30bafef7e --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/README.md @@ -0,0 +1,59 @@ +# RepPoints + +> [RepPoints: Point Set Representation for Object Detection](https://arxiv.org/abs/1904.11490) + + + +## Abstract + +Modern object detectors rely heavily on rectangular bounding boxes, such as anchors, proposals and the final predictions, to represent objects at various recognition stages. The bounding box is convenient to use but provides only a coarse localization of objects and leads to a correspondingly coarse extraction of object features. In this paper, we present RepPoints(representative points), a new finer representation of objects as a set of sample points useful for both localization and recognition. Given ground truth localization and recognition targets for training, RepPoints learn to automatically arrange themselves in a manner that bounds the spatial extent of an object and indicates semantically significant local areas. They furthermore do not require the use of anchors to sample a space of bounding boxes. We show that an anchor-free object detector based on RepPoints can be as effective as the state-of-the-art anchor-based detection methods, with 46.5 AP and 67.4 AP50 on the COCO test-dev detection benchmark, using ResNet-101 model. + +
+ +
+ +## Introduction + +By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/). + +We provide code support and configuration files to reproduce the results in the paper for +["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection. + +**RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`; the points learn to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrates the effectiveness of RepPoints for COCO object detection. + +Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation. + +## Results and Models + +The results on COCO 2017val are shown in the table below.
+ +| Method | Backbone | GN | Anchor | convert func | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----------: | :-: | :----: | :----------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| BBox | R-50-FPN | Y | single | - | 1x | 3.9 | 15.9 | 36.4 | [config](./reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) | +| BBox | R-50-FPN | Y | none | - | 1x | 3.9 | 15.4 | 37.4 | [config](./reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) | +| RepPoints | R-50-FPN | N | none | moment | 1x | 3.3 | 18.5 | 37.0 | [config](./reppoints-moment_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330_233609.log.json) | +| RepPoints | R-50-FPN | Y | none | moment | 1x | 3.9 | 17.5 | 38.1 | [config](./reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952.log.json) | +| RepPoints | R-50-FPN | Y | none | moment | 2x | 3.9 | - | 38.6 | [config](./reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329_150020.log.json) | +| RepPoints | R-101-FPN | Y | none | moment | 2x | 5.8 | 13.7 | 40.5 | [config](./reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329_132205.log.json) | +| RepPoints | R-101-FPN-DCN | Y | none | moment | 2x | 5.9 | 12.1 | 42.9 | [config](./reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132134.log.json) | +| RepPoints | X-101-FPN-DCN | Y | none | moment | 2x | 7.1 | 9.3 | 44.2 | [config](./reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132201.log.json) | + +**Notes:** + +- `R-xx`, `X-xx` denote the ResNet and ResNeXt architectures, respectively. +- `DCN` denotes replacing the 3x3 conv with a 3x3 deformable convolution in the `c3-c5` stages of the backbone. +- `none` in the `anchor` column means a 2-d `center point` (x,y) is used to represent the initial object hypothesis, while `single` denotes that one 4-d anchor box (x,y,w,h) with an IoU-based label assignment criterion is adopted. +- `moment`, `partial MinMax`, `MinMax` in the `convert func` column are three functions to convert a point set to a pseudo box (a sketch of the idea is given after these notes). +- Note that the results here are slightly different from those reported in the paper, due to the framework change. While the original paper uses an [MXNet](https://mxnet.apache.org/) implementation, we re-implement the method in [PyTorch](https://pytorch.org/) based on mmdetection.
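The `convert func` column above refers to how the learned point set is turned into a pseudo box for box supervision and evaluation. The snippet below is only an illustrative sketch of the three variants, not the actual implementation inside MMDetection's `RepPointsHead`; in particular, the real `moment` transform rescales the standard deviation with a learned moment-transfer factor, which is omitted here:

```python
import torch


def points_to_pseudo_box(points: torch.Tensor, method: str = 'minmax') -> torch.Tensor:
    """Illustrative point-set -> pseudo-box conversion.

    `points` has shape (N, num_points, 2) holding (x, y) coordinates;
    the returned boxes have shape (N, 4) as (x1, y1, x2, y2).
    """
    x, y = points[..., 0], points[..., 1]
    if method == 'minmax':  # tightest box around all points
        return torch.stack([x.min(1).values, y.min(1).values,
                            x.max(1).values, y.max(1).values], dim=1)
    if method == 'partial_minmax':  # min-max over only the first few points
        return points_to_pseudo_box(points[:, :4], 'minmax')
    if method == 'moment':  # box from the points' mean and standard deviation
        cx, cy = x.mean(1), y.mean(1)
        half_w, half_h = x.std(1), y.std(1)  # rescaled by a learned factor in the real head
        return torch.stack([cx - half_w, cy - half_h,
                            cx + half_w, cy + half_h], dim=1)
    raise ValueError(f'unknown transform_method: {method}')
```

In the configs, the variant is selected through `bbox_head=dict(transform_method=...)`, e.g. in `reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py` and `reppoints-partial-minmax_r50_fpn-gn_head-gn_1x_coco.py`.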
+ +## Citation + +```latex +@inproceedings{yang2019reppoints, + title={RepPoints: Point Set Representation for Object Detection}, + author={Yang, Ze and Liu, Shaohui and Hu, Han and Wang, Liwei and Lin, Stephen}, + booktitle={The IEEE International Conference on Computer Vision (ICCV)}, + month={Oct}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/reppoints/metafile.yml b/mmpose/configs/mmdet/reppoints/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..732d541fb548f6eed00d6ba0fb4ffe3854b4f9c5 --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/metafile.yml @@ -0,0 +1,181 @@ +Collections: + - Name: RepPoints + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + - FPN + - RepPoints + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.11490 + Title: 'RepPoints: Point Set Representation for Object Detection' + README: configs/reppoints/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/reppoints_detector.py#L9 + Version: v2.0.0 + +Models: + - Name: reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 62.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth + + - Name: reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 64.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth + + - Name: reppoints-moment_r50_fpn_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.3 + inference time (ms/im): + - value: 54.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth + + - Name: reppoints-moment_r50_fpn-gn_head-gn_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth + + - Name: reppoints-moment_r50_fpn-gn_head-gn_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth + + - Name: reppoints-moment_r101_fpn-gn_head-gn_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py + Metadata: + Training Memory (GB): 5.8 + inference time (ms/im): + - value: 72.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth + + - Name: reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py + Metadata: + Training Memory (GB): 5.9 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth + + - Name: reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 107.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth diff --git a/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f116e53f6ded9468098733c1bab938831fee041d --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py' +model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..76be39b8de8f52d48c6cdd4626f23221e35164ab --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py' +model = dict( + bbox_head=dict(transform_method='minmax', use_grid_points=True), + # training and testing settings + train_cfg=dict( + init=dict( + assigner=dict( + _delete_=True, + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1)))) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7dffe77a062268737205fd86ab23f22cd85479 --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py' +model = dict(bbox_head=dict(transform_method='minmax')) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5c2bfab40020d7508ba90029ad29b24da8a7ad78 --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py @@ -0,0 +1,8 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..02c447ada075ca6b076a5e7ff2ed74fb3b80c30d --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cedf2226b5ecd2e5dd207041523ab4a2627a1734 --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './reppoints-moment_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4490d4496af6d680fbed2eedcaf73e138afff0cc --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py' + +max_epochs = 24 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + 
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..df7e72a80c66f42fe8554cfb344fee87ee5fe24a --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_r50_fpn_1x_coco.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RepPointsDetector', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RepPointsHead', + num_classes=80, + in_channels=256, + feat_channels=256, + point_feat_channels=256, + stacked_convs=3, + num_points=9, + gradient_mul=0.1, + point_strides=[8, 16, 32, 64, 128], + point_base_scale=4, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5), + loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0), + transform_method='moment'), + # training and testing settings + train_cfg=dict( + init=dict( + assigner=dict(type='PointAssigner', scale=4, pos_num=1), + allowed_border=-1, + pos_weight=-1, + debug=False), + refine=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a9909efe511da9423859de6ce096b1b1524a9b6f --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/reppoints/reppoints-partial-minmax_r50_fpn-gn_head-gn_1x_coco.py b/mmpose/configs/mmdet/reppoints/reppoints-partial-minmax_r50_fpn-gn_head-gn_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..30f7844b8344110896c5d885bd0ca340322045e4 --- /dev/null +++ b/mmpose/configs/mmdet/reppoints/reppoints-partial-minmax_r50_fpn-gn_head-gn_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py' +model = dict(bbox_head=dict(transform_method='partial_minmax')) diff --git a/mmpose/configs/mmdet/res2net/README.md b/mmpose/configs/mmdet/res2net/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cd6732b60aff3d80eeb23f14a97657f57344a480 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/README.md @@ -0,0 +1,77 @@ +# Res2Net + +> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) + + + +## Abstract + +Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. + +
+ +
+ +## Introduction + +We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +| Backbone | Params. | GFLOPs | top-1 err. | top-5 err. | +| :---------------: | :-----: | :----: | :--------: | :--------: | +| ResNet-101 | 44.6 M | 7.8 | 22.63 | 6.44 | +| ResNeXt-101-64x4d | 83.5M | 15.5 | 20.40 | - | +| HRNetV2p-W48 | 77.5M | 16.1 | 20.70 | 5.50 | +| Res2Net-101 | 45.2M | 8.3 | 18.77 | 4.64 | + +Compared with other backbone networks, Res2Net requires fewer parameters and FLOPs. + +**Note:** + +- GFLOPs for classification are calculated with image size (224x224). + +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 2x | 7.4 | - | 43.0 | [config](./faster-rcnn_res2net-101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco_20200514_231734.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 2x | 7.9 | - | 43.6 | 38.7 | [config](./mask-rcnn_res2net-101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco_20200515_002413.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 20e | 7.8 | - | 45.7 | [config](./cascade-rcnn_res2net-101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco_20200515_091644.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 20e | 9.5 | - | 46.4 | 40.0 | [config](./cascade-mask-rcnn_res2net-101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco_20200515_091645.log.json) | + +### Hybrid Task Cascade (HTC) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 20e | - | - | 47.5 | 41.6 | [config](./htc_res2net-101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco_20200515_150029.log.json) | + +- Res2Net ImageNet pretrained models are in [Res2Net-PretrainedModels](https://github.com/Res2Net/Res2Net-PretrainedModels). +- More applications of Res2Net are in [Res2Net-Github](https://github.com/Res2Net/). 
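All of the configs referenced above follow the same pattern: inherit an existing ResNet-50 based config and swap in the Res2Net backbone. The following condensed sketch mirrors `./faster-rcnn_res2net-101_fpn_2x_coco.py`, one of the config files added alongside this README:

```python
# Condensed from ./faster-rcnn_res2net-101_fpn_2x_coco.py in this folder:
# reuse the Faster R-CNN R-50 2x recipe and replace only the backbone.
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,       # hierarchical groups inside each block (the "4s" in 26w_4s)
        base_width=26,  # width of each group (the "26w" in 26w_4s)
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
```

The Mask R-CNN, Cascade (Mask) R-CNN and HTC variants differ only in the `_base_` config they inherit from.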
+ +## Citation + +```latex +@article{gao2019res2net, + title={Res2Net: A New Multi-scale Backbone Architecture}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + journal={IEEE TPAMI}, + year={2020}, + doi={10.1109/TPAMI.2019.2938758}, +} +``` diff --git a/mmpose/configs/mmdet/res2net/cascade-mask-rcnn_res2net-101_fpn_20e_coco.py b/mmpose/configs/mmdet/res2net/cascade-mask-rcnn_res2net-101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..21b6d2ea1c0167b8dd643211b520ac89ddd63e10 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/cascade-mask-rcnn_res2net-101_fpn_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/res2net/cascade-rcnn_res2net-101_fpn_20e_coco.py b/mmpose/configs/mmdet/res2net/cascade-rcnn_res2net-101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..670a77454e060f8f639dbdc40064b71cd82520e9 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/cascade-rcnn_res2net-101_fpn_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py b/mmpose/configs/mmdet/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..033cf574962f51a75c3fce1e74a22efb9c6320f2 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/res2net/htc_res2net-101_fpn_20e_coco.py b/mmpose/configs/mmdet/res2net/htc_res2net-101_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d5542fda4c8181a417f14817180296e84944b832 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/htc_res2net-101_fpn_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = '../htc/htc_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/res2net/mask-rcnn_res2net-101_fpn_2x_coco.py b/mmpose/configs/mmdet/res2net/mask-rcnn_res2net-101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3a2d57304d07d9b3dbc58ee9a5d8f2355c6b4427 --- /dev/null +++ b/mmpose/configs/mmdet/res2net/mask-rcnn_res2net-101_fpn_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/res2net/metafile.yml b/mmpose/configs/mmdet/res2net/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..1d9f9ea023d895cd8a93b0f48b3bc4dee5a93e6b --- 
/dev/null +++ b/mmpose/configs/mmdet/res2net/metafile.yml @@ -0,0 +1,146 @@ +Models: + - Name: faster-rcnn_res2net-101_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: mask-rcnn_res2net-101_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/res2net/mask-rcnn_res2net-101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: cascade-rcnn_res2net-101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/res2net/cascade-rcnn_res2net-101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: cascade-mask-rcnn_res2net-101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/res2net/cascade-mask-rcnn_res2net-101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 9.5 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: htc_res2net-101_fpn_20e_coco + In Collection: HTC + Config: configs/res2net/htc_res2net-101_fpn_20e_coco.py + Metadata: + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 diff --git a/mmpose/configs/mmdet/resnest/README.md b/mmpose/configs/mmdet/resnest/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a72f842357999af4bf48e0b26edd2581d01d7a80 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/README.md @@ -0,0 +1,54 @@ +# ResNeSt + +> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + + + +## Abstract + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. + +
+ +
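The channel-wise attention across branches mentioned in the abstract can be sketched as follows. This is a simplified, single-cardinal-group illustration with invented names, not the `ResNeSt` backbone these configs register: the `radix` branch outputs are summed and globally pooled, a small 1x1-conv bottleneck produces per-branch logits, and a softmax over the radix dimension re-weights and fuses the branches.

```python
# Simplified split-attention over `radix` branches (cardinality omitted);
# illustrative only. The configs below use radix=2 and reduction_factor=4.
import torch
import torch.nn as nn
import torch.nn.functional as F


class SplitAttention(nn.Module):
    def __init__(self, channels: int, radix: int = 2, reduction_factor: int = 4):
        super().__init__()
        self.radix = radix
        inter = max(channels * radix // reduction_factor, 32)
        self.fc1 = nn.Conv2d(channels, inter, 1)          # squeeze
        self.fc2 = nn.Conv2d(inter, channels * radix, 1)  # per-branch logits

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x stacks the radix branch outputs along channels: (B, radix*C, H, W)
        b, rc, h, w = x.shape
        c = rc // self.radix
        branches = x.view(b, self.radix, c, h, w)
        gap = branches.sum(dim=1).mean(dim=(2, 3), keepdim=True)   # (B, C, 1, 1)
        logits = self.fc2(F.relu(self.fc1(gap)))                   # (B, radix*C, 1, 1)
        attn = logits.view(b, self.radix, c, 1, 1).softmax(dim=1)  # r-softmax over branches
        return (attn * branches).sum(dim=1)                        # fused (B, C, H, W)
```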
+ +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | 4.8 | - | 42.0 | [config](./faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20200926_125502.log.json) | +| S-101-FPN | pytorch | 1x | 7.1 | - | 44.5 | [config](./faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201006_021058.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | 5.5 | - | 42.6 | 38.1 | [config](./mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20200926_125503.log.json) | +| S-101-FPN | pytorch | 1x | 7.8 | - | 45.2 | 40.2 | [config](./mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_215831.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | - | - | 44.5 | [config](./cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201005_113242.log.json) | +| S-101-FPN | pytorch | 1x | 8.4 | - | 46.8 | [config](./cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201122_213640.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | - | - | 45.4 | 39.5 | [config](./cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201122_104428.log.json) | +| S-101-FPN | pytorch | 1x | 10.5 | - | 47.7 | 41.4 | [config](./cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_113243.log.json) | + +## Citation + +```latex +@article{zhang2020resnest, +title={ResNeSt: Split-Attention Networks}, +author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, +journal={arXiv preprint arXiv:2004.08955}, +year={2020} +} +``` diff --git a/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py b/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f4f19925788acc357e9720513d4f388598927a70 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py b/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ef41c05cd97d19320c02fb065b0cde1dda54d7 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py @@ -0,0 +1,101 @@ +_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) + +model = dict( + # use ResNeSt img_norm + data_preprocessor=dict( + mean=[123.68, 116.779, 103.939], + std=[58.393, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + 
fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(norm_cfg=norm_cfg))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/resnest/cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py b/mmpose/configs/mmdet/resnest/cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9dbf3fae5ffb9382b053852c35e263f109668020 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/mmpose/configs/mmdet/resnest/cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py b/mmpose/configs/mmdet/resnest/cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce7b56320a6511376237710c25061edd44b17dd --- /dev/null +++ b/mmpose/configs/mmdet/resnest/cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py @@ -0,0 +1,93 @@ +_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + # use ResNeSt img_norm + data_preprocessor=dict( + mean=[123.68, 116.779, 103.939], + std=[58.393, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + 
type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], )) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/resnest/faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py b/mmpose/configs/mmdet/resnest/faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f1e16321adff643d593268f868c09f5a318e7e93 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/mmpose/configs/mmdet/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py b/mmpose/configs/mmdet/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0ec6e07af1fcd250171cb769252eeb03f92da8 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py @@ -0,0 +1,39 @@ +_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + # use ResNeSt img_norm + data_preprocessor=dict( + mean=[123.68, 116.779, 103.939], + std=[58.393, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/resnest/mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py b/mmpose/configs/mmdet/resnest/mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3edf49f052f1f3c875cca2c061276cc1aca77604 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + 
checkpoint='open-mmlab://resnest101'))) diff --git a/mmpose/configs/mmdet/resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py b/mmpose/configs/mmdet/resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c6f27000862d74e23a665f3bf8caae0ec4a3d6f5 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py @@ -0,0 +1,46 @@ +_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + # use ResNeSt img_norm + data_preprocessor=dict( + mean=[123.68, 116.779, 103.939], + std=[58.393, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/resnest/metafile.yml b/mmpose/configs/mmdet/resnest/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..265c94094975858ff0cc0ceac3870c9b4f9b9a84 --- /dev/null +++ b/mmpose/configs/mmdet/resnest/metafile.yml @@ -0,0 +1,230 @@ +Models: + - Name: faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco + In Collection: Faster R-CNN + Config: configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco + In Collection: Faster R-CNN + Config: configs/resnest/faster-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco + In Collection: Mask R-CNN + Config: configs/resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco + In Collection: Mask R-CNN + Config: configs/resnest/mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py + Metadata: + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco + In Collection: Cascade R-CNN + Config: 
configs/resnest/cascade-rcnn_s101_fpn_syncbn-backbone+head_ms-range-1x_coco.py + Metadata: + Training Memory (GB): 8.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade-mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py + Metadata: + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade-mask-rcnn_s101_fpn_syncbn-backbone+head_ms-1x_coco.py + Metadata: + Training Memory (GB): 10.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 diff --git a/mmpose/configs/mmdet/resnet_strikes_back/README.md b/mmpose/configs/mmdet/resnet_strikes_back/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f015729a8d4ae4d78a909185a9b93b619e0f0f04 --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/README.md @@ -0,0 +1,40 @@ +# ResNet strikes back + +> [ResNet strikes back: An improved training procedure in timm](https://arxiv.org/abs/2110.00476) + + + +## Abstract + +The influential Residual Networks designed by He et al. remain the gold-standard architecture in numerous scientific publications. 
They typically serve as the default architecture in studies, or as baselines when new architectures are proposed. Yet there has been significant progress on best practices for training neural networks since the inception of the ResNet architecture in 2015. Novel optimization & data augmentation have increased the effectiveness of the training recipes. + +In this paper, we re-evaluate the performance of the vanilla ResNet-50 when trained with a procedure that integrates such advances. We share competitive training settings and pre-trained models in the timm open-source library, with the hope that they will serve as better baselines for future work. For instance, with our more demanding training setting, a vanilla ResNet-50 reaches 80.4% top-1 accuracy at resolution 224×224 on ImageNet-val without extra data or distillation. We also report the performance achieved with popular models with our training procedure. + +
+ +
+ +## Results and Models + +| Method | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------------: | :------: | :-----: | :------: | :------------: | :---------: | :---------: | :------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 rsb | 1x | 3.9 | - | 40.8 (+3.4) | - | [Config](./faster-rcnn_r50-rsb-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229.log.json) | +| Mask R-CNN | R-50 rsb | 1x | 4.5 | - | 41.2 (+3.0) | 38.2 (+3.0) | [Config](./mask-rcnn_r50-rsb-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054.log.json) | +| Cascade Mask R-CNN | R-50 rsb | 1x | 6.2 | - | 44.8 (+3.6) | 39.9 (+3.6) | [Config](./cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636.log.json) | +| RetinaNet | R-50 rsb | 1x | 3.8 | - | 39.0 (+2.5) | - | [Config](./retinanet_r50-rsb-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432.log.json) | + +**Notes:** + +- 'rsb' is short for 'resnet strikes back' +- We have done some grid searches on learning rate and weight decay and get these optimal hyper-parameters. 
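As a usage sketch, the configs in this folder all follow the same pattern: keep the detector from its `_base_` config, load the rsb ImageNet checkpoint into the backbone only, and switch the optimizer to the AdamW setting found by the grid search mentioned above. The snippet mirrors `faster-rcnn_r50-rsb-pre_fpn_1x_coco.py` added below; the RetinaNet variant uses `lr=0.0001` instead.

```python
# Shared pattern of the resnet_strikes_back configs
# (mirrors faster-rcnn_r50-rsb-pre_fpn_1x_coco.py).
_base_ = [
    '../_base_/models/faster-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# ImageNet-1k ResNet-50 trained with the rsb-a1 recipe (from mmclassification)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa

model = dict(
    backbone=dict(
        # load only the 'backbone.' weights from the classification checkpoint
        init_cfg=dict(
            type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))

# searched hyper-parameters: AdamW instead of SGD, no weight decay on norm layers
optim_wrapper = dict(
    optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
```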
+ +## Citation + +```latex +@article{wightman2021resnet, +title={Resnet strikes back: An improved training procedure in timm}, +author={Ross Wightman, Hugo Touvron, Hervé Jégou}, +journal={arXiv preprint arXiv:2110.00476}, +year={2021} +} +``` diff --git a/mmpose/configs/mmdet/resnet_strikes_back/cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/resnet_strikes_back/cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..de7b95b0863d1ea89382fd9fa5852eccf0f34150 --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optim_wrapper = dict( + optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05), + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/mmpose/configs/mmdet/resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8c60f66a7ba8e5b6a7ee6af06e771b3c6ad71f6c --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optim_wrapper = dict( + optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05), + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/mmpose/configs/mmdet/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..85e25d392359b1a7811fb0c933ede5edacbfb9c3 --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optim_wrapper = dict( + optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05), + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/mmpose/configs/mmdet/resnet_strikes_back/metafile.yml b/mmpose/configs/mmdet/resnet_strikes_back/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..74b152107d7a6d96f671c52d5273c79751122bfa --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/metafile.yml @@ 
-0,0 +1,116 @@ +Models: + - Name: faster-rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Faster R-CNN + Config: configs/resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: cascade-mask-rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnet_strikes_back/cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: retinanet_r50-rsb-pre_fpn_1x_coco + In Collection: RetinaNet + Config: configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: mask-rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Mask R-CNN + Config: configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 diff --git a/mmpose/configs/mmdet/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce7bfd87d6b41a36acc4ff207695e38ef89700c --- /dev/null +++ b/mmpose/configs/mmdet/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optim_wrapper = dict( + optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05), + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/mmpose/configs/mmdet/retinanet/README.md b/mmpose/configs/mmdet/retinanet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b38335a3ce3585918cd45f70a18a2c703d201e9b --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/README.md @@ -0,0 +1,53 @@ +# RetinaNet + +> [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) + + + +## Abstract + +The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors. + +
+ +
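For intuition, the loss reshaping described in the abstract fits in a few lines; this is a minimal sketch, not mmdet's `FocalLoss` module (which adds reduction and loss-weight options). The modulating factor `(1 - p_t)**gamma` shrinks the contribution of well-classified examples, so the huge number of easy negatives produced by dense anchor sampling cannot dominate training.

```python
# Minimal sigmoid focal loss sketch (illustrative only).
import torch
import torch.nn.functional as F


def sigmoid_focal_loss(logits: torch.Tensor,
                       targets: torch.Tensor,
                       gamma: float = 2.0,
                       alpha: float = 0.25) -> torch.Tensor:
    """FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), per anchor and class.

    `targets` is a 0./1. float tensor with the same shape as `logits`.
    """
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)              # prob. of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1.0 - p_t) ** gamma * ce).mean()      # easy examples down-weighted
```

With `gamma=0` this reduces to alpha-balanced cross entropy; the paper's defaults are `gamma=2.0` and `alpha=0.25`.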
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :----------: | :------: | :------------: | :----: | :---------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-18-FPN | pytorch | 1x | 1.7 | | 31.7 | [config](./retinanet_r18_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055.log.json) | +| R-18-FPN | pytorch | 1x(1 x 8 BS) | 5.0 | | 31.7 | [config](./retinanet_r18_fpn_1xb8-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255.log.json) | +| R-50-FPN | caffe | 1x | 3.5 | 18.6 | 36.3 | [config](./retinanet_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531_012518.log.json) | +| R-50-FPN | pytorch | 1x | 3.8 | 19.0 | 36.5 | [config](./retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 2.8 | 31.6 | 36.4 | [config](./retinanet_r50_fpn_amp-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 37.4 | [config](./retinanet_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131_114738.log.json) | +| R-101-FPN | caffe | 1x | 5.5 | 14.7 | 38.5 | [config](./retinanet_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531_012536.log.json) | +| R-101-FPN | pytorch | 1x | 5.7 | 15.0 | 38.5 | [config](./retinanet_r101_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130_003055.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 38.9 | [config](./retinanet_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131_114859.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 12.1 | 39.9 | [config](./retinanet_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130_003004.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 40.1 | [config](./retinanet_x101-32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131_114812.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.0 | 8.7 | 41.0 | [config](./retinanet_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130_003008.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 40.8 | [config](./retinanet_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131_114833.log.json) | + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
+ +| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :----: | :--------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 3x | 3.5 | 39.5 | [config](./retinanet_r50_fpn_ms-640-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.log.json) | +| R-101-FPN | caffe | 3x | 5.4 | 40.7 | [config](./retinanet_r101-caffe_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.log.json) | +| R-101-FPN | pytorch | 3x | 5.4 | 41 | [config](./retinanet_r101_fpn_ms-640-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.log.json) | +| X-101-64x4d-FPN | pytorch | 3x | 9.8 | 41.6 | [config](./retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.log.json) | + +## Citation + +```latex +@inproceedings{lin2017focal, + title={Focal loss for dense object detection}, + author={Lin, Tsung-Yi and Goyal, Priya and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2017} +} +``` diff --git a/mmpose/configs/mmdet/retinanet/metafile.yml b/mmpose/configs/mmdet/retinanet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..0551541c59100d3cc8fb361cc8895c2dbd4cf8f3 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/metafile.yml @@ -0,0 +1,312 @@ +Collections: + - Name: RetinaNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Focal Loss + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1708.02002 + Title: "Focal Loss for Dense Object Detection" + README: configs/retinanet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6 + Version: v2.0.0 + +Models: + - Name: retinanet_r18_fpn_1x_coco + In Collection: RetinaNet + Config: 
configs/retinanet/retinanet_r18_fpn_1x_coco.py + Metadata: + Training Memory (GB): 1.7 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth + + - Name: retinanet_r18_fpn_1xb8-1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Training Resources: 1x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth + + - Name: retinanet_r50-caffe_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.5 + inference time (ms/im): + - value: 53.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth + + - Name: retinanet_r50_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + inference time (ms/im): + - value: 52.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth + + - Name: retinanet_r50_fpn_amp-1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_amp-1x_coco.py + Metadata: + Training Memory (GB): 2.8 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 31.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth + + - Name: retinanet_r50_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth + + - Name: retinanet_r50_fpn_ms-640-800-3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth + + - Name: retinanet_r101-caffe_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 68.03 + 
hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth + + - Name: retinanet_r101-caffe_fpn_ms-3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth + + - Name: retinanet_r101_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.7 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth + + - Name: retinanet_r101_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 5.7 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth + + - Name: retinanet_r101_fpn_ms-640-800-3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth + + - Name: retinanet_x101-32x4d_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth + + - Name: retinanet_x101-32x4d_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth + + - Name: retinanet_x101-64x4d_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.0 + 
inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth + + - Name: retinanet_x101-64x4d_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.0 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth + + - Name: retinanet_x101-64x4d_fpn_ms-640-800-3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1f3a4487103eea868eafe8539517b38455025bbe --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './retinanet_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cfe773459c2529079274b241f5f99ae66d8906ad --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,8 @@ +_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py' +# learning policy +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a7f06002413dcdf2716975655a582a3eefaf007a --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..721112a221953bb86dc3259e3991d7f0f740b26c --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git 
a/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..be018eaac672a4c1c3a61eac9940c4d28ea4fb40 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..566397227f7861a268c4cc4e111279b95b620ab8 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py @@ -0,0 +1,9 @@ +_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] +# optimizer +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..960211806756d38cf74eed998addcca3f8467a4d --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1x_coco.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +# TODO: support auto scaling lr +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (2 samples per GPU) +# auto_scale_lr = dict(base_batch_size=16) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d2e88d68e3366671e402b1766d3b456593262a9b --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# data +train_dataloader = dict(batch_size=8) + +# model +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) + +# Note: If the learning rate is set to 0.0025, the mAP will be 32.4. +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)) +# TODO: support auto scaling lr +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (1 GPUs) x (8 samples per GPU) +# auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d6833f3f4711ec28a25ae8a51687fc4ac13ffb89 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r18_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6ba1cdddc4707b40f549189f768457312635669d --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + # use caffe img_norm + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..93687d8c27b73ae2a172b45a733345e5fc036f03 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-1x_coco.py @@ -0,0 +1,15 @@ +_base_ = './retinanet_r50-caffe_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6d1604fb9efd5deb11ffc04f6f9685739f82aea9 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py' +# training schedule for 2x +train_cfg = dict(max_epochs=24) + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5a6d42a13c27d5fc0b8072e2c96ef5d15a0f248c --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50-caffe_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py' + +# training schedule for 2x +train_cfg = dict(max_epochs=36) + +# learning rate policy +param_scheduler = [ + dict( + 
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..00d2567b245dba2b2be815a92146ea1364e1e799 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + './retinanet_tta.py' +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_2x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..47511b78ed2edb43121de2fc27986f6bb81abcfa --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_2x_coco.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# training schedule for 2x +train_cfg = dict(max_epochs=24) + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2f10db2f3c84d4b1970f13f54c563408487d04af --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../common/lsj-200e_coco-detection.py' +] + +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +model = dict(data_preprocessor=dict(batch_augments=batch_augments)) + +train_dataloader = dict(batch_size=8, num_workers=4) +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_90k_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1e1b2fd950a0293220cc93ce3f3b377b4163f3aa --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_90k_coco.py @@ -0,0 +1,24 @@ +_base_ = 'retinanet_r50_fpn_1x_coco.py' + +# training schedule for 90k +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=90000, + val_interval=10000) +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=90000, + by_epoch=False, + milestones=[60000, 80000], + gamma=0.1) +] +train_dataloader = dict(sampler=dict(type='InfiniteSampler')) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000)) + +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_amp-1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_amp-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..acf5266337b8e73957a1cdf2b06076c1733b4d56 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_amp-1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' + +# MMEngine support the following two ways, users can choose +# according to convenience +# optim_wrapper = dict(type='AmpOptimWrapper') +_base_.optim_wrapper.type = 'AmpOptimWrapper' diff --git a/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d91cf8ce0df15968706631d7eac76e834cba93dc --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py @@ -0,0 +1,4 @@ +_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_tta.py b/mmpose/configs/mmdet/retinanet/retinanet_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..d0f37e0ab25e2aff1ad55e76a7ee02777293d507 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_tta.py @@ -0,0 +1,23 @@ +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)) + +img_scales = [(1333, 800), (666, 400), (2000, 1200)] +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=None), + dict( + type='TestTimeAug', + transforms=[[ + dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales + ], [ + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) 
+ ], [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'flip', + 'flip_direction')) + ]]) +] diff --git a/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..765a4c2cc0f69bf13891bf371c94c17b6cd5f30c --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..14de96faf70180d7828a670630a8f48a3cd1081d --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..948cd18e4d995d18d947b345ba7229b5cad60eb1 --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ad04b6eea793add40c81d1d7096481597357d5bd --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..853134160cd2128cac7954cca7e008444522fd2c --- /dev/null +++ b/mmpose/configs/mmdet/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py @@ -0,0 +1,11 @@ +_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] +# optimizer +model = 
dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) +optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01)) diff --git a/mmpose/configs/mmdet/rpn/README.md b/mmpose/configs/mmdet/rpn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bd328b4746d4125f68554eeeca3d2d765c638a5a --- /dev/null +++ b/mmpose/configs/mmdet/rpn/README.md @@ -0,0 +1,39 @@ +# RPN + +> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) + + + +## Abstract + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + +
+ +
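Before the benchmark numbers, a minimal, self-contained sketch of the idea described above may help: an RPN head is a small fully convolutional network that, for every anchor at every feature-map position, predicts an objectness score and four box-regression deltas. The toy PyTorch module below is illustrative only and is not the MMDetection implementation; all names and sizes are made up.

```python
import torch
import torch.nn as nn


class TinyRPNHead(nn.Module):
    """Toy RPN head: a shared 3x3 conv followed by two sibling 1x1 convs that
    predict an objectness score and 4 box deltas per anchor per position."""

    def __init__(self, in_channels: int = 256, num_anchors: int = 3):
        super().__init__()
        self.shared = nn.Conv2d(in_channels, in_channels, 3, padding=1)
        self.objectness = nn.Conv2d(in_channels, num_anchors, 1)
        self.bbox_deltas = nn.Conv2d(in_channels, num_anchors * 4, 1)

    def forward(self, feat: torch.Tensor):
        x = torch.relu(self.shared(feat))
        return self.objectness(x), self.bbox_deltas(x)


# One FPN level of features: (batch, channels, height, width).
scores, deltas = TinyRPNHead()(torch.randn(1, 256, 50, 68))
print(scores.shape, deltas.shape)  # [1, 3, 50, 68] and [1, 12, 50, 68]
```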
+ +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR1000 | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 3.5 | 22.6 | 58.7 | [config](./rpn_r50-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531-5b903a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531_012334.log.json) | +| R-50-FPN | pytorch | 1x | 3.8 | 22.3 | 58.2 | [config](./rpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218_151240.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 58.6 | [config](./rpn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131-0728c9b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131_190631.log.json) | +| R-101-FPN | caffe | 1x | 5.4 | 17.3 | 60.0 | [config](./rpn_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531-0629a2e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531_012345.log.json) | +| R-101-FPN | pytorch | 1x | 5.8 | 16.5 | 59.7 | [config](./rpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131-2ace2249.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131_191000.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 60.2 | [config](./rpn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131-24e3db1a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131_191106.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 13.0 | 60.6 | [config](./rpn_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219-b02646c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219_012037.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 61.1 | [config](./rpn_x101-32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208-d22bd0bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208_200752.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.1 | 9.1 | 61.0 | [config](./rpn_x101-64x4d_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208-cde6f7dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208_200752.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 61.5 | [config](./rpn_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208-c65f524f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208_200752.log.json) | + +## Citation + +```latex +@inproceedings{ren2015faster, + title={Faster r-cnn: Towards real-time object detection with region proposal networks}, + author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, + booktitle={Advances in neural information processing systems}, + year={2015} +} +``` diff --git a/mmpose/configs/mmdet/rpn/metafile.yml b/mmpose/configs/mmdet/rpn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..9796ead6d2ed28f0e10e16165103e31c289dae26 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/metafile.yml @@ -0,0 +1,127 @@ +Collections: + - Name: RPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1506.01497 + Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" + README: configs/rpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/rpn.py#L6 + Version: v2.0.0 + +Models: + - Name: rpn_r50-caffe_fpn_1x_coco + In Collection: RPN + Config: configs/rpn/rpn_r50-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.5 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 58.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531-5b903a37.pth + + - Name: rpn_r50_fpn_1x_coco + In Collection: RPN + Config: configs/rpn/rpn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 58.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth + + - Name: rpn_r50_fpn_2x_coco + In Collection: RPN + Config: rpn_r50_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 58.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131-0728c9b3.pth + + - Name: rpn_r101-caffe_fpn_1x_coco + In Collection: RPN + Config: configs/rpn/rpn_r101-caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.4 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 60.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531-0629a2e2.pth + + - Name: rpn_x101-32x4d_fpn_1x_coco + In Collection: RPN + Config: configs/rpn/rpn_x101-32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + 
AR@1000: 60.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219-b02646c6.pth + + - Name: rpn_x101-32x4d_fpn_2x_coco + In Collection: RPN + Config: configs/rpn/rpn_x101-32x4d_fpn_2x_coco.py + Metadata: + Training Resources: 8x V100 GPUs + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 61.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208-d22bd0bb.pth + + - Name: rpn_x101-64x4d_fpn_1x_coco + In Collection: RPN + Config: configs/rpn/rpn_x101-64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.1 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 61.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208-cde6f7dd.pth + + - Name: rpn_x101-64x4d_fpn_2x_coco + In Collection: RPN + Config: configs/rpn/rpn_x101-64x4d_fpn_2x_coco.py + Metadata: + Training Resources: 8x V100 GPUs + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + AR@1000: 61.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208-c65f524f.pth diff --git a/mmpose/configs/mmdet/rpn/rpn_r101-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r101-caffe_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..22977af8cb761f9415c55f8fa6d458937a00ba06 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r101-caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './rpn_r50-caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..962728ff08abb4652c617a085649575b6cfdcbf8 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ac7671c1c2421c0caa7b42d012cc3a2edc068934 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_r50-caffe-c4_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r50-caffe-c4_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..76b878c874d6545e537ee8a9618e83bb095de281 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r50-caffe-c4_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/rpn_r50-caffe-c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +val_evaluator = dict(metric='proposal_fast') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/rpn/rpn_r50-caffe_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r50-caffe_fpn_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..530f365210572f9bf55ca2775bfdbeba98567076 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r50-caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +# use caffe img_norm +model = dict( + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe88d395b8a32e7513ede3c0c724e29b3554da6 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r50_fpn_1x_coco.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +val_evaluator = dict(metric='proposal_fast') +test_evaluator = val_evaluator + +# inference on val dataset and dump the proposals with evaluate metric +# data_root = 'data/coco/' +# test_evaluator = [ +# dict( +# type='DumpProposals', +# output_dir=data_root + 'proposals/', +# proposals_file='rpn_r50_fpn_1x_val2017.pkl'), +# dict( +# type='CocoMetric', +# ann_file=data_root + 'annotations/instances_val2017.json', +# metric='proposal_fast', +# backend_args={{_base_.backend_args}}, +# format_only=False) +# ] + +# inference on training dataset and dump the proposals without evaluate metric +# data_root = 'data/coco/' +# test_dataloader = dict( +# dataset=dict( +# ann_file='annotations/instances_train2017.json', +# data_prefix=dict(img='train2017/'))) +# +# test_evaluator = [ +# dict( +# type='DumpProposals', +# output_dir=data_root + 'proposals/', +# proposals_file='rpn_r50_fpn_1x_train2017.pkl'), +# ] diff --git a/mmpose/configs/mmdet/rpn/rpn_r50_fpn_2x_coco.py b/mmpose/configs/mmdet/rpn/rpn_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebccbcfaf394fcbb4fbdaea51abdd583f628cac --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_r50_fpn_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' + +# learning policy +max_epochs = 24 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d0c73948ac56afa34b9d6c8d22d6158271306b8c --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_2x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..c6880b762abc8f5d3bf12f278054d76958756fb2 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_x101-32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_1x_coco.py b/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..96e691a912c424f09add038c75631a2e1fefeffc --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_2x_coco.py b/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4182a39667c47d774a1df9d34a1bc2fe60b45538 --- /dev/null +++ b/mmpose/configs/mmdet/rpn/rpn_x101-64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/rtmdet/README.md b/mmpose/configs/mmdet/rtmdet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1677184af761a5b6ac5d643ddf7e2d802f96723e --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/README.md @@ -0,0 +1,457 @@ +# RTMDet: An Empirical Study of Designing Real-Time Object Detectors + +> [RTMDet: An Empirical Study of Designing Real-Time Object Detectors](https://arxiv.org/abs/2212.07784) + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real) + + + +## Abstract + +In this paper, we aim to design an efficient real-time object detector that exceeds the YOLO series and is easily extensible for many object recognition tasks such as instance segmentation and rotated object detection. 
To obtain a more efficient model architecture, we explore an architecture that has compatible capacities in the backbone and neck, constructed by a basic building block that consists of large-kernel depth-wise convolutions. We further introduce soft labels when calculating matching costs in the dynamic label assignment to improve accuracy. Together with better training techniques, the resulting object detector, named RTMDet, achieves 52.8% AP on COCO with 300+ FPS on an NVIDIA 3090 GPU, outperforming the current mainstream industrial detectors. RTMDet achieves the best parameter-accuracy trade-off with tiny/small/medium/large/extra-large model sizes for various application scenarios, and obtains new state-of-the-art performance on real-time instance segmentation and rotated object detection. We hope the experimental results can provide new insights into designing versatile real-time object detectors for many object recognition tasks. + +
+ +
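As a rough illustration of the building block mentioned in the abstract (a large-kernel depth-wise convolution followed by a point-wise convolution), here is a hedged PyTorch sketch; it is not the actual CSPNeXt block, and the channel count, kernel size, normalisation and activation are assumptions.

```python
import torch
import torch.nn as nn


class LargeKernelDWBlock(nn.Module):
    """Illustrative block: 5x5 depth-wise conv -> 1x1 point-wise conv,
    with BatchNorm, SiLU and a residual connection."""

    def __init__(self, channels: int = 128, kernel_size: int = 5):
        super().__init__()
        self.dw = nn.Conv2d(channels, channels, kernel_size,
                            padding=kernel_size // 2, groups=channels)
        self.pw = nn.Conv2d(channels, channels, 1)
        self.norm = nn.BatchNorm2d(channels)
        self.act = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.act(self.norm(self.pw(self.dw(x))))


block = LargeKernelDWBlock()
print(block(torch.randn(1, 128, 40, 40)).shape)  # torch.Size([1, 128, 40, 40])
```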
+ +## Results and Models + +### Object Detection + +| Model | size | box AP | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms)<br>RTX3090 | TRT-FP16-Latency(ms)<br>
T4 | Config | Download | +| :-----------------: | :--: | :----: | :-------: | :------: | :-----------------------------: | :------------------------: | :------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-tiny | 640 | 41.1 | 4.8 | 8.1 | 0.98 | 2.34 | [config](./rtmdet_tiny_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_tiny_8xb32-300e_coco/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_tiny_8xb32-300e_coco/rtmdet_tiny_8xb32-300e_coco_20220902_112414.log.json) | +| RTMDet-s | 640 | 44.6 | 8.89 | 14.8 | 1.22 | 2.96 | [config](./rtmdet_s_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_s_8xb32-300e_coco/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_s_8xb32-300e_coco/rtmdet_s_8xb32-300e_coco_20220905_161602.log.json) | +| RTMDet-m | 640 | 49.4 | 24.71 | 39.27 | 1.62 | 6.41 | [config](./rtmdet_m_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220.log.json) | +| RTMDet-l | 640 | 51.5 | 52.3 | 80.23 | 2.44 | 10.32 | [config](./rtmdet_l_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_l_8xb32-300e_coco/rtmdet_l_8xb32-300e_coco_20220719_112030-5a0be7c4.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_l_8xb32-300e_coco/rtmdet_l_8xb32-300e_coco_20220719_112030.log.json) | +| RTMDet-x | 640 | 52.8 | 94.86 | 141.67 | 3.10 | 18.80 | [config](./rtmdet_x_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_x_8xb32-300e_coco/rtmdet_x_8xb32-300e_coco_20220715_230555-cc79b9ae.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_x_8xb32-300e_coco/rtmdet_x_8xb32-300e_coco_20220715_230555.log.json) | +| RTMDet-x-P6 | 1280 | 54.9 | | | | | [config](./rtmdet_x_p6_4xb8-300e_coco.py) | [model](https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-p6/rtmdet_x_p6_4xb8-300e_coco-bf32be58.pth) | +| RTMDet-l-ConvNeXt-B | 640 | 53.1 | | | | | [config](./rtmdet_l_convnext_b_4xb32-100e_coco.py) | [model](https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_convnext_b_4xb32-100e_coco-d4731b3d.pth) | +| RTMDet-l-Swin-B | 640 | 52.4 | | | | | [config](./rtmdet_l_swin_b_4xb32-100e_coco.py) | [model](https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_swin_b_4xb32-100e_coco-0828ce5d.pth) | +| RTMDet-l-Swin-B-P6 | 1280 | 56.4 | | | | | [config](./rtmdet_l_swin_b_p6_4xb16-100e_coco.py) | [model](https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_swin_b_p6_4xb16-100e_coco-a1486b6f.pth) | + +**Note**: + +1. We implement a fast training version of RTMDet in [MMYOLO](https://github.com/open-mmlab/mmyolo). 
Its training speed is **2.6 times faster** and memory requirement is lower! Try it [here](https://github.com/open-mmlab/mmyolo/tree/main/configs/rtmdet)! +2. The inference speed of RTMDet is measured with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1, and without NMS. +3. For a fair comparison, the config of bbox postprocessing is changed to be consistent with YOLOv5/6/7 after [PR#9494](https://github.com/open-mmlab/mmdetection/pull/9494), bringing about 0.1~0.3% AP improvement. + +### Instance Segmentation + +RTMDet-Ins is the state-of-the-art real-time instance segmentation on coco dataset: + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real) + +| Model | size | box AP | mask AP | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms) | Config | Download | +| :-------------: | :--: | :----: | :-----: | :-------: | :------: | :------------------: | :--------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-Ins-tiny | 640 | 40.5 | 35.4 | 5.6 | 11.8 | 1.70 | [config](./rtmdet-ins_tiny_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco/rtmdet-ins_tiny_8xb32-300e_coco_20221130_151727-ec670f7e.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco/rtmdet-ins_tiny_8xb32-300e_coco_20221130_151727.log.json) | +| RTMDet-Ins-s | 640 | 44.0 | 38.7 | 10.18 | 21.5 | 1.93 | [config](./rtmdet-ins_s_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_s_8xb32-300e_coco/rtmdet-ins_s_8xb32-300e_coco_20221121_212604-fdc5d7ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_s_8xb32-300e_coco/rtmdet-ins_s_8xb32-300e_coco_20221121_212604.log.json) | +| RTMDet-Ins-m | 640 | 48.8 | 42.1 | 27.58 | 54.13 | 2.69 | [config](./rtmdet-ins_m_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_m_8xb32-300e_coco/rtmdet-ins_m_8xb32-300e_coco_20221123_001039-6eba602e.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_m_8xb32-300e_coco/rtmdet-ins_m_8xb32-300e_coco_20221123_001039.log.json) | +| RTMDet-Ins-l | 640 | 51.2 | 43.7 | 57.37 | 106.56 | 3.68 | [config](./rtmdet-ins_l_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_l_8xb32-300e_coco/rtmdet-ins_l_8xb32-300e_coco_20221124_103237-78d1d652.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_l_8xb32-300e_coco/rtmdet-ins_l_8xb32-300e_coco_20221124_103237.log.json) | +| RTMDet-Ins-x | 640 | 52.4 | 44.6 | 102.7 | 182.7 | 5.31 | [config](./rtmdet-ins_x_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_x_8xb16-300e_coco/rtmdet-ins_x_8xb16-300e_coco_20221124_111313-33d4595b.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_x_8xb16-300e_coco/rtmdet-ins_x_8xb16-300e_coco_20221124_111313.log.json) | + +**Note**: + +1. 
The inference speed of RTMDet-Ins is measured on an NVIDIA 3090 GPU with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1. Top 100 masks are kept and the post process latency is included. + +### Rotated Object Detection + +RTMDet-R achieves state-of-the-art on various remote sensing datasets. + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real) + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/one-stage-anchor-free-oriented-object-1)](https://paperswithcode.com/sota/one-stage-anchor-free-oriented-object-1?p=rtmdet-an-empirical-study-of-designing-real) + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real) + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/one-stage-anchor-free-oriented-object-3)](https://paperswithcode.com/sota/one-stage-anchor-free-oriented-object-3?p=rtmdet-an-empirical-study-of-designing-real) + +Models and configs of RTMDet-R are available in [MMRotate](https://github.com/open-mmlab/mmrotate/tree/1.x/configs/rotated_rtmdet). + +| Backbone | pretrain | Aug | mmAP | mAP50 | mAP75 | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms) | Config | Download | +| :---------: | :------: | :---: | :---: | :---: | :---: | :-------: | :------: | :------------------: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-tiny | IN | RR | 47.37 | 75.36 | 50.64 | 4.88 | 20.45 | 4.40 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota/rotated_rtmdet_tiny-3x-dota-9d821076.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota/rotated_rtmdet_tiny-3x-dota_20221201_120814.json) | +| RTMDet-tiny | IN | MS+RR | 53.59 | 79.82 | 58.87 | 4.88 | 20.45 | 4.40 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms/rotated_rtmdet_tiny-3x-dota_ms-f12286ff.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms/rotated_rtmdet_tiny-3x-dota_ms_20221113_201235.log) | +| RTMDet-s | IN | RR | 48.16 | 76.93 | 50.59 | 8.86 | 37.62 | 4.86 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota.py) | 
[model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota/rotated_rtmdet_s-3x-dota-11f6ccf5.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota/rotated_rtmdet_s-3x-dota_20221124_081442.json) | +| RTMDet-s | IN | MS+RR | 54.43 | 79.98 | 60.07 | 8.86 | 37.62 | 4.86 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms/rotated_rtmdet_s-3x-dota_ms-20ead048.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms/rotated_rtmdet_s-3x-dota_ms_20221113_201055.json) | +| RTMDet-m | IN | RR | 50.56 | 78.24 | 54.47 | 24.67 | 99.76 | 7.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota/rotated_rtmdet_m-3x-dota-beeadda6.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota/rotated_rtmdet_m-3x-dota_20221122_011234.json) | +| RTMDet-m | IN | MS+RR | 55.00 | 80.26 | 61.26 | 24.67 | 99.76 | 7.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms/rotated_rtmdet_m-3x-dota_ms-c71eb375.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms/rotated_rtmdet_m-3x-dota_ms_20221122_011234.json) | +| RTMDet-l | IN | RR | 51.01 | 78.85 | 55.21 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota/rotated_rtmdet_l-3x-dota-23992372.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota/rotated_rtmdet_l-3x-dota_20221122_011241.json) | +| RTMDet-l | IN | MS+RR | 55.52 | 80.54 | 61.47 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms/rotated_rtmdet_l-3x-dota_ms-2738da34.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms/rotated_rtmdet_l-3x-dota_ms_20221122_011241.json) | +| RTMDet-l | COCO | MS+RR | 56.74 | 81.33 | 63.45 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms/rotated_rtmdet_l-coco_pretrain-3x-dota_ms-06d248a2.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms/rotated_rtmdet_l-coco_pretrain-3x-dota_ms_20221113_202010.json) | + +### Classification + +We also provide the imagenet classification configs of the RTMDet backbone. Find more details in the [classification folder](./classification). 
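+
+For example, the RTMDet-Ins-s config in this folder initializes its backbone from the CSPNeXt-s checkpoint through `init_cfg`. A minimal sketch of that pattern is shown below (the checkpoint URL is the one referenced by the RTMDet-Ins-s config; adapt it for the other model sizes):
+
+```python
+# Load ImageNet pre-trained CSPNeXt weights into the detection backbone.
+# `prefix='backbone.'` selects the backbone sub-weights from the
+# classification checkpoint.
+checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth'  # noqa
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
+```
+
+The ImageNet results of these backbones are listed below.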
+ +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | + +## Citation + +```latex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +## Visualization + +
+ +
+
+## Deployment Tutorial
+
+Here is a basic example of deploying RTMDet with [MMDeploy-1.x](https://github.com/open-mmlab/mmdeploy/tree/1.x).
+
+### Step1. Install MMDeploy
+
+Before starting the deployment, please make sure you have installed MMDetection and MMDeploy-1.x correctly.
+
+- To install MMDetection, please refer to the [MMDetection installation guide](https://mmdetection.readthedocs.io/en/latest/get_started.html).
+- To install MMDeploy-1.x, please refer to the [MMDeploy-1.x installation guide](https://mmdeploy.readthedocs.io/en/1.x/get_started.html#installation).
+
+If you want to deploy RTMDet with ONNXRuntime, TensorRT, or another inference engine,
+please make sure you have installed the corresponding dependencies and the MMDeploy precompiled packages.
+
+### Step2. Convert Model
+
+After the installation, you can convert the PyTorch model to a backend model by running MMDeploy's `tools/deploy.py`.
+
+For a detailed model conversion tutorial, please refer to the [MMDeploy document](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/convert_model.html).
+Here we only give the example of converting RTMDet.
+
+MMDeploy supports converting both dynamic and static models. Dynamic models accept different input shapes, but their inference speed is lower than that of static models.
+To achieve the best performance, we suggest converting RTMDet with the static setting.
+
+- If you only want to use ONNX, please use [`configs/mmdet/detection/detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_onnxruntime_static.py) as the deployment config.
+- If you want to use TensorRT, please use [`configs/mmdet/detection/detection_tensorrt_static-640x640.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_tensorrt_static-640x640.py).
+
+If you want to customize the settings in the deployment config for your requirements, please refer to the [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/write_config.html).
+
+After preparing the deployment config, you can run the `tools/deploy.py` script to convert your model.
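+
+If you do need variable input sizes rather than the static setting recommended above, a dynamic deployment config additionally declares the allowed input shape range for TensorRT. The following is only a rough sketch of that part of such a config (field names follow the MMDeploy TensorRT configs; the concrete shapes are placeholders):
+
+```python
+# Hedged sketch: shape bounds used when building a dynamic TensorRT engine.
+backend_config = dict(
+    type='tensorrt',
+    common_config=dict(fp16_mode=False, max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 320, 320],      # smallest accepted input
+                    opt_shape=[1, 3, 640, 640],      # shape the engine is tuned for
+                    max_shape=[1, 3, 1344, 1344])))  # largest accepted input
+    ])
+```
+
+With the static config recommended above, no shape range is needed because the engine is built for a single fixed 640x640 input.
+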
+Here we take converting RTMDet-s to TensorRT as an example: + +```shell +# go to the mmdeploy folder +cd ${PATH_TO_MMDEPLOY} + +# download RTMDet-s checkpoint +wget -P checkpoint https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_s_8xb32-300e_coco/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth + +# run the command to start model conversion +python tools/deploy.py \ + configs/mmdet/detection/detection_tensorrt_static-640x640.py \ + ${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \ + checkpoint/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth \ + demo/resources/det.jpg \ + --work-dir ./work_dirs/rtmdet \ + --device cuda:0 \ + --show +``` + +If the script runs successfully, you will see the following files: + +``` +|----work_dirs + |----rtmdet + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file +``` + +After this, you can check the inference results with MMDeploy Model Converter API: + +```python +from mmdeploy.apis import inference_model + +result = inference_model( + model_cfg='${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py', + deploy_cfg='${PATH_TO_MMDEPLOY}/configs/mmdet/detection/detection_tensorrt_static-640x640.py', + backend_files=['work_dirs/rtmdet/end2end.engine'], + img='demo/resources/det.jpg', + device='cuda:0') +``` + +#### Advanced Setting + +To convert the model with TRT-FP16, you can enable the fp16 mode in your deploy config: + +```python +# in MMDeploy config +backend_config = dict( + type='tensorrt', + common_config=dict( + fp16_mode=True # enable fp16 + )) +``` + +To reduce the end to end inference speed with the inference engine, we suggest you to adjust the post-processing setting of the model. +We set a very low score threshold during training and testing to achieve better COCO mAP. +However, in actual usage scenarios, a relatively high score threshold (e.g. 0.3) is usually used. + +You can adjust the score threshold and the number of detection boxes in your model config according to the actual usage to reduce the time-consuming of post-processing. + +```python +# in MMDetection config +model = dict( + test_cfg=dict( + nms_pre=1000, # keep top-k score bboxes before nms + min_bbox_size=0, + score_thr=0.3, # score threshold to filter bboxes + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=100) # only keep top-100 as the final results. +) +``` + +### Step3. Inference with SDK + +We provide both Python and C++ inference API with MMDeploy SDK. + +To use SDK, you need to dump the required info during converting the model. 
Just add `--dump-info` to the model conversion command: + +```shell +python tools/deploy.py \ + configs/mmdet/detection/detection_tensorrt_static-640x640.py \ + ${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \ + checkpoint/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth \ + demo/resources/det.jpg \ + --work-dir ./work_dirs/rtmdet-sdk \ + --device cuda:0 \ + --show \ + --dump-info # dump sdk info +``` + +After running the command, it will dump 3 json files additionally for the SDK: + +``` +|----work_dirs + |----rtmdet-sdk + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file + # json files for the SDK + |----pipeline.json + |----deploy.json + |----detail.json +``` + +#### Python API + +Here is a basic example of SDK Python API: + +```python +from mmdeploy_python import Detector +import cv2 + +img = cv2.imread('demo/resources/det.jpg') + +# create a detector +detector = Detector(model_path='work_dirs/rtmdet-sdk', device_name='cuda', device_id=0) +# run the inference +bboxes, labels, _ = detector(img) +# Filter the result according to threshold +indices = [i for i in range(len(bboxes))] +for index, bbox, label_id in zip(indices, bboxes, labels): + [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4] + if score < 0.3: + continue + # draw bbox + cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0)) + +cv2.imwrite('output_detection.png', img) +``` + +#### C++ API + +Here is a basic example of SDK C++ API: + +```C++ +#include +#include +#include "mmdeploy/detector.hpp" + +int main() { + const char* device_name = "cuda"; + int device_id = 0; + std::string model_path = "work_dirs/rtmdet-sdk"; + std::string image_path = "demo/resources/det.jpg"; + + // 1. load model + mmdeploy::Model model(model_path); + // 2. create predictor + mmdeploy::Detector detector(model, mmdeploy::Device{device_name, device_id}); + // 3. read image + cv::Mat img = cv::imread(image_path); + // 4. inference + auto dets = detector.Apply(img); + // 5. deal with the result. Here we choose to visualize it + for (int i = 0; i < dets.size(); ++i) { + const auto& box = dets[i].bbox; + fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, box.left, box.top, box.right, box.bottom, dets[i].label_id, dets[i].score); + if (bboxes[i].score < 0.3) { + continue; + } + cv::rectangle(img, cv::Point{(int)box.left, (int)box.top}, + cv::Point{(int)box.right, (int)box.bottom}, cv::Scalar{0, 255, 0}); + } + cv::imwrite("output_detection.png", img); + return 0; +} +``` + +To build C++ example, please add MMDeploy package in your CMake project as following: + +```cmake +find_package(MMDeploy REQUIRED) +target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) +``` + +#### Other languages + +- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) +- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) + +### Deploy RTMDet Instance Segmentation Model + +We support RTMDet-Ins ONNXRuntime and TensorRT deployment after [MMDeploy v1.0.0rc2](https://github.com/open-mmlab/mmdeploy/tree/v1.0.0rc2). And its deployment process is almost consistent with the detection model. + +#### Step1. Install MMDeploy >= v1.0.0rc2 + +Please refer to the [MMDeploy-1.x installation guide](https://mmdeploy.readthedocs.io/en/1.x/get_started.html#installation) to install the latest version. +Please remember to replace the pre-built package with the latest version. 
+The v1.0.0rc2 package can be downloaded from [v1.0.0rc2 release page](https://github.com/open-mmlab/mmdeploy/releases/tag/v1.0.0rc2). + +Step2. Convert Model + +This step has no difference with the previous tutorial. The only thing you need to change is switching to the RTMDet-Ins deploy config: + +- If you want to use ONNXRuntime, please use [`configs/mmdet/instance-seg/instance-seg_rtmdet-ins_onnxruntime_static-640x640.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/instance-seg/instance-seg_rtmdet-ins_onnxruntime_static-640x640.py) as the deployment config. +- If you want to use TensorRT, please use [`configs/mmdet/instance-seg/instance-seg_rtmdet-ins_tensorrt_static-640x640.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/instance-seg/instance-seg_rtmdet-ins_tensorrt_static-640x640.py). + +Here we take converting RTMDet-Ins-s to TensorRT as an example: + +```shell +# go to the mmdeploy folder +cd ${PATH_TO_MMDEPLOY} + +# download RTMDet-s checkpoint +wget -P checkpoint https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_s_8xb32-300e_coco/rtmdet-ins_s_8xb32-300e_coco_20221121_212604-fdc5d7ec.pth + +# run the command to start model conversion +python tools/deploy.py \ + configs/mmdet/instance-seg/instance-seg_rtmdet-ins_tensorrt_static-640x640.py \ + ${PATH_TO_MMDET}/configs/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py \ + checkpoint/rtmdet-ins_s_8xb32-300e_coco_20221121_212604-fdc5d7ec.pth \ + demo/resources/det.jpg \ + --work-dir ./work_dirs/rtmdet-ins \ + --device cuda:0 \ + --show +``` + +If the script runs successfully, you will see the following files: + +``` +|----work_dirs + |----rtmdet-ins + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file +``` + +After this, you can check the inference results with MMDeploy Model Converter API: + +```python +from mmdeploy.apis import inference_model + +result = inference_model( + model_cfg='${PATH_TO_MMDET}/configs/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py', + deploy_cfg='${PATH_TO_MMDEPLOY}/configs/mmdet/instance-seg/instance-seg_rtmdet-ins_tensorrt_static-640x640.py', + backend_files=['work_dirs/rtmdet-ins/end2end.engine'], + img='demo/resources/det.jpg', + device='cuda:0') +``` + +### Model Config + +In MMDetection's config, we use `model` to set up detection algorithm components. In addition to neural network components such as `backbone`, `neck`, etc, it also requires `data_preprocessor`, `train_cfg`, and `test_cfg`. `data_preprocessor` is responsible for processing a batch of data output by dataloader. `train_cfg`, and `test_cfg` in the model config are for training and testing hyperparameters of the components.Taking RTMDet as an example, we will introduce each field in the config according to different function modules: + +```python +model = dict( + type='RTMDet', # The name of detector + data_preprocessor=dict( # The config of data preprocessor, usually includes image normalization and padding + type='DetDataPreprocessor', # The type of the data preprocessor. 
Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.data_preprocessors.DetDataPreprocessor + mean=[103.53, 116.28, 123.675], # Mean values used to pre-training the pre-trained backbone models, ordered in R, G, B + std=[57.375, 57.12, 58.395], # Standard variance used to pre-training the pre-trained backbone models, ordered in R, G, B + bgr_to_rgb=False, # whether to convert image from BGR to RGB + batch_augments=None), # Batch-level augmentations + backbone=dict( # The config of backbone + type='CSPNeXt', # The type of backbone network. Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.backbones.CSPNeXt + arch='P5', # Architecture of CSPNeXt, from {P5, P6}. Defaults to P5 + expand_ratio=0.5, # Ratio to adjust the number of channels of the hidden layer. Defaults to 0.5 + deepen_factor=1, # Depth multiplier, multiply number of blocks in CSP layer by this amount. Defaults to 1.0 + widen_factor=1, # Width multiplier, multiply number of channels in each layer by this amount. Defaults to 1.0 + channel_attention=True, # Whether to add channel attention in each stage. Defaults to True + norm_cfg=dict(type='SyncBN'), # Dictionary to construct and config norm layer. Defaults to dict(type=’BN’, requires_grad=True) + act_cfg=dict(type='SiLU', inplace=True)), # Config dict for activation layer. Defaults to dict(type=’SiLU’) + neck=dict( + type='CSPNeXtPAFPN', # The type of neck is CSPNeXtPAFPN. Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.necks.CSPNeXtPAFPN + in_channels=[256, 512, 1024], # Number of input channels per scale + out_channels=256, # Number of output channels (used at each scale) + num_csp_blocks=3, # Number of bottlenecks in CSPLayer. Defaults to 3 + expand_ratio=0.5, # Ratio to adjust the number of channels of the hidden layer. Default: 0.5 + norm_cfg=dict(type='SyncBN'), # Config dict for normalization layer. Default: dict(type=’BN’) + act_cfg=dict(type='SiLU', inplace=True)), # Config dict for activation layer. Default: dict(type=’Swish’) + bbox_head=dict( + type='RTMDetSepBNHead', # The type of bbox_head is RTMDetSepBNHead. RTMDetHead with separated BN layers and shared conv layers. Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.dense_heads.RTMDetSepBNHead + num_classes=80, # Number of categories excluding the background category + in_channels=256, # Number of channels in the input feature map + stacked_convs=2, # Whether to share conv layers between stages. Defaults to True + feat_channels=256, # Feature channels of convolutional layers in the head + anchor_generator=dict( # The config of anchor generator + type='MlvlPointGenerator', # The methods use MlvlPointGenerator. Refer to https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/task_modules/prior_generators/point_generator.py#L92 + offset=0, # The offset of points, the value is normalized with corresponding stride. Defaults to 0.5 + strides=[8, 16, 32]), # Strides of anchors in multiple feature levels in order (w, h) + bbox_coder=dict(type='DistancePointBBoxCoder'), # Distance Point BBox coder.This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,right) and decode it back to the original. Refer to https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/task_modules/coders/distance_point_bbox_coder.py#L9 + loss_cls=dict( # Config of loss function for the classification branch + type='QualityFocalLoss', # Type of loss for classification branch. 
Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.losses.QualityFocalLoss + use_sigmoid=True, # Whether sigmoid operation is conducted in QFL. Defaults to True + beta=2.0, # The beta parameter for calculating the modulating factor. Defaults to 2.0 + loss_weight=1.0), # Loss weight of current loss + loss_bbox=dict( # Config of loss function for the regression branch + type='GIoULoss', # Type of loss. Refer to https://mmdetection.readthedocs.io/en/latest/api.html#mmdet.models.losses.GIoULoss + loss_weight=2.0), # Loss weight of the regression branch + with_objectness=False, # Whether to add an objectness branch. Defaults to True + exp_on_reg=True, # Whether to use .exp() in regression + share_conv=True, # Whether to share conv layers between stages. Defaults to True + pred_kernel_size=1, # Kernel size of prediction layer. Defaults to 1 + norm_cfg=dict(type='SyncBN'), # Config dict for normalization layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001) + act_cfg=dict(type='SiLU', inplace=True)), # Config dict for activation layer. Defaults to dict(type='SiLU') + train_cfg=dict( # Config of training hyperparameters for ATSS + assigner=dict( # Config of assigner + type='DynamicSoftLabelAssigner', # Type of assigner. DynamicSoftLabelAssigner computes matching between predictions and ground truth with dynamic soft label assignment. Refer to https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/task_modules/assigners/dynamic_soft_label_assigner.py#L40 + topk=13), # Select top-k predictions to calculate dynamic k best matches for each gt. Defaults to 13 + allowed_border=-1, # The border allowed after padding for valid anchors + pos_weight=-1, # The weight of positive samples during training + debug=False), # Whether to set the debug mode + test_cfg=dict( # Config for testing hyperparameters for ATSS + nms_pre=30000, # The number of boxes before NMS + min_bbox_size=0, # The allowed minimal box size + score_thr=0.001, # Threshold to filter out boxes + nms=dict( # Config of NMS in the second stage + type='nms', # Type of NMS + iou_threshold=0.65), # NMS threshold + max_per_img=300), # Max number of detections of each image +) +``` diff --git a/mmpose/configs/mmdet/rtmdet/classification/README.md b/mmpose/configs/mmdet/rtmdet/classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..acc127db2ca82b2cbc5fe93495306c2776acaf33 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/README.md @@ -0,0 +1,56 @@ +# CSPNeXt ImageNet Pre-training + +In this folder, we provide the imagenet pre-training config of RTMDet's backbone CSPNeXt. + +## Requirements + +To train with these configs, please install [MMPreTrain](https://github.com/open-mmlab/mmpretrain) first. + +Install by MIM: + +```shell +mim install mmpretrain +``` + +or install by pip: + +```shell +pip install mmpretrain +``` + +## Prepare Dataset + +To pre-train on ImageNet, you need to prepare the dataset first. Please refer to the [guide](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#imagenet). + +## How to Train + +You can use the classification config in the same way as the detection config. 
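+
+For instance, an 8-GPU launch of the CSPNeXt-s pre-training using the generic commands given below would look roughly like this (the relative config path is illustrative and depends on where this folder lives in your checkout):
+
+```shell
+# Hypothetical example: pre-train CSPNeXt-s on 8 GPUs with the config in this folder.
+bash ./tools/dist_train.sh \
+    configs/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py \
+    8
+```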
+ +For single-GPU training, run: + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + [optional arguments] +``` + +For multi-GPU training, run: + +```shell +bash ./tools/dist_train.sh \ + ${CONFIG_FILE} \ + ${GPU_NUM} \ + [optional arguments] +``` + +More details can be found in [user guides](https://mmdetection.readthedocs.io/en/latest/user_guides/train.html). + +## Results and Models + +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | diff --git a/mmpose/configs/mmdet/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py b/mmpose/configs/mmdet/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..d2e70539f05da69cca53f273d11e3296c87c4eda --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=1, widen_factor=1), + head=dict(in_channels=1024)) diff --git a/mmpose/configs/mmdet/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py b/mmpose/configs/mmdet/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..e1b1352dd91a803eeafe80f587203f96a247c27f --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=0.67, widen_factor=0.75), + head=dict(in_channels=768)) diff --git a/mmpose/configs/mmdet/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py b/mmpose/configs/mmdet/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..dcfd2ea47d54408ef6d2fe225b57c5c9e540918a --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,64 @@ +_base_ = [ + 'mmpretrain::_base_/datasets/imagenet_bs256_rsb_a12.py', + 'mmpretrain::_base_/schedules/imagenet_bs2048_rsb.py', + 'mmpretrain::_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='mmdet.CSPNeXt', + arch='P5', + out_indices=(4, ), + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + channel_attention=True, + norm_cfg=dict(type='BN'), + 
act_cfg=dict(type='mmdet.SiLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + loss_weight=1.0), + topk=(1, 5)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.2), + dict(type='CutMix', alpha=1.0) + ])) + +# dataset settings +train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(weight_decay=0.01), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=595, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=600) +] + +train_cfg = dict(by_epoch=True, max_epochs=600) diff --git a/mmpose/configs/mmdet/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py b/mmpose/configs/mmdet/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..af3170bdc51778c4601d4426aa88cc27c608f100 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=0.167, widen_factor=0.375), + head=dict(in_channels=384)) diff --git a/mmpose/configs/mmdet/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py b/mmpose/configs/mmdet/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..edec48d78dbefdb7783c5dd50e97873e29ea6497 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + head=dict(in_channels=1280)) diff --git a/mmpose/configs/mmdet/rtmdet/metafile.yml b/mmpose/configs/mmdet/rtmdet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..a62abcb2faabb2e7d6c4a6c7d3b492392eba9775 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/metafile.yml @@ -0,0 +1,242 @@ +Collections: + - Name: RTMDet + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Flat Cosine Annealing + Training Resources: 8x A100 GPUs + Architecture: + - CSPNeXt + - CSPNeXtPAFPN + README: configs/rtmdet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v3.0.0rc1/mmdet/models/detectors/rtmdet.py#L6 + Version: v3.0.0rc1 + +Models: + - Name: rtmdet_tiny_8xb32-300e_coco + Alias: + - rtmdet-t + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 11.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_tiny_8xb32-300e_coco/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth + + - Name: rtmdet_s_8xb32-300e_coco + Alias: + - rtmdet-s + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_s_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 15.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: 
https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_s_8xb32-300e_coco/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth + + - Name: rtmdet_m_8xb32-300e_coco + Alias: + - rtmdet-m + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_m_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 27.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth + + - Name: rtmdet_l_8xb32-300e_coco + Alias: + - rtmdet-l + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_l_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 43.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.3 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_l_8xb32-300e_coco/rtmdet_l_8xb32-300e_coco_20220719_112030-5a0be7c4.pth + + - Name: rtmdet_x_8xb32-300e_coco + Alias: + - rtmdet-x + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_x_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 61.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.6 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_x_8xb32-300e_coco/rtmdet_x_8xb32-300e_coco_20220715_230555-cc79b9ae.pth + + - Name: rtmdet_x_p6_4xb8-300e_coco + Alias: + - rtmdet-x_p6 + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_x_p6_4xb8-300e_coco.py + Metadata: + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 54.9 + Weights: https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-p6/rtmdet_x_p6_4xb8-300e_coco-bf32be58.pth + + - Name: rtmdet_l_convnext_b_4xb32-100e_coco + Alias: + - rtmdet-l_convnext_b + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_l_convnext_b_4xb32-100e_coco.py + Metadata: + Epochs: 100 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 53.1 + Weights: https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_convnext_b_4xb32-100e_coco-d4731b3d.pth + + - Name: rtmdet_l_swin_b_4xb32-100e_coco + Alias: + - rtmdet-l_swin_b + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_l_swin_b_4xb32-100e_coco.py + Metadata: + Epochs: 100 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.4 + Weights: https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_swin_b_4xb32-100e_coco-0828ce5d.pth + + - Name: rtmdet_l_swin_b_p6_4xb16-100e_coco + Alias: + - rtmdet-l_swin_b_p6 + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_l_swin_b_p6_4xb16-100e_coco.py + Metadata: + Epochs: 100 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 56.4 + Weights: https://github.com/orange0-jp/orange-weights/releases/download/v0.1.0rtmdet-swin-convnext/rtmdet_l_swin_b_p6_4xb16-100e_coco-a1486b6f.pth + + - Name: rtmdet-ins_tiny_8xb32-300e_coco + Alias: + - rtmdet-ins-t + In Collection: RTMDet + Config: configs/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 18.4 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco/rtmdet-ins_tiny_8xb32-300e_coco_20221130_151727-ec670f7e.pth + + - Name: 
rtmdet-ins_s_8xb32-300e_coco + Alias: + - rtmdet-ins-s + In Collection: RTMDet + Config: configs/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 27.6 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_s_8xb32-300e_coco/rtmdet-ins_s_8xb32-300e_coco_20221121_212604-fdc5d7ec.pth + + - Name: rtmdet-ins_m_8xb32-300e_coco + Alias: + - rtmdet-ins-m + In Collection: RTMDet + Config: configs/rtmdet/rtmdet-ins_m_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 42.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_m_8xb32-300e_coco/rtmdet-ins_m_8xb32-300e_coco_20221123_001039-6eba602e.pth + + - Name: rtmdet-ins_l_8xb32-300e_coco + Alias: + - rtmdet-ins-l + In Collection: RTMDet + Config: configs/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 59.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.7 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_l_8xb32-300e_coco/rtmdet-ins_l_8xb32-300e_coco_20221124_103237-78d1d652.pth + + - Name: rtmdet-ins_x_8xb16-300e_coco + Alias: + - rtmdet-ins-x + In Collection: RTMDet + Config: configs/rtmdet/rtmdet-ins_x_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 33.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.6 + Weights: https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_x_8xb16-300e_coco/rtmdet-ins_x_8xb16-300e_coco_20221124_111313-33d4595b.pth diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c62c27b6da6a0cb9006bf99ab88171ce6aea4d --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py @@ -0,0 +1,104 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='RTMDetInsSepBNHead', + num_classes=80, + in_channels=256, + stacked_convs=2, + share_conv=True, + pred_kernel_size=1, + feat_channels=256, + act_cfg=dict(type='SiLU', inplace=True), + norm_cfg=dict(type='SyncBN', requires_grad=True), + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict( + type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=40, + mask_thr_binary=0.5), +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomResize', + scale=(1280, 1280), + 
ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='PackDetInputs') +] + +train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline)) + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_cocoHuman.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_cocoHuman.py new file mode 100644 index 0000000000000000000000000000000000000000..7e66b4558754305a47a0cd193c80eabb4dc30d65 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_8xb32-300e_cocoHuman.py @@ -0,0 +1,213 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' +# _base_ = [ +# '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py', +# '../_base_/datasets/coco_human_instance.py', './rtmdet_tta.py' +# ] + +BATCH_SIZE = 16 + +load_from = 'models/pretrained/rtmdet-ins_l_8xb32-300e_coco_20221124_103237-78d1d652.pth' + + +model = dict( + type='RTMDet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + batch_augments=None), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1, + widen_factor=1, + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[256, 512, 1024], + out_channels=256, + num_csp_blocks=3, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + _delete_=True, + type='RTMDetInsSepBNHead', + num_classes=1, + in_channels=256, + stacked_convs=2, + share_conv=True, + pred_kernel_size=1, + feat_channels=256, + act_cfg=dict(type='SiLU', inplace=True), + norm_cfg=dict(type='SyncBN', requires_grad=True), + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict( + type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')), + train_cfg=dict( + assigner=dict(type='DynamicSoftLabelAssigner', topk=13), + 
allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr_binary=0.5), +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + # dict(type='RemoveRandomInstances'), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='PackDetInputs') +] + +train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline)) + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=10, + batch_sampler=None, + pin_memory=True, + dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=BATCH_SIZE//2, num_workers=10, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 300 +stage2_num_epochs = 20 +base_lr = 0.004 * BATCH_SIZE / 32 +interval = 10 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=interval, + dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)]) + +val_evaluator = dict(proposal_nums=(100, 1, 10)) +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + interval=interval, + max_keep_ckpts=3 # only keep latest 3 checkpoints + )) + +custom_hooks = [ + dict( + type='EMAHook', + 
ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_insmask_8xb32-300e_cocoHuman.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_insmask_8xb32-300e_cocoHuman.py new file mode 100644 index 0000000000000000000000000000000000000000..8bfb7ceafbfb74c6d24f100d4abfb4fbe315cd2b --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_l_insmask_8xb32-300e_cocoHuman.py @@ -0,0 +1,212 @@ +# _base_ = './rtmdet_l_8xb32-300e_coco.py' +_base_ = [ + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py', + '../_base_/datasets/coco_human_instance.py', './rtmdet_tta.py' +] + +BATCH_SIZE = 16 + +load_from = 'models/pretrained/rtmdet-ins_l_8xb32-300e_coco_20221124_103237-78d1d652.pth' + + +model = dict( + type='RTMDet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + batch_augments=None), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1, + widen_factor=1, + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[256, 512, 1024], + out_channels=256, + num_csp_blocks=3, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='RTMDetInsSepBNHead', + num_classes=1, + in_channels=256, + stacked_convs=2, + share_conv=True, + pred_kernel_size=1, + feat_channels=256, + act_cfg=dict(type='SiLU', inplace=True), + norm_cfg=dict(type='SyncBN', requires_grad=True), + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict( + type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')), + train_cfg=dict( + assigner=dict(type='DynamicSoftLabelAssigner', topk=13), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr_binary=0.5), +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + # dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + dict(type='RemoveRandomInstances'), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + # dict( + # type='CachedMixUp', + # img_scale=(640, 640), + # ratio_range=(1.0, 1.0), + # max_cached_images=20, + # pad_val=(114, 114, 114)), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='PackDetInputs') +] + +train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline)) + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + 
type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=BATCH_SIZE, + num_workers=10, + batch_sampler=None, + pin_memory=True, + dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=BATCH_SIZE//2, num_workers=10, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 300 +stage2_num_epochs = 20 +base_lr = 0.004 * BATCH_SIZE / 32 +interval = 10 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=interval, + dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)]) + +val_evaluator = dict(proposal_nums=(100, 1, 10)) +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + interval=interval, + max_keep_ckpts=3 # only keep latest 3 checkpoints + )) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_m_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_m_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..66da9148775b425c6b0052beb04f9c8ca17257d9 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_m_8xb32-300e_coco.py @@ -0,0 +1,6 @@ +_base_ = './rtmdet-ins_l_8xb32-300e_coco.py' + +model = dict( + backbone=dict(deepen_factor=0.67, widen_factor=0.75), + neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), + bbox_head=dict(in_channels=192, feat_channels=192)) diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..28bc21cc93bb36d2d2fc8601b06bb0f0c58d6d49 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py @@ -0,0 +1,80 @@ +_base_ = './rtmdet-ins_l_8xb32-300e_coco.py' +checkpoint = 
'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.5, + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), + bbox_head=dict(in_channels=128, feat_channels=128)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=(640, 640), + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..954f911614e75eb9910effbf1bbc1d7b01120276 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py @@ -0,0 +1,48 @@ +_base_ = './rtmdet-ins_s_8xb32-300e_coco.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa + +model = dict( + backbone=dict( + deepen_factor=0.167, + widen_factor=0.375, + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), + bbox_head=dict(in_channels=96, feat_channels=96)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='CachedMosaic', + img_scale=(640, 640), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 
114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=10, + random_pop=False, + pad_val=(114, 114, 114), + prob=0.5), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet-ins_x_8xb16-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_x_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..daaa640edac6b2114caf13b650d99d7c7632629a --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet-ins_x_8xb16-300e_coco.py @@ -0,0 +1,31 @@ +_base_ = './rtmdet-ins_l_8xb32-300e_coco.py' + +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + neck=dict( + in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), + bbox_head=dict(in_channels=320, feat_channels=320)) + +base_lr = 0.002 + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=_base_.max_epochs // 2, + end=_base_.max_epochs, + T_max=_base_.max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_l_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_l_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1cce4d89c84a81d7aa22197cd6dd70fe08637a35 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_l_8xb32-300e_coco.py @@ -0,0 +1,179 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py', + '../_base_/datasets/coco_detection.py', './rtmdet_tta.py' +] +model = dict( + type='RTMDet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False, + batch_augments=None), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1, + widen_factor=1, + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[256, 512, 1024], + out_channels=256, + num_csp_blocks=3, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='RTMDetSepBNHead', + num_classes=80, + in_channels=256, + stacked_convs=2, + feat_channels=256, + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + with_objectness=False, + exp_on_reg=True, + share_conv=True, + pred_kernel_size=1, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + train_cfg=dict( + assigner=dict(type='DynamicSoftLabelAssigner', topk=13), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=30000, + min_bbox_size=0, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=300), +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomResize', + 
scale=(1280, 1280), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=32, + num_workers=10, + batch_sampler=None, + pin_memory=True, + dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=5, num_workers=10, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 300 +stage2_num_epochs = 20 +base_lr = 0.004 +interval = 10 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=interval, + dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)]) + +val_evaluator = dict(proposal_nums=(100, 1, 10)) +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + interval=interval, + max_keep_ckpts=3 # only keep latest 3 checkpoints + )) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_l_convnext_b_4xb32-100e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_l_convnext_b_4xb32-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..85af292bcaba2e1853ed4f3a3f5818c0c0d5813e --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_l_convnext_b_4xb32-100e_coco.py @@ -0,0 +1,81 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' + +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) + +norm_cfg = dict(type='GN', num_groups=32) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_in1k-384px_20221219-4570f792.pth' # noqa +model = dict( + type='RTMDet', + 
data_preprocessor=dict( + _delete_=True, + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + batch_augments=None), + backbone=dict( + _delete_=True, + type='mmpretrain.ConvNeXt', + arch='base', + out_indices=[1, 2, 3], + drop_path_rate=0.7, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + with_cp=True, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[256, 512, 1024], norm_cfg=norm_cfg), + bbox_head=dict(norm_cfg=norm_cfg)) + +max_epochs = 100 +stage2_num_epochs = 10 +interval = 10 +base_lr = 0.001 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=interval, + dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)]) + +optim_wrapper = dict( + constructor='LearningRateDecayOptimizerConstructor', + paramwise_cfg={ + 'decay_rate': 0.8, + 'decay_type': 'layer_wise', + 'num_layers': 12 + }, + optimizer=dict(lr=base_lr)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 50 to 100 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline={{_base_.train_pipeline_stage2}}) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_4xb32-100e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_4xb32-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..84b0e0fa7d18848a4c1e305985e33e69e3196790 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_4xb32-100e_coco.py @@ -0,0 +1,78 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' + +norm_cfg = dict(type='GN', num_groups=32) +checkpoint = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa +model = dict( + type='RTMDet', + data_preprocessor=dict( + _delete_=True, + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + batch_augments=None), + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=True, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)), + neck=dict(in_channels=[256, 512, 1024], norm_cfg=norm_cfg), + bbox_head=dict(norm_cfg=norm_cfg)) + +max_epochs = 100 +stage2_num_epochs = 10 +interval = 10 +base_lr = 0.001 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=interval, + dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)]) + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 50 to 100 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +custom_hooks = [ + dict( + 
type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline={{_base_.train_pipeline_stage2}}) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_p6_4xb16-100e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_p6_4xb16-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..37d4215c3f014ef20c7817875cbc1689186e0766 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_l_swin_b_p6_4xb16-100e_coco.py @@ -0,0 +1,114 @@ +_base_ = './rtmdet_l_swin_b_4xb32-100e_coco.py' + +model = dict( + backbone=dict( + depths=[2, 2, 18, 2, 1], + num_heads=[4, 8, 16, 32, 64], + strides=(4, 2, 2, 2, 2), + out_indices=(1, 2, 3, 4)), + neck=dict(in_channels=[256, 512, 1024, 2048]), + bbox_head=dict( + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32, 64]))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='CachedMosaic', img_scale=(1280, 1280), pad_val=114.0), + dict( + type='RandomResize', + scale=(2560, 2560), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(1280, 1280)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(1280, 1280), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(1280, 1280)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1280, 1280), keep_ratio=True), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=16, num_workers=20, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(num_workers=20, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 100 +stage2_num_epochs = 10 + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +img_scales = [(1280, 1280), (640, 640), (1920, 1920)] +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=None), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale=s, keep_ratio=True) + for s in img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) 
+ ], + [ + dict( + type='Pad', + size=(1920, 1920), + pad_val=dict(img=(114, 114, 114))), + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_m_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_m_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c83f5a60bd7d9f85f46574ee4cd19027391b5e1e --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_m_8xb32-300e_coco.py @@ -0,0 +1,6 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' + +model = dict( + backbone=dict(deepen_factor=0.67, widen_factor=0.75), + neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), + bbox_head=dict(in_channels=192, feat_channels=192)) diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_s_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_s_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cbf76247b74e94735eea0dd70ce6ac9e57f4dadf --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_s_8xb32-300e_coco.py @@ -0,0 +1,62 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.5, + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), + bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(640, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_tiny_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_tiny_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a686f4a7f0c4c3bed956c2a3fa504ea8863c669d --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_tiny_8xb32-300e_coco.py @@ -0,0 +1,43 @@ +_base_ = './rtmdet_s_8xb32-300e_coco.py' + +checkpoint = 
'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa + +model = dict( + backbone=dict( + deepen_factor=0.167, + widen_factor=0.375, + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), + bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='CachedMosaic', + img_scale=(640, 640), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(640, 640), + ratio_range=(1.0, 1.0), + max_cached_images=10, + random_pop=False, + pad_val=(114, 114, 114), + prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_tta.py b/mmpose/configs/mmdet/rtmdet/rtmdet_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..6dde36de3ff06576944a351de9daf53746103f21 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_tta.py @@ -0,0 +1,36 @@ +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) + +img_scales = [(640, 640), (320, 320), (960, 960)] +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=None), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale=s, keep_ratio=True) + for s in img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) 
+ ], + [ + dict( + type='Pad', + size=(960, 960), + pad_val=dict(img=(114, 114, 114))), + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_x_8xb32-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_x_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..16a33632c00b19b270b237f5dcd8f603350ac0c9 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_x_8xb32-300e_coco.py @@ -0,0 +1,7 @@ +_base_ = './rtmdet_l_8xb32-300e_coco.py' + +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + neck=dict( + in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), + bbox_head=dict(in_channels=320, feat_channels=320)) diff --git a/mmpose/configs/mmdet/rtmdet/rtmdet_x_p6_4xb8-300e_coco.py b/mmpose/configs/mmdet/rtmdet/rtmdet_x_p6_4xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d1bb7fa6a78812e5a415acfb60eccedae9b884e2 --- /dev/null +++ b/mmpose/configs/mmdet/rtmdet/rtmdet_x_p6_4xb8-300e_coco.py @@ -0,0 +1,132 @@ +_base_ = './rtmdet_x_8xb32-300e_coco.py' + +model = dict( + backbone=dict(arch='P6', out_indices=(2, 3, 4, 5)), + neck=dict(in_channels=[320, 640, 960, 1280]), + bbox_head=dict( + anchor_generator=dict( + type='MlvlPointGenerator', offset=0, strides=[8, 16, 32, 64]))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='CachedMosaic', img_scale=(1280, 1280), pad_val=114.0), + dict( + type='RandomResize', + scale=(2560, 2560), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(1280, 1280)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict( + type='CachedMixUp', + img_scale=(1280, 1280), + ratio_range=(1.0, 1.0), + max_cached_images=20, + pad_val=(114, 114, 114)), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(1280, 1280), + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(1280, 1280)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1280, 1280), keep_ratio=True), + dict(type='Pad', size=(1280, 1280), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=8, num_workers=20, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=5, num_workers=20, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 300 +stage2_num_epochs = 20 + +base_lr = 0.004 * 32 / 256 +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + 
type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +img_scales = [(1280, 1280), (640, 640), (1920, 1920)] +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=None), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale=s, keep_ratio=True) + for s in img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) + ], + [ + dict( + type='Pad', + size=(1920, 1920), + pad_val=dict(img=(114, 114, 114))), + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/sabl/README.md b/mmpose/configs/mmdet/sabl/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c730729cfc72a7e3efe885f814ce18c16d2f4a6d --- /dev/null +++ b/mmpose/configs/mmdet/sabl/README.md @@ -0,0 +1,47 @@ +# SABL + +> [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260) + + + +## Abstract + +Current object detection frameworks mainly rely on bounding box regression to localize objects. Despite the remarkable progress in recent years, the precision of bounding box regression remains unsatisfactory, hence limiting performance in object detection. We observe that precise localization requires careful placement of each side of the bounding box. However, the mainstream approach, which focuses on predicting centers and sizes, is not the most effective way to accomplish this task, especially when there exists displacements with large variance between the anchors and the targets. In this paper, we propose an alternative approach, named as Side-Aware Boundary Localization (SABL), where each side of the bounding box is respectively localized with a dedicated network branch. To tackle the difficulty of precise localization in the presence of displacements with large variance, we further propose a two-step localization scheme, which first predicts a range of movement through bucket prediction and then pinpoints the precise position within the predicted bucket. We test the proposed method on both two-stage and single-stage detection frameworks. Replacing the standard bounding box regression branch with the proposed design leads to significant improvements on Faster R-CNN, RetinaNet, and Cascade R-CNN, by 3.0%, 1.7%, and 0.9%, respectively. + +
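+As a rough illustration of the two-step scheme described above: the configs in this folder wire it up through `BucketingBBoxCoder` (e.g. `num_buckets=14` with `scale_factor=1.7` for the R-CNN heads and `3.0` for the RetinaNet heads), where each side of a box is first classified into a bucket and then refined by a small offset within the chosen bucket. The sketch below is only a simplified, self-contained toy version of that idea for a single box side; the actual bucket layout and normalisation in `BucketingBBoxCoder` differ.
+
+```python
+import numpy as np
+
+def decode_side(side_x, side_len, bucket_scores, bucket_offsets,
+                num_buckets=14, scale_factor=1.7):
+    """Toy two-step decoding of one box side (illustration only).
+
+    side_x         : current coordinate of the side (e.g. x1 of a proposal)
+    side_len       : proposal width/height, used to scale the search range
+    bucket_scores  : (num_buckets,) classification scores, one per bucket
+    bucket_offsets : (num_buckets,) fine offsets in units of bucket width
+    """
+    # Coarse step: buckets tile a search range of scale_factor * side_len
+    # centred on the current side position; pick the most likely bucket.
+    bucket_w = scale_factor * side_len / num_buckets
+    best = int(np.argmax(bucket_scores))
+    coarse = (best - (num_buckets - 1) / 2.0) * bucket_w
+    # Fine step: a small regressed offset relative to the chosen bucket.
+    fine = float(bucket_offsets[best]) * bucket_w
+    return side_x + coarse + fine
+
+# e.g. refine the left edge (x1 = 50) of a 100-px-wide proposal
+new_x1 = decode_side(50.0, 100.0, np.random.rand(14), np.zeros(14))
+```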
+ +## Results and Models + +The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). +Single-scale testing (1333x800) is adopted in all results. + +| Method | Backbone | Lr schd | ms-train | box AP | Config | Download | +| :----------------: | :-------: | :-----: | :------: | :----: | :-----------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SABL Faster R-CNN | R-50-FPN | 1x | N | 39.9 | [config](./sabl-faster-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/20200830_130324.log.json) | +| SABL Faster R-CNN | R-101-FPN | 1x | N | 41.7 | [config](./sabl-faster-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/20200830_183949.log.json) | +| SABL Cascade R-CNN | R-50-FPN | 1x | N | 41.6 | [config](./sabl-cascade-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/20200831_033726.log.json) | +| SABL Cascade R-CNN | R-101-FPN | 1x | N | 43.0 | [config](./sabl-cascade-rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/20200831_141745.log.json) | + +| Method | Backbone | GN | Lr schd | ms-train | box AP | Config | Download | +| :------------: | :-------: | :-: | :-----: | :---------: | :----: | :----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SABL RetinaNet | R-50-FPN | N | 1x | N | 37.7 | [config](./sabl-retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/20200830_053451.log.json) | +| SABL RetinaNet | R-50-FPN | Y | 1x | N | 38.8 | [config](./sabl-retinanet_r50-gn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/20200831_141955.log.json) | +| SABL RetinaNet | R-101-FPN | N | 1x | N | 39.7 | [config](./sabl-retinanet_r101_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/20200831_034256.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 1x | N | 40.5 | [config](./sabl-retinanet_r101-gn_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/20200830_201422.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 2x | Y (640~800) | 42.9 | [config](./sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/20200830_144807.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 2x | Y (480~960) | 43.6 | [config](./sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/20200830_164537.log.json) | + +## Citation + +We provide config files to reproduce the object detection results in the ECCV 2020 Spotlight paper for [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260). + +```latex +@inproceedings{Wang_2020_ECCV, + title = {Side-Aware Boundary Localization for More Precise Object Detection}, + author = {Jiaqi Wang and Wenwei Zhang and Yuhang Cao and Kai Chen and Jiangmiao Pang and Tao Gong and Jianping Shi and Chen Change Loy and Dahua Lin}, + booktitle = {ECCV}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/sabl/metafile.yml b/mmpose/configs/mmdet/sabl/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..632b869cc4bec559d442410b1d3a4f18d74556ed --- /dev/null +++ b/mmpose/configs/mmdet/sabl/metafile.yml @@ -0,0 +1,140 @@ +Collections: + - Name: SABL + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - SABL + Paper: + URL: https://arxiv.org/abs/1912.04260 + Title: 'Side-Aware Boundary Localization for More Precise Object Detection' + README: configs/sabl/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/roi_heads/bbox_heads/sabl_head.py#L14 + Version: v2.4.0 + +Models: + - Name: sabl-faster-rcnn_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth + + - Name: sabl-faster-rcnn_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-faster-rcnn_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth + + - Name: sabl-cascade-rcnn_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-cascade-rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth + + - Name: sabl-cascade-rcnn_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-cascade-rcnn_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth + + - Name: sabl-retinanet_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth + + - Name: sabl-retinanet_r50-gn_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r50-gn_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth + + - Name: sabl-retinanet_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth + + - Name: sabl-retinanet_r101-gn_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r101-gn_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth + + - Name: sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth + + - Name: sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco + In Collection: SABL + Config: configs/sabl/sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth diff --git a/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..404e7fcb2ac52773c9bc74f411e66584114f378e --- /dev/null +++ 
b/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,90 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict(bbox_head=[ + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) + ])) diff --git a/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..69c59ca20d6c16e458292a55b8e4258a3d9a06bb --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-cascade-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,86 @@ +_base_ = [ + '../_base_/models/cascade-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + roi_head=dict(bbox_head=[ + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + 
loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) + ])) diff --git a/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d1bf8b9c8cf1ac62d351456e7b19f75259ec0625 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=dict( + _delete_=True, + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)))) diff --git a/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a727bd6d3da09c86908c3c584509c5313cf732b5 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + 
roi_head=dict( + bbox_head=dict( + _delete_=True, + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)))) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f181ad6813e4c6e3729ff80b3b8d915d84b53bf2 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_1x_coco.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dc7209aebad3efcb88945460cf20b36e6ec4b419 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-480-960-2x_coco.py @@ -0,0 +1,68 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + 
type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 960)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ac5f6d9811dc8e45cfc036b3a3d4a04e7fa5ee60 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101-gn_fpn_ms-640-800-2x_coco.py @@ -0,0 +1,68 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +# optimizer +optim_wrapper = dict( + 
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..409695b5dbccfe20bb6e85ee16231211c2ebcdba --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r101_fpn_1x_coco.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r50-gn_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r50-gn_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4facdb6aaab05fd04b95e8c3ba2f0460090b1d6c --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r50-gn_fpn_1x_coco.py @@ -0,0 +1,53 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', 
lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sabl/sabl-retinanet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/sabl/sabl-retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9073d6f002fcb49aecc280f318b8769b477d2d82 --- /dev/null +++ b/mmpose/configs/mmdet/sabl/sabl-retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/scnet/README.md b/mmpose/configs/mmdet/scnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..08dbfa87f5625ba6500c731910c178a5e2684e0f --- /dev/null +++ b/mmpose/configs/mmdet/scnet/README.md @@ -0,0 +1,63 @@ +# SCNet + +> [SCNet: Training Inference Sample Consistency for Instance Segmentation](https://arxiv.org/abs/2012.10150) + + + +## Abstract + + + +Cascaded architectures have brought significant performance improvement in object detection and instance segmentation. However, there are lingering issues regarding the disparity in the Intersection-over-Union (IoU) distribution of the samples between training and inference. This disparity can potentially exacerbate detection accuracy. This paper proposes an architecture referred to as Sample Consistency Network (SCNet) to ensure that the IoU distribution of the samples at training time is close to that at inference time. Furthermore, SCNet incorporates feature relay and utilizes global contextual information to further reinforce the reciprocal relationships among classifying, detecting, and segmenting sub-tasks. Extensive experiments on the standard COCO dataset reveal the effectiveness of the proposed method over multiple evaluation metrics, including box AP, mask AP, and inference speed. In particular, while running 38% faster, the proposed SCNet improves the AP of the box and mask predictions by respectively 1.3 and 2.3 points compared to the strong Cascade Mask R-CNN baseline. + +
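+SCNet trains a semantic branch in addition to the box and mask heads, so the data root must contain both COCO and the COCO-stuff `stuffthingmaps`; the exact layout is given in the Dataset section below. As a purely optional, hypothetical sanity check (the helper name is ours, it is not part of mmdetection), the expected directories could be verified before training like this:
+
+```python
+from pathlib import Path
+
+def check_scnet_data_root(data_root='data/coco'):
+    """Check that the COCO + stuffthingmaps layout described below exists."""
+    root = Path(data_root)
+    required = ['annotations', 'train2017', 'val2017', 'stuffthingmaps']
+    missing = [name for name in required if not (root / name).is_dir()]
+    if missing:
+        raise FileNotFoundError(
+            f'{missing} not found under {root}; download COCO-stuff '
+            '(stuffthingmaps) and extract it into the COCO data root')
+    return root
+
+check_scnet_data_root()  # raises if the layout is incomplete
+```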
+ +## Dataset + +SCNet requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) + +| Backbone | Style | Lr schd | Mem (GB) | Inf speed (fps) | box AP | mask AP | TTA box AP | TTA mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :-------------: | :----: | :-----: | :--------: | :---------: | :------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 7.0 | 6.2 | 43.5 | 39.2 | 44.8 | 40.9 | [config](./scnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco_20210117_192725.log.json) | +| R-50-FPN | pytorch | 20e | 7.0 | 6.2 | 44.5 | 40.0 | 45.8 | 41.5 | [config](./scnet_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco_20210116_060148.log.json) | +| R-101-FPN | pytorch | 20e | 8.9 | 5.8 | 45.8 | 40.9 | 47.3 | 42.7 | [config](./scnet_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco_20210118_175824.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 13.2 | 4.9 | 47.5 | 42.3 | 48.9 | 44.0 | [config](./scnet_x101-64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco_20210120_045959.log.json) | + +### Notes + +- Training hyper-parameters are identical to those of [HTC](https://github.com/open-mmlab/mmdetection/tree/main/configs/htc). +- TTA means Test Time Augmentation, which applies horizontal flip and multi-scale testing. Refer to [config](./scnet_r50_fpn_1x_coco.py). + +## Citation + +We provide the code for reproducing experiment results of [SCNet](https://arxiv.org/abs/2012.10150). 
+ +```latex +@inproceedings{vu2019cascade, + title={SCNet: Training Inference Sample Consistency for Instance Segmentation}, + author={Vu, Thang and Haeyong, Kang and Yoo, Chang D}, + booktitle={AAAI}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/scnet/metafile.yml b/mmpose/configs/mmdet/scnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..936d38960a8f423198702194f64a9eb46c770979 --- /dev/null +++ b/mmpose/configs/mmdet/scnet/metafile.yml @@ -0,0 +1,116 @@ +Collections: + - Name: SCNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - SCNet + Paper: + URL: https://arxiv.org/abs/2012.10150 + Title: 'SCNet: Training Inference Sample Consistency for Instance Segmentation' + README: configs/scnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/scnet.py#L6 + Version: v2.9.0 + +Models: + - Name: scnet_r50_fpn_1x_coco + In Collection: SCNet + Config: configs/scnet/scnet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth + + - Name: scnet_r50_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth + + - Name: scnet_r101_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 8.9 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth + + - Name: scnet_x101-64x4d_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_x101-64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 13.2 + inference time (ms/im): + - value: 204.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth diff --git a/mmpose/configs/mmdet/scnet/scnet_r101_fpn_20e_coco.py b/mmpose/configs/mmdet/scnet/scnet_r101_fpn_20e_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..ebba52978b23c07a68e3563033c860a95dd515b6 --- /dev/null +++ b/mmpose/configs/mmdet/scnet/scnet_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './scnet_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/scnet/scnet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/scnet/scnet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a0210fdb456c26b2c05d99a2435da14fc30f088d --- /dev/null +++ b/mmpose/configs/mmdet/scnet/scnet_r50_fpn_1x_coco.py @@ -0,0 +1,138 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' +# model settings +model = dict( + type='SCNet', + roi_head=dict( + _delete_=True, + type='SCNetRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='SCNetMaskHead', + num_convs=12, + in_channels=256, + conv_out_channels=256, + num_classes=80, + conv_to_res=True, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='SCNetSemanticHead', + num_ins=5, + fusion_level=1, + seg_scale_factor=1 / 8, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2), + conv_to_res=True), + glbctx_head=dict( + type='GlobalContextHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_weight=3.0, + conv_to_res=True), + feat_relay_head=dict( + type='FeatureRelayHead', + in_channels=1024, + out_conv_channels=256, + roi_feat_size=7, + scale_factor=2))) + +# TODO +# uncomment below code to enable 
test time augmentations +# img_norm_cfg = dict( +# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# test_pipeline = [ +# dict(type='LoadImageFromFile'), +# dict( +# type='MultiScaleFlipAug', +# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), +# (1400, 2100)], +# flip=True, +# transforms=[ +# dict(type='Resize', keep_ratio=True), +# dict(type='RandomFlip', flip_ratio=0.5), +# dict(type='Normalize', **img_norm_cfg), +# dict(type='Pad', size_divisor=32), +# dict(type='ImageToTensor', keys=['img']), +# dict(type='Collect', keys=['img']), +# ]) +# ] +# data = dict( +# val=dict(pipeline=test_pipeline), +# test=dict(pipeline=test_pipeline)) diff --git a/mmpose/configs/mmdet/scnet/scnet_r50_fpn_20e_coco.py b/mmpose/configs/mmdet/scnet/scnet_r50_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..533e1b5f3253387788fbf1a9d6d7a38c7c5c5f30 --- /dev/null +++ b/mmpose/configs/mmdet/scnet/scnet_r50_fpn_20e_coco.py @@ -0,0 +1,15 @@ +_base_ = './scnet_r50_fpn_1x_coco.py' +# learning policy +max_epochs = 20 +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 19], + gamma=0.1) +] +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_20e_coco.py b/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1e54b030fa68f76f22edf66e3594d66a13c2c672 --- /dev/null +++ b/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_20e_coco.py @@ -0,0 +1,15 @@ +_base_ = './scnet_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_8xb1-20e_coco.py b/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_8xb1-20e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3cdce7d54248e77e98639d68490cc30dfd625c87 --- /dev/null +++ b/mmpose/configs/mmdet/scnet/scnet_x101-64x4d_fpn_8xb1-20e_coco.py @@ -0,0 +1,8 @@ +_base_ = './scnet_x101-64x4d_fpn_20e_coco.py' +train_dataloader = dict(batch_size=1, num_workers=1) + +optim_wrapper = dict(optimizer=dict(lr=0.01)) +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/scratch/README.md b/mmpose/configs/mmdet/scratch/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7bdd8ff9f20a0b222a37eebfb44311150c130b15 --- /dev/null +++ b/mmpose/configs/mmdet/scratch/README.md @@ -0,0 +1,35 @@ +# Scratch + +> [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883) + + + +## Abstract + +We report competitive results on object detection and instance segmentation on the COCO dataset using standard models trained from random initialization. 
The results are no worse than their ImageNet pre-training counterparts even when using the hyper-parameters of the baseline system (Mask R-CNN) that were optimized for fine-tuning pre-trained models, with the sole exception of increasing the number of training iterations so the randomly initialized models may converge. Training from random initialization is surprisingly robust; our results hold even when: (i) using only 10% of the training data, (ii) for deeper and wider models, and (iii) for multiple tasks and metrics. Experiments show that ImageNet pre-training speeds up convergence early in training, but does not necessarily provide regularization or improve final target task accuracy. To push the envelope we demonstrate 50.9 AP on COCO object detection without using any external data---a result on par with the top COCO 2017 competition results that used ImageNet pre-training. These observations challenge the conventional wisdom of ImageNet pre-training for dependent tasks and we expect these discoveries will encourage people to rethink the current de facto paradigm of \`pre-training and fine-tuning' in computer vision. + +
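Condensed from the configs added later in this diff, the essential "from scratch" overrides are: unfreeze the whole backbone, drop the ImageNet initialization, switch to GroupNorm throughout, and exclude norm parameters from weight decay. The fragment below is for orientation only; the full config files further down remain authoritative.

```python
# Condensed view of the train-from-scratch overrides used in the configs below
# (MMEngine-style config fragment; see the full files for the 6x schedule).
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        frozen_stages=-1,         # train every backbone stage, nothing frozen
        zero_init_residual=False,
        norm_cfg=norm_cfg,
        init_cfg=None),           # no ImageNet pretraining
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg)))
# GN parameters are excluded from weight decay.
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
```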
+ +## Results and Models + +| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download | +| :----------: | :------: | :-----: | :-----: | :----: | :-----: | :-------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](./faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](./mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) | + +Note: + +- The above models are trained with 16 GPUs. + +## Citation + +```latex +@article{he2018rethinking, + title={Rethinking imagenet pre-training}, + author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, + journal={arXiv preprint arXiv:1811.08883}, + year={2018} +} +``` diff --git a/mmpose/configs/mmdet/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py b/mmpose/configs/mmdet/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6e632b9a150871a44b698dfdb0fdc3f07308ef81 --- /dev/null +++ b/mmpose/configs/mmdet/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + zero_init_residual=False, + norm_cfg=norm_cfg, + init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg))) + +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +max_epochs = 73 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[65, 71], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) + +# only keep latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) diff --git a/mmpose/configs/mmdet/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py b/mmpose/configs/mmdet/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9796f504b677a841919bb058ded414de25e74a50 --- /dev/null +++ b/mmpose/configs/mmdet/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py @@ -0,0 +1,40 @@ +_base_ = [ 
+ '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + zero_init_residual=False, + norm_cfg=norm_cfg, + init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) + +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +max_epochs = 73 + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[65, 71], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) + +# only keep latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) diff --git a/mmpose/configs/mmdet/scratch/metafile.yml b/mmpose/configs/mmdet/scratch/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..977b8e5bfc2b6319793ae8abdeb71e5e04d7cb1b --- /dev/null +++ b/mmpose/configs/mmdet/scratch/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: Rethinking ImageNet Pre-training + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1811.08883 + Title: 'Rethinking ImageNet Pre-training' + README: configs/scratch/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py + Version: v2.0.0 + +Models: + - Name: faster-rcnn_r50_fpn_gn-all_scratch_6x_coco + In Collection: Rethinking ImageNet Pre-training + Config: configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py + Metadata: + Epochs: 72 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth + + - Name: mask-rcnn_r50_fpn_gn-all_scratch_6x_coco + In Collection: Rethinking ImageNet Pre-training + Config: configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py + Metadata: + Epochs: 72 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth diff --git a/mmpose/configs/mmdet/seesaw_loss/README.md b/mmpose/configs/mmdet/seesaw_loss/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7077d75351caf0ca21760939eb0e2cea2fee5f85 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/README.md @@ -0,0 +1,47 @@ +# Seesaw Loss + +> [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032) + + + +## Abstract + +Instance segmentation has witnessed a remarkable progress on class-balanced benchmarks. However, they fail to perform as accurately in real-world scenarios, where the category distribution of objects naturally comes with a long tail. 
Instances of head classes dominate a long-tailed dataset and they serve as negative samples of tail categories. The overwhelming gradients of negative samples on tail classes lead to a biased learning process for classifiers. Consequently, objects of tail categories are more likely to be misclassified as backgrounds or head categories. To tackle this problem, we propose Seesaw Loss to dynamically re-balance gradients of positive and negative samples for each category, with two complementary factors, i.e., mitigation factor and compensation factor. The mitigation factor reduces punishments to tail categories w.r.t. the ratio of cumulative training instances between different categories. Meanwhile, the compensation factor increases the penalty of misclassified instances to avoid false positives of tail categories. We conduct extensive experiments on Seesaw Loss with mainstream frameworks and different data sampling strategies. With a simple end-to-end training pipeline, Seesaw Loss obtains significant gains over Cross-Entropy Loss, and achieves state-of-the-art performance on LVIS dataset without bells and whistles. + +
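The mitigation and compensation factors described in the abstract can be made concrete with a short, simplified sketch of the seesaw-weighted softmax. This is an illustration only, not mmdet's `SeesawLoss` (which additionally uses a normalized linear predictor and an objectness branch); `p` and `q` mirror the configs below, while the class counts here are toy values rather than the 1203 LVIS classes.

```python
# Illustrative sketch of a seesaw-weighted cross-entropy (simplified; the
# actual mmdet SeesawLoss differs in details such as the objectness branch).
import torch
import torch.nn.functional as F


def seesaw_ce(logits, labels, cum_counts, p=0.8, q=2.0):
    """logits: (B, C), labels: (B,), cum_counts: (C,) cumulative instance counts."""
    probs = F.softmax(logits.detach(), dim=1)                   # sigma_j, no grad
    # Mitigation factor M_ij = min(1, (N_j / N_i) ** p): a sample of class i
    # pushes a weaker negative gradient onto any class j that is rarer than i.
    ratio = cum_counts[None, :] / cum_counts[labels][:, None]   # N_j / N_i
    mitigation = ratio.clamp(max=1.0) ** p
    # Compensation factor C_ij = max(1, (sigma_j / sigma_i) ** q): restore the
    # penalty whenever class j is actually scored above the true class i.
    score_ratio = probs / probs.gather(1, labels[:, None])      # sigma_j / sigma_i
    compensation = score_ratio.clamp(min=1.0) ** q
    weights = mitigation * compensation
    weights.scatter_(1, labels[:, None], 1.0)                   # S_ii = 1
    # Seesaw softmax: each negative class enters the denominator scaled by S_ij.
    return F.cross_entropy(logits + weights.log(), labels)


# Toy usage with a long-tailed count distribution (3 classes instead of 1203).
logits = torch.randn(4, 3)
labels = torch.tensor([0, 2, 1, 2])
counts = torch.tensor([1000., 100., 10.])
print(seesaw_ce(logits, labels, counts))
```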
+ +- Please setup [LVIS dataset](../lvis/README.md) for MMDetection. + +- RFS indicates to use oversample strategy [here](../../docs/tutorials/customipredataset.md#class-balanced-dataset) with oversample threshold `1e-3`. + +## Results and models of Seasaw Loss on LVIS v1 dataset + +| Method | Backbone | Style | Lr schd | Data Sampler | Norm Mask | box AP | mask AP | Config | Download | +| :----------------: | :-------: | :-----: | :-----: | :----------: | :-------: | :----: | :-----: | :----------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | R-50-FPN | pytorch | 2x | random | N | 25.6 | 25.0 | [config](./mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | random | Y | 25.6 | 25.4 | [config](./mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 27.4 | 26.7 | [config](./mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 27.2 | 27.3 | [config](./mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | RFS | N | 27.6 | 26.4 | [config](./mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | RFS | Y | 27.6 | 26.8 | [config](./mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 
2x | RFS | N | 28.9 | 27.6 | [config](./mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 28.9 | 28.2 | [config](./mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 33.1 | 29.2 | [config](./cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 33.0 | 30.0 | [config](./cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | N | 30.0 | 29.3 | [config](./cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 32.8 | 30.1 | [config](./cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | + +## Citation + +We provide config files to reproduce the instance segmentation performance in the CVPR 2021 paper for [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032). 
+ +```latex +@inproceedings{wang2021seesaw, + title={Seesaw Loss for Long-Tailed Instance Segmentation}, + author={Jiaqi Wang and Wenwei Zhang and Yuhang Zang and Yuhang Cao and Jiangmiao Pang and Tao Gong and Kai Chen and Ziwei Liu and Chen Change Loy and Dahua Lin}, + booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..2de87dcca59ccac7fc96c10c2a069fcf0464aeff --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py @@ -0,0 +1,5 @@ +_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py' # noqa: E501 +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..4d67ad7d4817a32b365bc2567937f69b68a9c97c --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,5 @@ +_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' # noqa: E501 +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..2a1a87d4203a12a78a26fd873bd6017fafb49cdf --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py @@ -0,0 +1,116 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + 
type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +train_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline)) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='LVISMetric', + ann_file=data_root + 'annotations/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator + +train_cfg = dict(val_interval=24) diff --git a/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7b4df91368d23092a68f16ba4a35660ea23130 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,95 @@ +_base_ = [ + '../_base_/models/cascade-mask-rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, 
+ bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) + +train_cfg = dict(val_interval=24) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..b518c2135acb39a3d1119a8892c72816910ca496 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py' # noqa: E501 +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..008bbcae6eb8d189bdd0688b42d663eeba2a661e --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py' # noqa: E501 +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..8a0b6755bf6f218c337d9ee16677e3e64886c019 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..6143231918e028523b6bb1792887ef7ce16dde02 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' +model = dict( + backbone=dict( + 
depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..06d2438cf7c351a2fb352f787bc434cc6afc3ebb --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py @@ -0,0 +1,5 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py' +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..5fc68d3df32015e0fc8d5dd2bc92df416a8fc5fd --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,5 @@ +_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..25c646c9c75c4468e71442049876a77382528e02 --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict( + num_classes=1203, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0)), + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +train_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline)) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/lvis_v1_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='LVISMetric', + ann_file=data_root + 'annotations/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator + +train_cfg = dict(val_interval=24) diff --git a/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py 
b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py new file mode 100644 index 0000000000000000000000000000000000000000..d60320e0b78035d24adb86f3aa184433951481fe --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict( + num_classes=1203, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0)), + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) + +train_cfg = dict(val_interval=24) diff --git a/mmpose/configs/mmdet/seesaw_loss/metafile.yml b/mmpose/configs/mmdet/seesaw_loss/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..374b9cde64ab1ff3c5f23971467846804738b0aa --- /dev/null +++ b/mmpose/configs/mmdet/seesaw_loss/metafile.yml @@ -0,0 +1,203 @@ +Collections: + - Name: Seesaw Loss + Metadata: + Training Data: LVIS + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + - Seesaw Loss + Paper: + URL: https://arxiv.org/abs/2008.10032 + Title: 'Seesaw Loss for Long-Tailed Instance Segmentation' + README: configs/seesaw_loss/README.md + +Models: + - Name: mask-rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth + - Name: mask-rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth + - Name: mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.4 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth + - Name: mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.2 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth + - Name: mask-rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth + - Name: mask-rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth + - Name: mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth + - Name: mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 28.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth + - Name: cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.1 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth + - Name: cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py + 
Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth + - Name: cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 30.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth + - Name: cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 32.8 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth diff --git a/mmpose/configs/mmdet/selfsup_pretrain/README.md b/mmpose/configs/mmdet/selfsup_pretrain/README.md new file mode 100644 index 0000000000000000000000000000000000000000..57537dddaca80756b7a6fc582808907edc8d850a --- /dev/null +++ b/mmpose/configs/mmdet/selfsup_pretrain/README.md @@ -0,0 +1,109 @@ +# Backbones Trained by Self-Supervise Algorithms + + + +## Abstract + +Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or views) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a swapped prediction mechanism where we predict the cluster assignment of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements much. We validate our findings by achieving 75.3% top-1 accuracy on ImageNet with ResNet-50, as well as surpassing supervised pretraining on all the considered transfer tasks. + +
+ +We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks. + +
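The Usage section below relies on `tools/model_converters/selfsup2mmdet.py` to re-key self-supervised checkpoints into MMDetection-style backbone weights. Roughly, such a conversion looks like the sketch here, which assumes the published MoCo v2 checkpoint layout (a `state_dict` whose backbone keys carry a `module.encoder_q.` prefix); it is not the actual converter.

```python
# Hedged sketch of what a MoCo-v2 -> MMDetection key conversion does.
# Assumes the public MoCo v2 checkpoint layout (state_dict with
# 'module.encoder_q.' prefixes); the real selfsup2mmdet.py may differ.
import torch


def convert_moco(src_path, dst_path, prefix='module.encoder_q.'):
    ckpt = torch.load(src_path, map_location='cpu')
    state = ckpt.get('state_dict', ckpt)
    new_state = {}
    for k, v in state.items():
        if not k.startswith(prefix):
            continue                  # drop the momentum encoder and queue
        new_k = k[len(prefix):]
        if new_k.startswith('fc'):
            continue                  # the MLP head is not used by the detector
        new_state[new_k] = v
    torch.save(dict(state_dict=new_state), dst_path)


# convert_moco('moco_v2_800ep_pretrain.pth.tar', 'mocov2_r50_800ep_pretrain.pth')
```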
+ +## Usage + +To use a self-supervisely pretrained backbone, there are two steps to do: + +1. Download and convert the model to PyTorch-style supported by MMDetection +2. Modify the config and change the training setting accordingly + +### Convert model + +For more general usage, we also provide script `selfsup2mmdet.py` in the tools directory to convert the key of models pretrained by different self-supervised methods to PyTorch-style checkpoints used in MMDetection. + +```bash +python -u tools/model_converters/selfsup2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} --selfsup ${method} +``` + +This script convert model from `PRETRAIN_PATH` and store the converted model in `STORE_PATH`. + +For example, to use a ResNet-50 backbone released by MoCo, you can download it from [here](https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar) and use the following command + +```bash +python -u tools/model_converters/selfsup2mmdet.py ./moco_v2_800ep_pretrain.pth.tar mocov2_r50_800ep_pretrain.pth --selfsup moco +``` + +To use the ResNet-50 backbone released by SwAV, you can download it from [here](https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar) + +### Modify config + +The backbone requires SyncBN and the `frozen_stages` need to be changed. A config that use the moco backbone is as below + +```python +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + pretrained='./mocov2_r50_800ep_pretrain.pth', + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False)) + +``` + +## Results and Models + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :------------------------------------------------------------: | :-----: | :------------: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask RCNN | [R50 by MoCo v2](./mask-rcnn_r50-mocov2-pre_fpn_1x_coco.py) | pytorch | 1x | | | 38.0 | 34.3 | [config](./mask-rcnn_r50-mocov2-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614-a8b63483.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614.log.json) | +| Mask RCNN | [R50 by MoCo v2](./mask-rcnn_r50-mocov2-pre_fpn_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 40.8 | 36.8 | [config](./mask-rcnn_r50-mocov2-pre_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717-d95df20a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717.log.json) | 
+| Mask RCNN | [R50 by SwAV](./mask-rcnn_r50-swav-pre_fpn_1x_coco.py) | pytorch | 1x | | | 39.1 | 35.7 | [config](./mask-rcnn_r50-swav-pre_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640-7b9baf28.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640.log.json) | +| Mask RCNN | [R50 by SwAV](./mask-rcnn_r50-swav-pre_fpn_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 41.3 | 37.3 | [config](./mask-rcnn_r50-swav-pre_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717-08e26fca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717.log.json) | + +### Notice + +1. We only provide single-scale 1x and multi-scale 2x configs as examples to show how to use backbones trained by self-supervised algorithms. We will try to reproduce the results in their corresponding paper using the released backbone in the future. Please stay tuned. + +## Citation + +We support to apply the backbone models pre-trained by different self-supervised methods in detection systems and provide their results on Mask R-CNN. + +The pre-trained models are converted from [MoCo](https://github.com/facebookresearch/moco) and downloaded from [SwAV](https://github.com/facebookresearch/swav). + +For SwAV, please cite + +```latex +@article{caron2020unsupervised, + title={Unsupervised Learning of Visual Features by Contrasting Cluster Assignments}, + author={Caron, Mathilde and Misra, Ishan and Mairal, Julien and Goyal, Priya and Bojanowski, Piotr and Joulin, Armand}, + booktitle={Proceedings of Advances in Neural Information Processing Systems (NeurIPS)}, + year={2020} +} +``` + +For MoCo, please cite + +```latex +@Article{he2019moco, + author = {Kaiming He and Haoqi Fan and Yuxin Wu and Saining Xie and Ross Girshick}, + title = {Momentum Contrast for Unsupervised Visual Representation Learning}, + journal = {arXiv preprint arXiv:1911.05722}, + year = {2019}, +} +@Article{chen2020mocov2, + author = {Xinlei Chen and Haoqi Fan and Ross Girshick and Kaiming He}, + title = {Improved Baselines with Momentum Contrastive Learning}, + journal = {arXiv preprint arXiv:2003.04297}, + year = {2020}, +} +``` diff --git a/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..91d45add8aba54de4b25fba11ecf5e18bca0084f --- /dev/null +++ b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) diff --git a/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_ms-2x_coco.py 
b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ddaebf5558a22680d556aa8b3fe79541d634d910 --- /dev/null +++ b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-mocov2-pre_fpn_ms-2x_coco.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_1x_coco.py b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..785c80ec9d14c8e4b54b2e3359f9b4c680eaca17 --- /dev/null +++ b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) diff --git a/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c393e0b36047f731c91c3f0963ef90347a0910e9 --- /dev/null +++ b/mmpose/configs/mmdet/selfsup_pretrain/mask-rcnn_r50-swav-pre_fpn_ms-2x_coco.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', scale=[(1333, 640), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/simple_copy_paste/README.md b/mmpose/configs/mmdet/simple_copy_paste/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23b09ce5dbb3e2cba41cad7b6b45fccd95996fb1 --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/README.md @@ -0,0 +1,38 @@ +# SimpleCopyPaste + +> [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177) + + + +## Abstract + +Building instance segmentation models that are data-efficient and can handle rare object 
categories is an important challenge in computer vision. Leveraging data augmentations is a promising direction towards addressing this challenge. Here, we perform a systematic study of the Copy-Paste augmentation (\[13, 12\]) for instance segmentation where we randomly paste objects onto an image. Prior studies on Copy-Paste relied on modeling the surrounding visual context for pasting the objects. However, we find that the simple mechanism of pasting objects randomly is good enough and can provide solid gains on top of strong baselines. Furthermore, we show Copy-Paste is additive with semi-supervised methods that leverage extra data through pseudo labeling (e.g. self-training). On COCO instance segmentation, we achieve 49.1 mask AP and 57.3 box AP, an improvement of +0.6 mask AP and +1.5 box AP over the previous state-of-the-art. We further demonstrate that Copy-Paste can lead to significant improvements on the LVIS benchmark. Our baseline model outperforms the LVIS 2020 Challenge winning entry by +3.6 mask AP on rare categories. + +
+ +
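The core mechanism described in the abstract is simple to prototype: paste a random subset of masked objects from one image onto another. The snippet below is a minimal NumPy sketch of that idea under simplifying assumptions (same-sized images, boolean masks, no box/label bookkeeping); the function name and signature are ours, and it is not the `CopyPaste` transform actually used by the configs in this folder.

```python
# Minimal NumPy sketch of random copy-paste: paste a random subset of masked
# objects from a source image onto a destination image, and occlude the
# destination's existing instance masks where new objects land on top.
# Illustrative only; not MMDetection's CopyPaste transform.
import numpy as np


def random_copy_paste(dst_img, dst_masks, src_img, src_masks, paste_prob=0.5,
                      rng=None):
    """dst_img/src_img: (H, W, 3) uint8; *_masks: list of (H, W) bool arrays."""
    if rng is None:
        rng = np.random.default_rng()
    pasted_masks = [m for m in src_masks if rng.random() < paste_prob]
    if not pasted_masks:
        return dst_img, dst_masks
    pasted_area = np.any(np.stack(pasted_masks), axis=0)       # union of pasted objects
    out_img = np.where(pasted_area[..., None], src_img, dst_img)
    out_masks = [m & ~pasted_area for m in dst_masks] + pasted_masks
    return out_img, out_masks
```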
+ +## Results and Models + +### Mask R-CNN with Standard Scale Jittering (SSJ) and Simple Copy-Paste(SCP) + +Standard Scale Jittering(SSJ) resizes and crops an image with a resize range of 0.8 to 1.25 of the original image size, and Simple Copy-Paste(SCP) selects a random subset of objects from one of the images and pastes them onto the other image. + +| Backbone | Training schedule | Augmentation | batch size | box AP | mask AP | Config | Download | +| :------: | :---------------: | :----------: | :--------: | :----: | :-----: | :------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 90k | SSJ | 64 | 43.3 | 39.0 | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409.log.json) | +| R-50 | 90k | SSJ+SCP | 64 | 43.8 | 39.2 | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307.log.json) | +| R-50 | 270k | SSJ | 64 | 43.5 | 39.1 | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940.log.json) | +| R-50 | 270k | SSJ+SCP | 64 | 45.1 | 40.3 | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229.log.json) | + +## Citation + +```latex +@inproceedings{ghiasi2021simple, + title={Simple copy-paste is a strong data augmentation method for instance segmentation}, + author={Ghiasi, 
Golnaz and Cui, Yin and Srinivas, Aravind and Qian, Rui and Lin, Tsung-Yi and Cubuk, Ekin D and Le, Quoc V and Zoph, Barret}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2918--2928}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6e081e860e1240f8d35efa8176563a8b5be845 --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs + '../common/ssj_270k_coco-instance.py', +] + +image_size = (1024, 1024) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # the model is trained from scratch, so init_cfg is None + data_preprocessor=dict( + # pad_size_divisor=32 is unnecessary in training but necessary + # in testing. + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-90k_coco.py b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..abe8962ac69184241e30628242e5313c52f503f4 --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-90k_coco.py @@ -0,0 +1,18 @@ +_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py' # noqa + +# training schedule for 90k +max_iters = 90000 + +# learning rate policy +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=90000, + by_epoch=False, + milestones=[81000, 85500, 87750], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ea57d19728d7c563e56d139888059dd9c81317 --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs + '../common/ssj_scp_270k_coco-instance.py' +] + +image_size = (1024, 1024) +batch_augments = [ + 
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # the model is trained from scratch, so init_cfg is None + data_preprocessor=dict( + # pad_size_divisor=32 is unnecessary in training but necessary + # in testing. + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e158b5c05aae3345ba9d4d1a55d1bbb82a789726 --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py @@ -0,0 +1,18 @@ +_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py' # noqa + +# training schedule for 90k +max_iters = 90000 + +# learning rate policy +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=90000, + by_epoch=False, + milestones=[81000, 85500, 87750], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/simple_copy_paste/metafile.yml b/mmpose/configs/mmdet/simple_copy_paste/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..8a40b658feeefd870300e62934ea21315218bfba --- /dev/null +++ b/mmpose/configs/mmdet/simple_copy_paste/metafile.yml @@ -0,0 +1,92 @@ +Collections: + - Name: SimpleCopyPaste + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 32x A100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/2012.07177 + Title: "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" + README: configs/simple_copy_paste/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/datasets/pipelines/transforms.py#L2762 + Version: v2.25.0 + +Models: + - Name: mask-rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco + In Collection: SimpleCopyPaste + Config: configs/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 270000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth + + - Name: mask-rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco + In Collection: SimpleCopyPaste + Config: 
configs/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-90k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth + + - Name: mask-rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco + In Collection: SimpleCopyPaste + Config: configs/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 270000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth + + - Name: mask-rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco + In Collection: SimpleCopyPaste + Config: configs/simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth diff --git a/mmpose/configs/mmdet/soft_teacher/README.md b/mmpose/configs/mmdet/soft_teacher/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1fd3d84dc36b8f7e4a0342e951f81979f1a9dce9 --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/README.md @@ -0,0 +1,33 @@ +# SoftTeacher + +> [End-to-End Semi-Supervised Object Detection with Soft Teacher](https://arxiv.org/abs/2106.09018) + + + +## Abstract + +This paper presents an end-to-end semi-supervised object detection approach, in contrast to previous more complex multi-stage methods. The end-to-end training gradually improves pseudo label qualities during the curriculum, and the more and more accurate pseudo labels in turn benefit object detection training. We also propose two simple yet effective techniques within this framework: a soft teacher mechanism where the classification loss of each unlabeled bounding box is weighed by the classification score produced by the teacher network; a box jittering approach to select reliable pseudo boxes for the learning of box regression. On the COCO benchmark, the proposed approach outperforms previous methods by a large margin under various labeling ratios, i.e. 1%, 5% and 10%. Moreover, our approach proves to perform also well when the amount of labeled data is relatively large. For example, it can improve a 40.9 mAP baseline detector trained using the full COCO training set by +3.6 mAP, reaching 44.5 mAP, by leveraging the 123K unlabeled images of COCO. 
On the state-of-the-art Swin Transformer based object detector (58.9 mAP on test-dev), it can still significantly improve the detection accuracy by +1.5 mAP, reaching 60.4 mAP, and improve the instance segmentation accuracy by +1.2 mAP, reaching 52.4 mAP. Further incorporating with the Object365 pre-trained model, the detection accuracy reaches 61.3 mAP and the instance segmentation accuracy reaches 53.0 mAP, pushing the new state-of-the-art. + +
+ +
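The "soft teacher" mechanism from the abstract can be summarised in a few lines: the classification loss of each pseudo-labelled box is scaled by the score the teacher network assigns to it, so unreliable pseudo labels contribute less. The toy PyTorch snippet below is our own illustration of that weighting (names and signature are ours); it is not the `SoftTeacher` detector that the configs in this folder instantiate.

```python
# Toy illustration of the soft-teacher weighting: the per-box classification
# loss on pseudo-labelled (unlabeled) data is weighted by the teacher's score
# for that box, so boxes the teacher is unsure about are down-weighted.
# Our own sketch, not the SoftTeacher implementation in MMDetection.
import torch
import torch.nn.functional as F


def soft_teacher_cls_loss(student_logits, pseudo_labels, teacher_scores):
    """student_logits: (N, C); pseudo_labels: (N,) int64; teacher_scores: (N,) in [0, 1]."""
    per_box_loss = F.cross_entropy(student_logits, pseudo_labels, reduction='none')
    weights = teacher_scores.detach()          # reliability estimated by the teacher
    return (weights * per_box_loss).sum() / weights.sum().clamp(min=1e-6)


# Example: 4 pseudo boxes, 3 classes, teacher confident about only two of them.
loss = soft_teacher_cls_loss(
    torch.randn(4, 3), torch.tensor([0, 2, 1, 0]),
    torch.tensor([0.95, 0.90, 0.30, 0.10]))
```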
+ +## Results and Models + +| Model | Detector | Labeled Dataset | Iteration | box AP | Config | Download | +| :---------: | :----------: | :-------------: | :-------: | :----: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SoftTeacher | Faster R-CNN | COCO-1% | 180k | 19.9 | [config](./soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_233412-3c8f6d4a.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_233412.log.json) | +| SoftTeacher | Faster R-CNN | COCO-2% | 180k | 24.9 | [config](./soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_020244-c0d2c3aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_020244.log.json) | +| SoftTeacher | Faster R-CNN | COCO-5% | 180k | 30.4 | [config](./soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_070656-308798ad.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_070656.log.json) | +| SoftTeacher | Faster R-CNN | COCO-10% | 180k | 33.8 | [config](./soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_232113-b46f78d0.pth) \| [log](https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_232113.log.json) | + +## Citation + +```latex +@article{xu2021end, + title={End-to-End Semi-Supervised Object Detection with Soft Teacher}, + author={Xu, Mengde and Zhang, Zheng and Hu, Han and Wang, Jianfeng and Wang, Lijuan and Wei, Fangyun and Bai, Xiang and Liu, Zicheng}, + journal={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/soft_teacher/metafile.yml b/mmpose/configs/mmdet/soft_teacher/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..9622acec93ad3138daff09930ecfa2807dc7748a --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/metafile.yml @@ -0,0 +1,67 @@ +Collections: 
+ - Name: SoftTeacher + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2106.09018 + Title: "End-to-End Semi-Supervised Object Detection with Soft Teacher" + README: configs/soft_teacher/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v3.0.0rc1/mmdet/models/detectors/soft_teacher.py#L20 + Version: v3.0.0rc1 + +Models: + - Name: soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py + In Collection: SoftTeacher + Config: configs/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py + Metadata: + Iterations: 180000 + Results: + - Task: Semi-Supervised Object Detection + Dataset: COCO + Metrics: + box AP: 19.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_233412-3c8f6d4a.pth + + - Name: soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py + In Collection: SoftTeacher + Config: configs/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py + Metadata: + Iterations: 180000 + Results: + - Task: Semi-Supervised Object Detection + Dataset: COCO + Metrics: + box AP: 24.9 + Weights: https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_020244-c0d2c3aa.pth + + - Name: soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py + In Collection: SoftTeacher + Config: configs/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py + Metadata: + Iterations: 180000 + Results: + - Task: Semi-Supervised Object Detection + Dataset: COCO + Metrics: + box AP: 30.4 + Weights: https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230331_070656-308798ad.pth + + - Name: soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py + In Collection: SoftTeacher + Config: configs/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py + Metadata: + Iterations: 180000 + Results: + - Task: Semi-Supervised Object Detection + Dataset: COCO + Metrics: + box AP: 33.8 + Weights: https://download.openmmlab.com/mmdetection/v3.0/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0_20230330_232113-b46f78d0.pth diff --git a/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd09645598204482e9f88f6baf00d32eba9cab6 --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.01-coco.py @@ -0,0 +1,9 @@ +_base_ = ['soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py'] + +# 1% coco train2017 is set as labeled dataset +labeled_dataset = _base_.labeled_dataset +unlabeled_dataset = _base_.unlabeled_dataset +labeled_dataset.ann_file = 'semi_anns/instances_train2017.1@1.json' +unlabeled_dataset.ann_file = 'semi_anns/instances_train2017.1@1-unlabeled.json' +train_dataloader = dict( + dataset=dict(datasets=[labeled_dataset, unlabeled_dataset])) diff --git 
a/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8ca38c931926cef33321f931b0c6d5c66824ff55 --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.02-coco.py @@ -0,0 +1,9 @@ +_base_ = ['soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py'] + +# 2% coco train2017 is set as labeled dataset +labeled_dataset = _base_.labeled_dataset +unlabeled_dataset = _base_.unlabeled_dataset +labeled_dataset.ann_file = 'semi_anns/instances_train2017.1@2.json' +unlabeled_dataset.ann_file = 'semi_anns/instances_train2017.1@2-unlabeled.json' +train_dataloader = dict( + dataset=dict(datasets=[labeled_dataset, unlabeled_dataset])) diff --git a/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py new file mode 100644 index 0000000000000000000000000000000000000000..750b7ed6df6c91bab8f68f58f339b2f3696fa693 --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.05-coco.py @@ -0,0 +1,9 @@ +_base_ = ['soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py'] + +# 5% coco train2017 is set as labeled dataset +labeled_dataset = _base_.labeled_dataset +unlabeled_dataset = _base_.unlabeled_dataset +labeled_dataset.ann_file = 'semi_anns/instances_train2017.1@5.json' +unlabeled_dataset.ann_file = 'semi_anns/instances_train2017.1@5-unlabeled.json' +train_dataloader = dict( + dataset=dict(datasets=[labeled_dataset, unlabeled_dataset])) diff --git a/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3713aef442f4add55efafde08b2c98da1773bab0 --- /dev/null +++ b/mmpose/configs/mmdet/soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/default_runtime.py', + '../_base_/datasets/semi_coco_detection.py' +] + +detector = _base_.model +detector.data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32) +detector.backbone = dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')) + +model = dict( + _delete_=True, + type='SoftTeacher', + detector=detector, + data_preprocessor=dict( + type='MultiBranchDataPreprocessor', + data_preprocessor=detector.data_preprocessor), + semi_train_cfg=dict( + freeze_teacher=True, + sup_weight=1.0, + unsup_weight=4.0, + pseudo_label_initial_score_thr=0.5, + rpn_pseudo_thr=0.9, + cls_pseudo_thr=0.9, + reg_pseudo_thr=0.02, + jitter_times=10, + jitter_scale=0.06, + min_pseudo_bbox_wh=(1e-2, 1e-2)), + semi_test_cfg=dict(predict_on='teacher')) + +# 10% coco train2017 is set as labeled dataset +labeled_dataset = _base_.labeled_dataset +unlabeled_dataset = _base_.unlabeled_dataset +labeled_dataset.ann_file = 
'semi_anns/instances_train2017.1@10.json' +unlabeled_dataset.ann_file = 'semi_anns/' \ + 'instances_train2017.1@10-unlabeled.json' +unlabeled_dataset.data_prefix = dict(img='train2017/') +train_dataloader = dict( + dataset=dict(datasets=[labeled_dataset, unlabeled_dataset])) + +# training schedule for 180k +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=180000, val_interval=5000) +val_cfg = dict(type='TeacherStudentValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=180000, + by_epoch=False, + milestones=[120000, 160000], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +default_hooks = dict( + checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=2)) +log_processor = dict(by_epoch=False) + +custom_hooks = [dict(type='MeanTeacherHook')] diff --git a/mmpose/configs/mmdet/solo/README.md b/mmpose/configs/mmdet/solo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4a36676b1b5e0fafd3bfb1cbe4a6cef5fd549c57 --- /dev/null +++ b/mmpose/configs/mmdet/solo/README.md @@ -0,0 +1,54 @@ +# SOLO + +> [SOLO: Segmenting Objects by Locations](https://arxiv.org/abs/1912.04488) + + + +## Abstract + +We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that have made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-thensegment' strategy as used by Mask R-CNN, or predict category masks first then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Now instance segmentation is decomposed into two classification tasks. We demonstrate a much simpler and flexible instance segmentation framework with strong performance, achieving on par accuracy with Mask R-CNN and outperforming recent singleshot instance segmenters in accuracy. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation. + +
+ +
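The "instance category" idea from the abstract boils down to a location-based assignment: an object's center on an S x S grid selects which of the S\*S mask channels is responsible for it. The snippet below is a toy illustration of that mapping (a hypothetical helper of ours, not part of `SOLOHead`); the real head additionally uses the object scale to choose an FPN level, cf. `num_grids` and `scale_ranges` in the configs that follow.

```python
# Toy illustration of SOLO's location-based "instance category" assignment:
# the object's center cell on an S x S grid selects which of the S*S mask
# channels should predict that instance. Hypothetical helper for illustration;
# the real assignment lives in SOLOHead and also depends on object scale.
def solo_grid_index(center_x, center_y, img_w, img_h, num_grid=40):
    row = min(int(center_y / img_h * num_grid), num_grid - 1)
    col = min(int(center_x / img_w * num_grid), num_grid - 1)
    return row * num_grid + col        # index into the num_grid ** 2 mask channels


# An object centred at (500, 300) in a 1333 x 800 image, on the 40 x 40 grid:
print(solo_grid_index(500, 300, 1333, 800))   # -> 615 (row 15, column 15)
```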
+ +## Results and Models + +### SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 8.0 | 14.0 | 33.1 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055.log.json) | +| R-50 | pytorch | Y | 3x | 7.4 | 14.0 | 35.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353.log.json) | + +### Decoupled SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 7.8 | 12.5 | 33.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348.log.json) | +| R-50 | pytorch | Y | 3x | 7.9 | 12.5 | 36.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504.log.json) | + +- Decoupled SOLO has a decoupled head which is different from SOLO head. + Decoupled SOLO serves as an efficient and equivalent variant in accuracy + of SOLO. Please refer to the corresponding config files for details. 
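As a rough sketch of what "decoupled" means in the note above: instead of predicting S\*S full-resolution mask channels, the decoupled head predicts S column maps and S row maps, and the mask of the instance assigned to grid cell (i, j) is recovered as the element-wise product of the two corresponding maps. The snippet below illustrates that recombination with random tensors; it is a simplification under our reading of the paper, not the `DecoupledSOLOHead` forward pass.

```python
# Sketch of the decoupled-head recombination: S "column" maps and S "row" maps
# replace the S*S mask channels of vanilla SOLO, and the soft mask for the
# instance assigned to grid cell (i, j) is sigmoid(X[j]) * sigmoid(Y[i]).
# Random tensors for illustration only, not the DecoupledSOLOHead forward pass.
import torch

S, H, W = 40, 200, 200
x_maps = torch.randn(S, H, W)   # per-column mask logits
y_maps = torch.randn(S, H, W)   # per-row mask logits

i, j = 15, 15                   # grid cell the instance was assigned to
soft_mask = torch.sigmoid(x_maps[j]) * torch.sigmoid(y_maps[i])   # (H, W)
binary_mask = soft_mask > 0.5
```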
+ +### Decoupled Light SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Y | 3x | 2.2 | 31.2 | 32.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703.log.json) | + +- Decoupled Light SOLO using decoupled structure similar to Decoupled + SOLO head, with light-weight head and smaller input size, Please refer + to the corresponding config files for details. + +## Citation + +```latex +@inproceedings{wang2020solo, + title = {{SOLO}: Segmenting Objects by Locations}, + author = {Wang, Xinlong and Kong, Tao and Shen, Chunhua and Jiang, Yuning and Li, Lei}, + booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/solo/decoupled-solo-light_r50_fpn_3x_coco.py b/mmpose/configs/mmdet/solo/decoupled-solo-light_r50_fpn_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fc35df3c3cbbd70532e066de27b06418549eb906 --- /dev/null +++ b/mmpose/configs/mmdet/solo/decoupled-solo-light_r50_fpn_3x_coco.py @@ -0,0 +1,50 @@ +_base_ = './decoupled-solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOLightHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384), + (852, 352)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(852, 512), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7f4b90c19d9fdcc3c895deb4101cf7acd7bd8e --- /dev/null +++ 
b/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_1x_coco.py @@ -0,0 +1,24 @@ +_base_ = './solo_r50_fpn_1x_coco.py' +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) diff --git a/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_3x_coco.py b/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4a8c19decb72a3d904a277faac06670999f6b322 --- /dev/null +++ b/mmpose/configs/mmdet/solo/decoupled-solo_r50_fpn_3x_coco.py @@ -0,0 +1,25 @@ +_base_ = './solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) diff --git a/mmpose/configs/mmdet/solo/metafile.yml b/mmpose/configs/mmdet/solo/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..aa38b8c07b3db7eb018bb769b6eca6e010a1d764 --- /dev/null +++ b/mmpose/configs/mmdet/solo/metafile.yml @@ -0,0 +1,115 @@ +Collections: + - Name: SOLO + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Convolution + - ResNet + Paper: https://arxiv.org/abs/1912.04488 + README: configs/solo/README.md + +Models: + - Name: decoupled-solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/decoupled-solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + inference time (ms/im): + - value: 116.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth + + - Name: decoupled-solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled-solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 36 + inference time (ms/im): + - value: 117.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth + + - Name: decoupled-solo-light_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled-solo-light_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 2.2 + Epochs: 36 + inference 
time (ms/im): + - value: 35.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (852, 512) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth + + - Name: solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 36 + inference time (ms/im): + - value: 94.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth + + - Name: solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + Epochs: 12 + inference time (ms/im): + - value: 95.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth diff --git a/mmpose/configs/mmdet/solo/solo_r101_fpn_8xb8-lsj-200e_coco.py b/mmpose/configs/mmdet/solo/solo_r101_fpn_8xb8-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0f49c5c1ce67973d15b3fad3ad8c966af8203af7 --- /dev/null +++ b/mmpose/configs/mmdet/solo/solo_r101_fpn_8xb8-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './solo_r50_fpn_8xb8-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/solo/solo_r18_fpn_8xb8-lsj-200e_coco.py b/mmpose/configs/mmdet/solo/solo_r18_fpn_8xb8-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..977ae54dc28e56802289ac552ce20815b7d1d761 --- /dev/null +++ b/mmpose/configs/mmdet/solo/solo_r18_fpn_8xb8-lsj-200e_coco.py @@ -0,0 +1,7 @@ +_base_ = './solo_r50_fpn_8xb8-lsj-200e_coco.py' + +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/solo/solo_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/solo/solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..595e9ffe148be84dcc3d5c89e5315e8ef3a24477 --- /dev/null +++ b/mmpose/configs/mmdet/solo/solo_r50_fpn_1x_coco.py @@ -0,0 +1,62 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='SOLO', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + 
feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) + +val_evaluator = dict(metric='segm') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/solo/solo_r50_fpn_3x_coco.py b/mmpose/configs/mmdet/solo/solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0d5abbd2f4d4e1fdc2e3cb92c8e0157188b0aa9a --- /dev/null +++ b/mmpose/configs/mmdet/solo/solo_r50_fpn_3x_coco.py @@ -0,0 +1,35 @@ +_base_ = './solo_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), + (1333, 672), (1333, 640)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# training schedule for 3x +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/solo/solo_r50_fpn_8xb8-lsj-200e_coco.py b/mmpose/configs/mmdet/solo/solo_r50_fpn_8xb8-lsj-200e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d46bf391c907707d222756e9450b661b6edd6985 --- /dev/null +++ b/mmpose/configs/mmdet/solo/solo_r50_fpn_8xb8-lsj-200e_coco.py @@ -0,0 +1,71 @@ +_base_ = '../common/lsj-200e_coco-instance.py' + +image_size = (1024, 1024) +batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] + +# model settings +model = dict( + type='SOLO', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + 
mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +train_dataloader = dict(batch_size=8, num_workers=4) + +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004), + clip_grad=dict(max_norm=35, norm_type=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/solov2/README.md b/mmpose/configs/mmdet/solov2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b216913126e7ee86fc474c2cb1cc8b6023e251d1 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/README.md @@ -0,0 +1,59 @@ +# SOLOv2 + +> [SOLOv2: Dynamic and Fast Instance Segmentation](https://arxiv.org/abs/2003.10152) + + + +## Abstract + +In this work, we aim at building a simple, direct, and fast instance segmentation +framework with strong performance. We follow the principle of the SOLO method of +Wang et al. "SOLO: segmenting objects by locations". Importantly, we take one +step further by dynamically learning the mask head of the object segmenter such +that the mask head is conditioned on the location. Specifically, the mask branch +is decoupled into a mask kernel branch and mask feature branch, which are +responsible for learning the convolution kernel and the convolved features +respectively. Moreover, we propose Matrix NMS (non maximum suppression) to +significantly reduce the inference time overhead due to NMS of masks. Our +Matrix NMS performs NMS with parallel matrix operations in one shot, and +yields better results. We demonstrate a simple direct instance segmentation +system, outperforming a few state-of-the-art methods in both speed and accuracy. +A light-weight version of SOLOv2 executes at 31.3 FPS and yields 37.1% AP. +Moreover, our state-of-the-art results in object detection (from our mask byproduct) +and panoptic segmentation show the potential to serve as a new strong baseline +for many instance-level recognition tasks besides instance segmentation. + +
+ +
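Matrix NMS is the inference-time component the abstract highlights: rather than suppressing masks sequentially, every mask's score is decayed in one shot according to its overlap with all higher-scoring masks. The NumPy sketch below implements the linear-kernel variant, assuming masks are already sorted by descending score; it is a simplification for illustration, not the matrix NMS routine used by these configs (which default to the Gaussian kernel, see `kernel` and `sigma` in `test_cfg`).

```python
# Simplified Matrix NMS (linear kernel), assuming masks are already sorted by
# descending score. Each mask's score is decayed by its IoU with higher-scoring
# masks, compensated by how much those masks are themselves suppressed.
# Illustrative sketch only, not MMDetection's matrix NMS implementation.
import numpy as np


def mask_iou_matrix(masks):
    """masks: (N, H, W) bool -> (N, N) pairwise mask IoU."""
    flat = masks.reshape(len(masks), -1).astype(np.float32)
    inter = flat @ flat.T
    areas = flat.sum(axis=1)
    union = areas[:, None] + areas[None, :] - inter
    return inter / np.maximum(union, 1e-6)


def matrix_nms_linear(masks, scores):
    """masks: (N, H, W) bool, scores: (N,), both sorted by descending score."""
    iou = np.triu(mask_iou_matrix(masks), k=1)      # iou[i, j]: i higher-scored than j
    compensate = iou.max(axis=0)                    # how suppressed each suppressor is
    decay = (1.0 - iou) / np.maximum(1.0 - compensate[:, None], 1e-6)
    return scores * decay.min(axis=0)               # strongest decay wins per mask
```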
+ +## Results and Models + +### SOLOv2 + +| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | +| :--------: | :-----: | :------: | :-----: | :------: | :-----: | :-------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 5.1 | 34.8 | [config](./solov2_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858.log.json) | +| R-50 | pytorch | Y | 3x | 5.1 | 37.5 | [config](./solov2_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856.log.json) | +| R-101 | pytorch | Y | 3x | 6.9 | 39.1 | [config](./solov2_r101_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119.log.json) | +| R-101(DCN) | pytorch | Y | 3x | 7.1 | 41.2 | [config](./solov2_r101-dcn_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734.log.json) | +| X-101(DCN) | pytorch | Y | 3x | 11.3 | 42.4 | [config](./solov2_x101-dcn_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337.log.json) | + +### Light SOLOv2 + +| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | +| :------: | :-----: | :------: | :-----: | :------: | :-----: | :--------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-18 | pytorch | Y | 3x | 9.1 | 29.7 | [config](./solov2-light_r18_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717.log.json) | +| R-34 | pytorch | Y | 3x | 9.3 | 31.9 | [config](./solov2-light_r34_fpn_ms-3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839.log.json) | +| R-50 | pytorch | Y | 3x | 9.9 | 33.7 | [config](./solov2-light_r50_fpn_ms-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256.log.json) | + +## Citation + +```latex +@article{wang2020solov2, + title={SOLOv2: Dynamic and Fast Instance Segmentation}, + author={Wang, Xinlong and Zhang, Rufeng and Kong, Tao and Li, Lei and Shen, Chunhua}, + journal={Proc. Advances in Neural Information Processing Systems (NeurIPS)}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/solov2/metafile.yml b/mmpose/configs/mmdet/solov2/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..d0156b2b40cf62537cdc62af4fa57d644a7978ad --- /dev/null +++ b/mmpose/configs/mmdet/solov2/metafile.yml @@ -0,0 +1,93 @@ +Collections: + - Name: SOLOv2 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - FPN + - Convolution + - ResNet + Paper: https://arxiv.org/abs/2003.10152 + README: configs/solov2/README.md + +Models: + - Name: solov2_r50_fpn_1x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 12 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth + + - Name: solov2_r50_fpn_ms-3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth + + - Name: solov2_r101-dcn_fpn_ms-3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth + + - Name: solov2_x101-dcn_fpn_ms-3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_x101-dcn_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 11.3 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth + + - Name: solov2-light_r18_fpn_ms-3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2-light_r18_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 9.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 29.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth + + - Name: solov2-light_r50_fpn_ms-3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2-light_r50_fpn_ms-3x_coco.py + Metadata: + Training Memory (GB): 9.9 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth diff --git a/mmpose/configs/mmdet/solov2/solov2-light_r18_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2-light_r18_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f8fc53e0aed9dd4479f9cd8dcc98ca61db2e50bf --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2-light_r18_fpn_ms-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './solov2-light_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/solov2/solov2-light_r34_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2-light_r34_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..149b336655349c70233e78d03f72d7ee3f1a75f3 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2-light_r34_fpn_ms-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './solov2-light_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/mmpose/configs/mmdet/solov2/solov2-light_r50-dcn_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2-light_r50-dcn_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..05391944b683985ab975dc8f66be0c8a12f7d255 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2-light_r50-dcn_fpn_ms-3x_coco.py @@ -0,0 +1,14 @@ +_base_ = './solov2-light_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + mask_head=dict( + feat_channels=256, + stacked_convs=3, + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + mask_feature_head=dict(out_channels=128), + dcn_cfg=dict(type='DCNv2'), + dcn_apply_to_all_conv=False)) # light solov2 head diff --git a/mmpose/configs/mmdet/solov2/solov2-light_r50_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2-light_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0a7f779c0f587d11c86a31aca19b2663f79a57 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2-light_r50_fpn_ms-3x_coco.py @@ -0,0 +1,56 @@ +_base_ = './solov2_r50_fpn_1x_coco.py' + +# model settings +model = dict( + mask_head=dict( + stacked_convs=2, + feat_channels=256, + scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), + mask_feature_head=dict(out_channels=128))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), + (768, 352)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + 
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(448, 768), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# training schedule for 3x +max_epochs = 36 +train_cfg = dict(by_epoch=True, max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..370a4eb7db811b285cc55282e4b66360ca338a31 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py @@ -0,0 +1,13 @@ +_base_ = './solov2_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + mask_head=dict( + mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), + dcn_cfg=dict(type='DCNv2'), + dcn_apply_to_all_conv=True)) diff --git a/mmpose/configs/mmdet/solov2/solov2_r101_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2_r101_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..96aaac0a7c2689a125ac0a68edaff2a76dfc773d --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2_r101_fpn_ms-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './solov2_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/solov2/solov2_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/solov2/solov2_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..138ca010b5f3f96a4f296ffbe66cb1be3add7ec2 --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2_r50_fpn_1x_coco.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='SOLOv2', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOV2Head', + num_classes=80, + in_channels=256, + feat_channels=512, + stacked_convs=4, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + mask_feature_head=dict( + feat_channels=128, + start_level=0, + end_level=3, + out_channels=256, + mask_stride=4, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + 
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), clip_grad=dict(max_norm=35, norm_type=2)) + +val_evaluator = dict(metric='segm') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/solov2/solov2_r50_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2_r50_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f09827efbe4e135a784b0808604dbc855ed47e --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2_r50_fpn_ms-3x_coco.py @@ -0,0 +1,35 @@ +_base_ = './solov2_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), + (1333, 672), (1333, 640)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# training schedule for 3x +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/solov2/solov2_x101-dcn_fpn_ms-3x_coco.py b/mmpose/configs/mmdet/solov2/solov2_x101-dcn_fpn_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..612c45eb437efc481948edb660ef1a3eebbcfebe --- /dev/null +++ b/mmpose/configs/mmdet/solov2/solov2_x101-dcn_fpn_ms-3x_coco.py @@ -0,0 +1,17 @@ +_base_ = './solov2_r50_fpn_ms-3x_coco.py' + +# model settings +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), + mask_head=dict( + mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), + dcn_cfg=dict(type='DCNv2'), + dcn_apply_to_all_conv=True)) diff --git a/mmpose/configs/mmdet/sort/README.md b/mmpose/configs/mmdet/sort/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8f035fded78e53fbe5ee50df8dce7ad97319cc6c --- /dev/null +++ b/mmpose/configs/mmdet/sort/README.md @@ -0,0 +1,108 @@ +# Simple online and realtime tracking + +## Abstract + + + +This paper explores a pragmatic approach to multiple object tracking where the main focus is to associate objects efficiently for online and realtime applications. To this end, detection quality is identified as a key factor influencing tracking performance, where changing the detector can improve tracking by up to 18.9%. Despite only using a rudimentary combination of familiar techniques such as the Kalman Filter and Hungarian algorithm for the tracking components, this approach achieves an accuracy comparable to state-of-the-art online trackers. 
Furthermore, due to the simplicity of our tracking method, the tracker updates at a rate of 260 Hz which is over 20x faster than other state-of-the-art trackers. + + + +
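+
+The tracking step itself needs little machinery: each track's box is predicted forward by the Kalman filter and detections are assigned to tracks with the Hungarian algorithm over an IoU cost. The snippet below is a minimal, self-contained sketch of that association step; it is illustrative only and is not the `SORTTracker` implementation used by the configs in this directory. The 0.5 threshold mirrors `match_iou_thr=0.5` in the tracker config.
+
+```python
+# Minimal sketch of SORT-style association: Hungarian matching on an IoU cost.
+import numpy as np
+from scipy.optimize import linear_sum_assignment
+
+
+def iou_matrix(tracks, dets):
+    """Pairwise IoU between track boxes and detection boxes (x1, y1, x2, y2)."""
+    ious = np.zeros((len(tracks), len(dets)))
+    for i, t in enumerate(tracks):
+        for j, d in enumerate(dets):
+            x1, y1 = max(t[0], d[0]), max(t[1], d[1])
+            x2, y2 = min(t[2], d[2]), min(t[3], d[3])
+            inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
+            union = ((t[2] - t[0]) * (t[3] - t[1]) +
+                     (d[2] - d[0]) * (d[3] - d[1]) - inter)
+            ious[i, j] = inter / (union + 1e-6)
+    return ious
+
+
+def associate(track_boxes, det_boxes, iou_thr=0.5):
+    """Match Kalman-predicted track boxes to detections; reject weak overlaps."""
+    ious = iou_matrix(track_boxes, det_boxes)
+    rows, cols = linear_sum_assignment(1.0 - ious)
+    return [(int(r), int(c)) for r, c in zip(rows, cols) if ious[r, c] >= iou_thr]
+
+
+tracks = np.array([[0, 0, 100, 200], [300, 300, 400, 500]], dtype=float)
+dets = np.array([[305, 310, 398, 505], [2, 5, 99, 210]], dtype=float)
+print(associate(tracks, dets))  # [(0, 1), (1, 0)]
+```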
+ +
+
+## Citation
+
+```latex
+@inproceedings{bewley2016simple,
+  title={Simple online and realtime tracking},
+  author={Bewley, Alex and Ge, Zongyuan and Ott, Lionel and Ramos, Fabio and Upcroft, Ben},
+  booktitle={2016 IEEE International Conference on Image Processing (ICIP)},
+  pages={3464--3468},
+  year={2016},
+  organization={IEEE}
+}
+```
+
+## Results and models on MOT17
+
+| Method | Detector | ReID | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download |
+| :----: | :----------------: | :--: | :--------: | :------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :----------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------: |
+| SORT | R50-FasterRCNN-FPN | - | half-train | half-val | N | 18.6 | 52.0 | 62.0 | 57.8 | 15150 | 40410 | 5847 | [config](sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py) | [detector](https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth) |
+
+## Get started
+
+### 1. Development Environment Setup
+
+For the tracking development environment setup, please refer to this [document](../../docs/en/get_started.md).
+
+### 2. Dataset Prepare
+
+For tracking dataset preparation, please refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md).
+
+### 3. Training
+
+We implement SORT with independently trained detector models.
+Note that, due to settings such as the learning rate in the default configuration file,
+we recommend using 8 GPUs for training in order to reproduce the reported accuracy.
+
+You can train the detector as follows.
+
+```shell script
+# Train Faster R-CNN on the mot17-half-train dataset with the following command.
+# The number after the config file represents the number of GPUs used. Here we use 8 GPUs.
+bash tools/dist_train.sh configs/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8
+```
+
+If you want to know more about the detailed usage of `train.py/dist_train.sh/slurm_train.sh`,
+please refer to this [document](../../docs/en/user_guides/tracking_train_test.md).
+
+### 4. Testing and evaluation
+
+### 4.1 Example on MOTxx-halfval dataset
+
+**4.1.1 Use a separately trained detector model for evaluation and testing**
+
+```shell script
+# Example 1: Test on the motXX-half-val set.
+# The number after the config file represents the number of GPUs used. Here we use 8 GPUs.
+bash tools/dist_test_tracking.sh configs/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py 8 --detector ${DETECTOR_CHECKPOINT_PATH}
+```
+
+**4.1.2 Use video-based evaluation and testing**
+
+We provide two ways of evaluation and testing: image-based and video-based.
+If you want to use video-based evaluation and testing, you can modify the config as follows:
+
+```
+val_dataloader = dict(
+    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False))
+```
+
+### 4.2 Example on MOTxx-test dataset
+
+If you want to get the results of the [MOT Challenge](https://motchallenge.net/) test set,
+please use the following command to generate result files that can be used for submission.
+The results will be stored in `./mot_17_test_res`; you can modify the save path in the `test_evaluator` of the config.
+ +```shell script +# Example 2: Test on motxx-test set +# The number after config file represents the number of GPUs used +bash tools/dist_test_tracking.sh configs/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py 8 --detector ${DETECTOR_CHECKPOINT_PATH} +``` + +If you want to know about more detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5.Inference + +Use a single GPU to predict a video and save it as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py --detector ${DETECTOR_CHECKPOINT_PATH} --out mot.mp4 +``` + +If you want to know about more detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md). diff --git a/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d5b72ce3fff73504a0c032867d246bc4e30123 --- /dev/null +++ b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/mot_challenge_det.py', '../_base_/default_runtime.py' +] + +model = dict( + rpn_head=dict( + bbox_coder=dict(clip_border=False), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + bbox_head=dict( + num_classes=1, + bbox_coder=dict(clip_border=False), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth' # noqa: E501 + )) + +# training schedule for 4e +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=4, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict(type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=100), + dict( + type='MultiStepLR', + begin=0, + end=4, + by_epoch=True, + milestones=[3], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17train.py b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17train.py new file mode 100644 index 0000000000000000000000000000000000000000..83647061c7f59dc8a6e8d033cdb8dc81de648df4 --- /dev/null +++ b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17train.py @@ -0,0 +1,11 @@ +_base_ = ['./faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval'] +# data +data_root = 'data/MOT17/' +train_dataloader = dict( + dataset=dict(ann_file='annotations/train_cocoformat.json')) +val_dataloader = dict( + dataset=dict(ann_file='annotations/train_cocoformat.json')) +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/train_cocoformat.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20halftrain_test-mot20halfval.py 
b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20halftrain_test-mot20halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..a6d14ad8be2a939bce168f4f09f08dde50f140c8 --- /dev/null +++ b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20halftrain_test-mot20halfval.py @@ -0,0 +1,29 @@ +_base_ = ['./faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval'] +model = dict( + rpn_head=dict(bbox_coder=dict(clip_border=True)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(clip_border=True), num_classes=1))) +# data +data_root = 'data/MOT20/' +train_dataloader = dict(dataset=dict(data_root=data_root)) +val_dataloader = dict(dataset=dict(data_root=data_root)) +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + + 'annotations/half-val_cocoformat.json') +test_evaluator = val_evaluator + +# training schedule for 8e +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=8, val_interval=1) + +# learning rate +param_scheduler = [ + dict(type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=100), + dict( + type='MultiStepLR', + begin=0, + end=8, + by_epoch=True, + milestones=[6], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20train_test-mot20train.py b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20train_test-mot20train.py new file mode 100644 index 0000000000000000000000000000000000000000..85c859732cb3e4742d3003d555f72f4cc7ac2e05 --- /dev/null +++ b/mmpose/configs/mmdet/sort/faster-rcnn_r50_fpn_8xb2-8e_mot20train_test-mot20train.py @@ -0,0 +1,32 @@ +_base_ = ['./faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval'] +model = dict( + rpn_head=dict(bbox_coder=dict(clip_border=True)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(clip_border=True), num_classes=1))) +# data +data_root = 'data/MOT20/' +train_dataloader = dict( + dataset=dict( + data_root=data_root, ann_file='annotations/train_cocoformat.json')) +val_dataloader = dict( + dataset=dict( + data_root=data_root, ann_file='annotations/train_cocoformat.json')) +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/train_cocoformat.json') +test_evaluator = val_evaluator + +# training schedule for 8e +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=8, val_interval=1) + +# learning rate +param_scheduler = [ + dict(type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=100), + dict( + type='MultiStepLR', + begin=0, + end=8, + by_epoch=True, + milestones=[6], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/sort/metafile.yml b/mmpose/configs/mmdet/sort/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c582ce353df6344aaa2fe25e0f410bb458e50803 --- /dev/null +++ b/mmpose/configs/mmdet/sort/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: SORT + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - FPN + Paper: + URL: https://arxiv.org/abs/1602.00763 + Title: Simple Online and Realtime Tracking + README: configs/sort/README.md + +Models: + - Name: sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval + In Collection: SORT + Config: configs/mot/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: MOT17-half-train + inference time (ms/im): + - value: 53.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640, 1088) + Results: + - Task: Multiple Object 
Tracking + Dataset: MOT17-half-val + Metrics: + MOTA: 62.0 + IDF1: 57.8 + HOTA: 52.0 + Weights: https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth diff --git a/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..78acb774ec22b7555e633b541c21fe20beb75ce9 --- /dev/null +++ b/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', + '../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py' +] + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=1), + visualization=dict(type='TrackVisualizationHook', draw=False)) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] + +detector = _base_.model +detector.pop('data_preprocessor') +detector.rpn_head.bbox_coder.update(dict(clip_border=False)) +detector.roi_head.bbox_head.update(dict(num_classes=1)) +detector.roi_head.bbox_head.bbox_coder.update(dict(clip_border=False)) +detector['init_cfg'] = dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmtracking/mot/' + 'faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth') # noqa: E501 +del _base_.model + +model = dict( + type='DeepSORT', + data_preprocessor=dict( + type='TrackDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + rgb_to_bgr=False, + pad_size_divisor=32), + detector=detector, + tracker=dict( + type='SORTTracker', + motion=dict(type='KalmanFilter', center_only=False), + obj_score_thr=0.5, + match_iou_thr=0.5, + reid=None)) + +train_dataloader = None + +train_cfg = None +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py b/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py new file mode 100644 index 0000000000000000000000000000000000000000..921652c4430ccf63cd5850884b2a064e8dc73251 --- /dev/null +++ b/mmpose/configs/mmdet/sort/sort_faster-rcnn_r50_fpn_8xb2-4e_mot17train_test-mot17test.py @@ -0,0 +1,15 @@ +_base_ = [ + './sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain' + '_test-mot17halfval.py' +] + +# dataloader +val_dataloader = dict( + dataset=dict(ann_file='annotations/train_cocoformat.json')) +test_dataloader = dict( + dataset=dict( + ann_file='annotations/test_cocoformat.json', + data_prefix=dict(img_path='test'))) + +# evaluator +test_evaluator = dict(format_only=True, outfile_prefix='./mot_17_test_res') diff --git a/mmpose/configs/mmdet/sparse_rcnn/README.md b/mmpose/configs/mmdet/sparse_rcnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2e8e365b3df2476bb2d8f9acfe76f24fcf7756ea --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/README.md @@ -0,0 +1,38 @@ +# Sparse R-CNN + +> [Sparse R-CNN: End-to-End Object Detection with Learnable Proposals](https://arxiv.org/abs/2011.12450) + + + +## Abstract + +We present Sparse R-CNN, a purely sparse method for object 
detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors. + +
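+
+As a rough illustration of the core idea, the fixed set of N proposals can be stored as learnable parameters, e.g. an embedding of normalized (cx, cy, w, h) boxes plus one feature vector per proposal. The sketch below (assuming PyTorch) is illustrative only; it is not the `EmbeddingRPNHead` used by the configs in this directory, which plays this role with `num_proposals=100`.
+
+```python
+# Illustrative sketch of learnable proposals (not the mmdet EmbeddingRPNHead code).
+import torch
+import torch.nn as nn
+
+
+class LearnableProposals(nn.Module):
+    """N learnable boxes in normalized (cx, cy, w, h) plus a feature per proposal."""
+
+    def __init__(self, num_proposals=100, feat_channels=256):
+        super().__init__()
+        self.proposal_boxes = nn.Embedding(num_proposals, 4)
+        self.proposal_feats = nn.Embedding(num_proposals, feat_channels)
+        # Start from identical centered boxes; they are learned end to end
+        # together with the rest of the detector.
+        nn.init.constant_(self.proposal_boxes.weight, 0.5)
+
+    def forward(self, img_h, img_w):
+        cx, cy, w, h = self.proposal_boxes.weight.clamp(0, 1).unbind(-1)
+        # Convert normalized (cx, cy, w, h) to absolute (x1, y1, x2, y2).
+        boxes = torch.stack(
+            [(cx - w / 2) * img_w, (cy - h / 2) * img_h,
+             (cx + w / 2) * img_w, (cy + h / 2) * img_h], dim=-1)
+        return boxes, self.proposal_feats.weight
+
+
+proposals = LearnableProposals(num_proposals=100)
+boxes, feats = proposals(img_h=800, img_w=1333)
+print(boxes.shape, feats.shape)  # torch.Size([100, 4]) torch.Size([100, 256])
+```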
+ +
+ +## Results and Models + +| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | Config | Download | +| :----------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :-----------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](./sparse-rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) | +| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](./sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) | +| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](./sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) | +| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](./sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) | +| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](./sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) | + +### Notes + +We observe about 0.3 AP noise especially when using ResNet-101 as the backbone. 
+ +## Citation + +```latex +@article{peize2020sparse, + title = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals}, + author = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo}, + journal = {arXiv preprint arXiv:2011.12450}, + year = {2020} +} +``` diff --git a/mmpose/configs/mmdet/sparse_rcnn/metafile.yml b/mmpose/configs/mmdet/sparse_rcnn/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..8fe2531893b99662bd9e5dbbc1d6f9a6ced00325 --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/metafile.yml @@ -0,0 +1,80 @@ +Collections: + - Name: Sparse R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - Sparse R-CNN + Paper: + URL: https://arxiv.org/abs/2011.12450 + Title: 'Sparse R-CNN: End-to-End Object Detection with Learnable Proposals' + README: configs/sparse_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/sparse_rcnn.py#L6 + Version: v2.9.0 + +Models: + - Name: sparse-rcnn_r50_fpn_1x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth + + - Name: sparse-rcnn_r50_fpn_ms-480-800-3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth + + - Name: sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth + + - Name: sparse-rcnn_r101_fpn_ms-480-800-3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth + + - Name: sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth diff --git 
a/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..09c11c6565ea2444fe8ffc930ca49fbffff3e8fa --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a51f11ce5b6d55b2037461a93aa2bd18c8f2639d --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..88354427b4138f4f5587f2a4a047bad654693780 --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,101 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +num_stages = 6 +num_proposals = 100 +model = dict( + type='SparseRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + add_extra_convs='on_input', + num_outs=4), + rpn_head=dict( + type='EmbeddingRPNHead', + num_proposals=num_proposals, + proposal_feature_channel=256), + roi_head=dict( + type='SparseRoIHead', + num_stages=num_stages, + stage_loss_weights=[1] * num_stages, + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='DIIHead', + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=False, + target_means=[0., 0., 0., 0.], + target_stds=[0.5, 0.5, 1., 1.])) for _ in 
range(num_stages) + ]), + # training and testing settings + train_cfg=dict( + rpn=None, + rcnn=[ + dict( + assigner=dict( + type='HungarianAssigner', + match_costs=[ + dict(type='FocalLossCost', weight=2.0), + dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'), + dict(type='IoUCost', iou_mode='giou', weight=2.0) + ]), + sampler=dict(type='PseudoSampler'), + pos_weight=1) for _ in range(num_stages) + ]), + test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + _delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001), + clip_grad=dict(max_norm=1, norm_type=2)) diff --git a/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..93edc0314b510c635f703f82e39c446ed056c6ea --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py @@ -0,0 +1,43 @@ +_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py' +num_proposals = 300 +model = dict( + rpn_head=dict(num_proposals=num_proposals), + test_cfg=dict( + _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) + +# augmentation strategy originates from DETR. +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..156028d7cdd22c32c00a765c6cf86b8f9e2df48b --- /dev/null +++ b/mmpose/configs/mmdet/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py @@ -0,0 +1,32 @@ +_base_ = './sparse-rcnn_r50_fpn_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning policy +max_epochs = 36 +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + 
milestones=[27, 33], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/ssd/README.md b/mmpose/configs/mmdet/ssd/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b3ca9128fd483841eaac6943e9fac68a116eb25 --- /dev/null +++ b/mmpose/configs/mmdet/ssd/README.md @@ -0,0 +1,62 @@ +# SSD + +> [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) + + + +## Abstract + +We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For 300×300 input, SSD achieves 72.1% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for 500×500 input, SSD achieves 75.1% mAP, outperforming a comparable state of the art Faster R-CNN model. + +
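+
+As a rough sketch of how default boxes are laid out, the snippet below enumerates boxes of a few scales and aspect ratios at every cell of a single feature map. It is illustrative only (the stride and sizes are example values for a 300×300 input); the configs in this directory rely on mmdet's `SSDAnchorGenerator` instead.
+
+```python
+# Minimal sketch of SSD-style default box generation for one feature map level.
+import itertools
+import math
+
+
+def default_boxes(fmap_size, stride, min_size, max_size, ratios=(2,)):
+    """Return (cx, cy, w, h) default boxes for every cell of one feature map."""
+    sizes = [(min_size, min_size),                    # small square box
+             (math.sqrt(min_size * max_size),) * 2]   # large square box
+    for r in ratios:                                  # rectangular boxes, ratio r and 1/r
+        sizes += [(min_size * math.sqrt(r), min_size / math.sqrt(r)),
+                  (min_size / math.sqrt(r), min_size * math.sqrt(r))]
+    boxes = []
+    for y, x in itertools.product(range(fmap_size), repeat=2):
+        cx, cy = (x + 0.5) * stride, (y + 0.5) * stride
+        boxes += [(cx, cy, w, h) for w, h in sizes]
+    return boxes
+
+
+# Example: a 10x10 feature map of a 300x300 input (stride 30), 6 boxes per cell.
+boxes = default_boxes(fmap_size=10, stride=30, min_size=111, max_size=162, ratios=(2, 3))
+print(len(boxes))  # 10 * 10 * 6 = 600 default boxes
+```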
+ +
+ +## Results and models of SSD + +| Backbone | Size | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :--: | :---: | :-----: | :------: | :------------: | :----: | :------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| VGG16 | 300 | caffe | 120e | 9.9 | 43.7 | 25.5 | [config](./ssd300_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428.log.json) | +| VGG16 | 512 | caffe | 120e | 19.4 | 30.7 | 29.5 | [config](./ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849.log.json) | + +## Results and models of SSD-Lite + +| Backbone | Size | Training from scratch | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :--: | :-------------------: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MobileNetV2 | 320 | yes | 600e | 4.0 | 69.9 | 21.3 | [config](./ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627.log.json) | + +## Notice + +### Compatibility + +In v2.14.0, [PR5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored SSD neck and head for more +flexible usage. If users want to use the SSD checkpoint trained in the older versions, we provide a scripts +`tools/model_converters/upgrade_ssd_version.py` to convert the model weights. + +```bash +python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} + +``` + +- OLD_MODEL_PATH: the path to load the old version SSD model. +- NEW_MODEL_PATH: the path to save the converted model weights. + +### SSD-Lite training settings + +There are some differences between our implementation of MobileNetV2 SSD-Lite and the one in [TensorFlow 1.x detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) . + +1. Use 320x320 as input size instead of 300x300. +2. The anchor sizes are different. +3. The C4 feature map is taken from the last layer of stage 4 instead of the middle of the block. +4. The model in TensorFlow1.x is trained on coco 2014 and validated on coco minival2014, but we trained and validated the model on coco 2017. The mAP on val2017 is usually a little lower than minival2014 (refer to the results in TensorFlow Object Detection API, e.g., MobileNetV2 SSD gets 22 mAP on minival2014 but 20.2 mAP on val2017). 
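+
+Regarding point 2 above, the SSD paper derives per-level box scales from the linear rule s_k = s_min + (s_max - s_min)(k - 1)/(m - 1) with s_min = 0.2 and s_max = 0.9, whereas the config in this directory sets `min_sizes`/`max_sizes` manually. The snippet below only illustrates that formula for a 320 input; it does not reproduce the values used here.
+
+```python
+# Per-level default box scales from the SSD paper formula (illustrative only):
+#   s_k = s_min + (s_max - s_min) * (k - 1) / (m - 1),  k = 1..m
+def ssd_scales(num_levels=6, s_min=0.2, s_max=0.9):
+    return [s_min + (s_max - s_min) * k / (num_levels - 1) for k in range(num_levels)]
+
+
+input_size = 320
+sizes = [round(s * input_size) for s in ssd_scales()]
+print(sizes)  # [64, 109, 154, 198, 243, 288] vs. the manually set min_sizes above
+```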
+ +## Citation + +```latex +@article{Liu_2016, + title={SSD: Single Shot MultiBox Detector}, + journal={ECCV}, + author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.}, + year={2016}, +} +``` diff --git a/mmpose/configs/mmdet/ssd/metafile.yml b/mmpose/configs/mmdet/ssd/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..190a207ccc9b62a002d026f917d66778e5cee8b7 --- /dev/null +++ b/mmpose/configs/mmdet/ssd/metafile.yml @@ -0,0 +1,78 @@ +Collections: + - Name: SSD + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - VGG + Paper: + URL: https://arxiv.org/abs/1512.02325 + Title: 'SSD: Single Shot MultiBox Detector' + README: configs/ssd/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.14.0/mmdet/models/dense_heads/ssd_head.py#L16 + Version: v2.14.0 + +Models: + - Name: ssd300_coco + In Collection: SSD + Config: configs/ssd/ssd300_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 22.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (300, 300) + Epochs: 120 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 25.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth + + - Name: ssd512_coco + In Collection: SSD + Config: configs/ssd/ssd512_coco.py + Metadata: + Training Memory (GB): 19.4 + inference time (ms/im): + - value: 32.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512, 512) + Epochs: 120 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 29.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth + + - Name: ssdlite_mobilenetv2-scratch_8xb24-600e_coco + In Collection: SSD + Config: configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 14.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (320, 320) + Epochs: 600 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 21.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth diff --git a/mmpose/configs/mmdet/ssd/ssd300_coco.py b/mmpose/configs/mmdet/ssd/ssd300_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..796d25c905350a8ed263b9cd1d2f8027b8c9a3ca --- /dev/null +++ b/mmpose/configs/mmdet/ssd/ssd300_coco.py @@ -0,0 +1,71 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +# dataset settings +input_size = 300 +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, 
+ contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=2, + batch_sampler=None, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type={{_base_.dataset_type}}, + data_root={{_base_.data_root}}, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args={{_base_.backend_args}}))) +val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)) + +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/ssd/ssd512_coco.py b/mmpose/configs/mmdet/ssd/ssd512_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7acd6144202e8fee232e3ed49a557d3cf7c53e15 --- /dev/null +++ b/mmpose/configs/mmdet/ssd/ssd512_coco.py @@ -0,0 +1,60 @@ +_base_ = 'ssd300_coco.py' + +# model settings +input_size = 512 +model = dict( + neck=dict( + out_channels=(512, 1024, 512, 256, 256, 256, 256), + level_strides=(2, 2, 2, 2, 1), + level_paddings=(1, 1, 1, 1, 1), + last_kernel_size=4), + bbox_head=dict( + in_channels=(512, 1024, 512, 256, 256, 256, 256), + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.1, 0.9), + strides=[8, 16, 32, 64, 128, 256, 512], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py b/mmpose/configs/mmdet/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4e508f20ecf33e58ddfe6ff8ee94f516d3e03f79 --- /dev/null +++ b/mmpose/configs/mmdet/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py @@ -0,0 +1,158 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1) +model = dict( + type='SingleStageDetector', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=80, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +env_cfg = dict(cudnn_benchmark=True) + +# dataset settings +input_size = 320 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=data_preprocessor['mean'], + to_rgb=data_preprocessor['bgr_to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 
'scale_factor')) +] +train_dataloader = dict( + batch_size=24, + num_workers=4, + batch_sampler=None, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type={{_base_.dataset_type}}, + data_root={{_base_.data_root}}, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline))) +val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# training schedule +max_epochs = 120 +train_cfg = dict(max_epochs=max_epochs, val_interval=5) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='CosineAnnealingLR', + begin=0, + T_max=max_epochs, + end=max_epochs, + by_epoch=True, + eta_min=0) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5)) + +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (24 samples per GPU) +auto_scale_lr = dict(base_batch_size=192) diff --git a/mmpose/configs/mmdet/strong_baselines/README.md b/mmpose/configs/mmdet/strong_baselines/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5db3e08e0774060913382b5b25cfe515bd7ead5 --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/README.md @@ -0,0 +1,20 @@ +# Strong Baselines + + + +We train Mask R-CNN with large-scale jitter and longer schedule as strong baselines. +The modifications follow those in [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/configs/new_baselines). + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------: | :----------------------: | +| R-50-FPN | pytorch | 50e | | | | | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-50e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | pytorch | 100e | | | | | [config](./mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | caffe | 100e | | | 44.7 | 40.4 | [config](./mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | caffe | 400e | | | | | [config](./mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py) | [model](<>) \| [log](<>) | + +## Notice + +When using large-scale jittering, there are sometimes empty proposals in the box and mask heads during training. +This requires MMSyncBN that allows empty tensors. Therefore, please use mmcv-full>=1.3.14 to train models supported in this directory. 
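+
+A simple guard before launching training can catch an incompatible environment early. The snippet below is an illustrative sketch that only relies on `mmcv.__version__`; it is not part of the configs.
+
+```python
+# Illustrative environment check: the models here need mmcv-full >= 1.3.14 so
+# that MMSyncBN can handle the empty tensors produced by large-scale jittering.
+import re
+
+import mmcv
+
+
+def parse_version(version):
+    # Keep only the leading digits of each field, e.g. '1.3.14rc1' -> (1, 3, 14).
+    fields = []
+    for field in version.split('.')[:3]:
+        match = re.match(r'\d+', field)
+        fields.append(int(match.group()) if match else 0)
+    return tuple(fields)
+
+
+assert parse_version(mmcv.__version__) >= (1, 3, 14), (
+    f'mmcv-full>=1.3.14 is required for these configs, got {mmcv.__version__}')
+```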
diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b004d740a8f1e303bc4ad32593baad021ccae710 --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py @@ -0,0 +1,4 @@ +_base_ = 'mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa + +# Enable automatic-mixed-precision training with AmpOptimWrapper. +optim_wrapper = dict(type='AmpOptimWrapper') diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..70e92a82e0cd1f083fbb87035f61877da4c11022 --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py @@ -0,0 +1,68 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../common/lsj-100e_coco-instance.py' +] +image_size = (1024, 1024) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # use caffe norm + data_preprocessor=dict( + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + + # pad_size_divisor=32 is unnecessary in training but necessary + # in testing. 
+ pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + frozen_stages=-1, + norm_eval=False, + norm_cfg=norm_cfg, + init_cfg=None, + style='caffe'), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomResize', + scale=image_size, + ratio_range=(0.1, 2.0), + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +# Use RepeatDataset to speed up training +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..cb64c9b6865634412c8b9d951b588cf0fb8cd32b --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py @@ -0,0 +1,20 @@ +_base_ = './mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) +train_dataloader = dict(dataset=dict(times=4 * 4)) +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.067, + by_epoch=False, + begin=0, + end=500 * 4), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[22, 24], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7fab2c72114cbe8a4d6cd3bdddb4e7c3b8dc2d0c --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py @@ -0,0 +1,4 @@ +_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' + +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
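A note on the `train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))` override that closes the config above: the base `lsj-100e_coco-instance.py` wraps the COCO dataset in a `RepeatDataset`, so the pipeline lives two `dataset` levels deep and the override has to nest accordingly. The sketch below imitates the recursive dict merge that makes this work; MMEngine's real config merging has additional rules (e.g. `_delete_`), and the `batch_size` and placeholder pipeline values here are made up for illustration only.

```python
# Simplified sketch of how a nested dict override reaches the inner dataset's
# pipeline. Treat it purely as an illustration of MMEngine-style merging.
def merge(base: dict, override: dict) -> dict:
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)
        else:
            out[key] = value
    return out

base_dataloader = dict(
    batch_size=8,                       # illustrative value only
    dataset=dict(                       # RepeatDataset from lsj-100e_coco-instance.py
        type='RepeatDataset',
        times=4,
        dataset=dict(type='CocoDataset', pipeline=['<default pipeline>'])))

override = dict(dataset=dict(dataset=dict(pipeline=['<LSJ pipeline>'])))
merged = merge(base_dataloader, override)
print(merged['dataset']['dataset']['pipeline'])  # ['<LSJ pipeline>']
```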
+optim_wrapper = dict(type='AmpOptimWrapper') diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8e06587fb03d42958142cac9ce7b15e7a19a9f6d --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../common/lsj-100e_coco-instance.py' +] + +image_size = (1024, 1024) +batch_augments = [ + dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # the model is trained from scratch, so init_cfg is None + data_preprocessor=dict( + # pad_size_divisor=32 is unnecessary in training but necessary + # in testing. + pad_size_divisor=32, + batch_augments=batch_augments), + backbone=dict( + frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-50e_coco.py b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-50e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6621d28c0a80bd669fa857ce4eb7058a6f82296c --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-50e_coco.py @@ -0,0 +1,5 @@ +_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) +train_dataloader = dict(dataset=dict(times=2)) diff --git a/mmpose/configs/mmdet/strong_baselines/metafile.yml b/mmpose/configs/mmdet/strong_baselines/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..f72c07e64b6e72dc0c71ae114877ce5c8513be7b --- /dev/null +++ b/mmpose/configs/mmdet/strong_baselines/metafile.yml @@ -0,0 +1,24 @@ +Models: + - Name: mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco + In Collection: Mask R-CNN + Config: configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py + Metadata: + Epochs: 100 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + - LSJ + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - FPN + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + box AP: 40.4 diff --git a/mmpose/configs/mmdet/strongsort/README.md b/mmpose/configs/mmdet/strongsort/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8e08413cbc04d6b552b911b1d9fb6ad2e4205a35 --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/README.md @@ -0,0 +1,108 @@ +# StrongSORT: Make DeepSORT Great Again + +## Abstract + + + +Existing Multi-Object Tracking (MOT) methods can be roughly 
classified as tracking-by-detection and joint-detection-association paradigms. Although the latter has elicited more attention and demonstrates comparable performance relative to the former, we claim that the tracking-by-detection paradigm is still the optimal solution in terms of tracking accuracy. In this paper, we revisit the classic tracker DeepSORT and upgrade it from various aspects, i.e., detection, embedding and association. The resulting tracker, called StrongSORT, sets new HOTA and IDF1 records on MOT17 and MOT20. We also present two lightweight and plug-and-play algorithms to further refine the tracking results. Firstly, an appearance-free link model (AFLink) is proposed to associate short tracklets into complete trajectories. To the best of our knowledge, this is the first global link model without appearance information. Secondly, we propose Gaussian-smoothed interpolation (GSI) to compensate for missing detections. Instead of ignoring motion information like linear interpolation, GSI is based on the Gaussian process regression algorithm and can achieve more accurate localizations. Moreover, AFLink and GSI can be plugged into various trackers with a negligible extra computational cost (591.9 and 140.9 Hz, respectively, on MOT17). By integrating StrongSORT with the two algorithms, the final tracker StrongSORT++ ranks first on MOT17 and MOT20 in terms of HOTA and IDF1 metrics and surpasses the second-place one by 1.3 - 2.2. Code will be released soon. + + + +
+ +
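The Gaussian-smoothed interpolation (GSI) mentioned above replaces plain linear interpolation of missed detections with Gaussian process regression along each tracklet. The snippet below is a rough, self-contained sketch of that idea using scikit-learn; it is not the authors' implementation, and its `smooth_tau` argument only loosely mirrors the `smooth_tau` option of `InterpolateTracklets` used in the configs in this directory.

```python
# Rough sketch of Gaussian-smoothed interpolation (GSI): fit a Gaussian process
# to each coordinate of a tracklet over time, then read off smoothed positions,
# including for frames where the detection was missed. Not the reference code.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

def gsi_smooth(frames, values, all_frames, smooth_tau: float = 10.0):
    """Smooth one coordinate of a tracklet and fill in missing frames."""
    gpr = GaussianProcessRegressor(
        kernel=RBF(length_scale=smooth_tau), alpha=1.0, normalize_y=True)
    gpr.fit(np.asarray(frames, dtype=float).reshape(-1, 1),
            np.asarray(values, dtype=float))
    return gpr.predict(np.asarray(all_frames, dtype=float).reshape(-1, 1))

# Frames 3 and 4 have no detections; GSI interpolates them smoothly.
observed_frames = [0, 1, 2, 5, 6]
observed_cx = [100.0, 104.0, 109.0, 121.0, 124.0]
print(gsi_smooth(observed_frames, observed_cx, all_frames=range(7)))
```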
+ +## Citation + + + +```latex +@article{du2022strongsort, + title={Strongsort: Make deepsort great again}, + author={Du, Yunhao and Song, Yang and Yang, Bo and Zhao, Yanyun}, + journal={arXiv preprint arXiv:2202.13514}, + year={2022} +} +``` + +## Results and models on MOT17 + +| Method | Detector | ReID | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download | +| :----------: | :------: | :--: | :---------------------------: | :------------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :----------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| StrongSORT++ | YOLOX-X | R50 | CrowdHuman + MOT17-half-train | MOT17-half-val | N | - | 70.9 | 78.4 | 83.3 | 15237 | 19035 | 582 | [config](strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py) | [detector](https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/yolox_x_crowdhuman_mot17-private-half_20220812_192036-b6c9ce9a.pth) [reid](https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth) [AFLink](https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth) | + +## Results and models on MOT20 + +| Method | Detector | ReID | Train Set | Test Set | Public | Inf time (fps) | HOTA | MOTA | IDF1 | FP | FN | IDSw. | Config | Download | +| :----------: | :------: | :--: | :----------------------: | :--------: | :----: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :---------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| StrongSORT++ | YOLOX-X | R50 | CrowdHuman + MOT20-train | MOT20-test | N | - | 62.9 | 75.5 | 77.3 | 29043 | 96155 | 1640 | [config](strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py) | [detector](https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/yolox_x_crowdhuman_mot20-private_20220812_192123-77c014de.pth) [reid](https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot20_20210803_212426-c83b1c01.pth) [AFLink](https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth) | + +## Get started + +### 1. Development Environment Setup + +Tracking Development Environment Setup can refer to this [document](../../docs/en/get_started.md). + +### 2. Dataset Prepare + +Tracking Dataset Prepare can refer to this [document](../../docs/en/user_guides/tracking_dataset_prepare.md). + +### 3. Training + +We implement StrongSORT with independent detector and ReID models. +Note that, due to the influence of parameters such as learning rate in default configuration file, +we recommend using 8 GPUs for training in order to reproduce accuracy. 
+ +You can train the detector as follows. + +```shell +# Training YOLOX-X on the crowdhuman and mot17-half-train datasets with the following command. +# The number after the config file represents the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/det/yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 +``` + +And you can train the ReID model as follows. + +```shell +# Training the ReID model on the mot17-train80 dataset with the following command. +# The number after the config file represents the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_train.sh configs/reid/reid_r50_8xb32-6e_mot17train80_test-mot17val20.py 8 +``` + +If you want to know more about the detailed usage of `train.py/dist_train.sh/slurm_train.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 4. Testing and evaluation + +**4.1 Example on MOTxx-halfval dataset** + +```shell +# Example 1: Test on the motXX-half-val set. +# The number after the config file represents the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_test_tracking.sh configs/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py 8 --detector ${CHECKPOINT_PATH} --reid ${CHECKPOINT_PATH} +``` + +**4.2 Example on MOTxx-test dataset** + +If you want to get the results of the [MOT Challenge](https://motchallenge.net/) test set, +please use the following command to generate result files that can be used for submission. +The result files will be stored in `./mot_20_test_res`; you can modify the saved path in `test_evaluator` of the config (see the sketch at the end of this file). + +```shell +# Example 2: Test on the motxx-test set. +# The number after the config file represents the number of GPUs used. Here we use 8 GPUs. +bash tools/dist_test_tracking.sh configs/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py 8 --detector ${CHECKPOINT_PATH} --reid ${CHECKPOINT_PATH} +``` + +If you want to know more about the detailed usage of `test_tracking.py/dist_test_tracking.sh/slurm_test_tracking.sh`, +please refer to this [document](../../docs/en/user_guides/tracking_train_test.md). + +### 5. Inference + +Use a single GPU to run tracking on a video and save the result as a video. + +```shell +python demo/mot_demo.py demo/demo_mot.mp4 configs/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py --detector ${CHECKPOINT_FILE} --reid ${CHECKPOINT_PATH} --out mot.mp4 +``` + +If you want to know more about the detailed usage of `mot_demo.py`, please refer to this [document](../../docs/en/user_guides/tracking_inference.md).
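Regarding the note in section 4.2 about changing the saved path: only the evaluator's `outfile_prefix` needs to be overridden. A minimal sketch of such a user config, where the output directory name is an arbitrary example:

```python
# Minimal sketch of a user config that only changes where the MOT submission
# files are written; everything else is inherited from the MOT20 test config.
_base_ = ['./strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py']

test_evaluator = dict(
    format_only=True,                               # only dump result files, no metrics
    outfile_prefix='./work_dirs/my_mot20_submission')  # arbitrary example path
```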
diff --git a/mmpose/configs/mmdet/strongsort/metafile.yml b/mmpose/configs/mmdet/strongsort/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..08a564b77b866ebe55e2b634faa919817a1de09a --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: StrongSORT++ + Metadata: + Training Techniques: + - SGD with Momentum + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - YOLOX + Paper: + URL: https://arxiv.org/abs/2202.13514 + Title: "StrongSORT: Make DeepSORT Great Again" + README: configs/strongsort/README.md + +Models: + - Name: strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval + In Collection: StrongSORT++ + Config: configs/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py + Metadata: + Training Data: CrowdHuman + MOT17-half-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT17-half-val + Metrics: + MOTA: 78.3 + IDF1: 83.2 + HOTA: 70.9 + Weights: + - https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/yolox_x_crowdhuman_mot17-private-half_20220812_192036-b6c9ce9a.pth + - https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth + - https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth + + - Name: strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test + In Collection: StrongSORT++ + Config: configs/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py + Metadata: + Training Data: CrowdHuman + MOT20-train + Results: + - Task: Multiple Object Tracking + Dataset: MOT20-test + Metrics: + MOTA: 75.5 + IDF1: 77.3 + HOTA: 62.9 + Weights: + - https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/yolox_x_crowdhuman_mot20-private_20220812_192123-77c014de.pth + - https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot20_20210803_212426-c83b1c01.pth + - https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth diff --git a/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..532e2aee718fb481bc81759a2853ac0fddf80e0e --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,130 @@ +_base_ = [ + './yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501 +] + +dataset_type = 'MOTChallengeDataset' +detector = _base_.model +detector.pop('data_preprocessor') +del _base_.model + +model = dict( + type='StrongSORT', + data_preprocessor=dict( + type='TrackDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(576, 1024), + size_divisor=32, + interval=10) + ]), + detector=detector, + reid=dict( + type='BaseReID', + data_preprocessor=dict(type='mmpretrain.ClsDataPreprocessor'), + backbone=dict( + type='mmpretrain.ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=380, + loss_cls=dict(type='mmpretrain.CrossEntropyLoss', 
loss_weight=1.0), + loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'))), + cmc=dict( + type='CameraMotionCompensation', + warp_mode='cv2.MOTION_EUCLIDEAN', + num_iters=100, + stop_eps=0.00001), + tracker=dict( + type='StrongSORTTracker', + motion=dict(type='KalmanFilter', center_only=False, use_nsa=True), + obj_score_thr=0.6, + reid=dict( + num_samples=None, + img_scale=(256, 128), + img_norm_cfg=dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + match_score_thr=0.3, + motion_weight=0.02, + ), + match_iou_thr=0.7, + momentums=dict(embeds=0.1, ), + num_tentatives=2, + num_frames_retain=100), + postprocess_model=dict( + type='AppearanceFreeLink', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth', # noqa: E501 + temporal_threshold=(0, 30), + spatial_threshold=50, + confidence_threshold=0.95, + )) + +train_pipeline = None +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=_base_.img_scale, keep_ratio=True), + dict( + type='Pad', + size_divisor=32, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadTrackAnnotations'), + ]), + dict(type='PackTrackInputs') +] + +train_dataloader = None +val_dataloader = dict( + # Now StrongSORT only support video_based sampling + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + _delete_=True, + type=dataset_type, + data_root=_base_.data_root, + ann_file='annotations/half-val_cocoformat.json', + data_prefix=dict(img_path='train'), + # when you evaluate track performance, you need to remove metainfo + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +train_cfg = None +optim_wrapper = None + +# evaluator +val_evaluator = dict( + _delete_=True, + type='MOTChallengeMetric', + metric=['HOTA', 'CLEAR', 'Identity'], + # use_postprocess to support AppearanceFreeLink in val_evaluator + use_postprocess=True, + postprocess_tracklet_cfg=[ + dict( + type='InterpolateTracklets', + min_num_frames=5, + max_num_frames=20, + use_gsi=True, + smooth_tau=10) + ]) +test_evaluator = val_evaluator + +default_hooks = dict(logger=dict(type='LoggerHook', interval=1)) + +del _base_.param_scheduler +del _base_.custom_hooks diff --git a/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py b/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py new file mode 100644 index 0000000000000000000000000000000000000000..eab97063932528df7e17c7d65bf9f0d13f5dfa73 --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py @@ -0,0 +1,44 @@ +_base_ = [ + './strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain' + '_test-mot17halfval.py' +] + +img_scale = (1600, 896) # width, height + +model = dict( + data_preprocessor=dict( + type='TrackDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict(type='BatchSyncRandomResize', random_size_range=(640, 1152)) + ])) + +test_pipeline = [ + dict( + type='TransformBroadcaster', + transforms=[ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + size_divisor=32, + pad_val=dict(img=(114.0, 114.0, 114.0))), + 
dict(type='LoadTrackAnnotations'), + ]), + dict(type='PackTrackInputs') +] + +val_dataloader = dict( + dataset=dict( + data_root='data/MOT17', + ann_file='annotations/train_cocoformat.json', + data_prefix=dict(img_path='train'), + pipeline=test_pipeline)) +test_dataloader = dict( + dataset=dict( + data_root='data/MOT20', + ann_file='annotations/test_cocoformat.json', + data_prefix=dict(img_path='test'), + pipeline=test_pipeline)) + +test_evaluator = dict(format_only=True, outfile_prefix='./mot_20_test_res') diff --git a/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py b/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py new file mode 100644 index 0000000000000000000000000000000000000000..59a52e4394b5825d40a99e08793147fe836b4c19 --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py @@ -0,0 +1,188 @@ +_base_ = ['../yolox/yolox_x_8xb8-300e_coco.py'] + +data_root = 'data/MOT17/' + +img_scale = (1440, 800) # width, height +batch_size = 4 + +# model settings +model = dict( + bbox_head=dict(num_classes=1), + test_cfg=dict(nms=dict(iou_threshold=0.7)), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth' # noqa: E501 + )) + +train_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + bbox_clip_border=False), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + bbox_clip_border=False), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + bbox_clip_border=False), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Resize', + scale=img_scale, + keep_ratio=True, + clip_object_border=False), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + _delete_=True, + batch_size=batch_size, + num_workers=4, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type='CocoDataset', + data_root=data_root, + ann_file='annotations/half-train_cocoformat.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_train.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', 
with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_val.json', + data_prefix=dict(img='val'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + ]), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + dataset=dict( + data_root=data_root, + ann_file='annotations/half-val_cocoformat.json', + data_prefix=dict(img='train'), + metainfo=dict(classes=('pedestrian', )), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# training settings +max_epochs = 80 +num_last_epochs = 10 +interval = 5 + +train_cfg = dict(max_epochs=max_epochs, val_begin=75, val_interval=1) + +# optimizer +# default 8 gpu +base_lr = 0.001 / 8 * batch_size +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +# learning rate +param_scheduler = [ + dict( + type='QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=1, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=1, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +default_hooks = dict( + checkpoint=dict( + interval=1, + max_keep_ckpts=5 # only keep latest 5 checkpoints + )) + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + priority=49) +] + +# evaluator +val_evaluator = dict( + ann_file=data_root + 'annotations/half-val_cocoformat.json', + format_only=False) +test_evaluator = val_evaluator + +del _base_.tta_model +del _base_.tta_pipeline +del _base_.train_dataset diff --git a/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py b/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py new file mode 100644 index 0000000000000000000000000000000000000000..d4eb3cb2c9804f0219ba91d0b5d460da342ab668 --- /dev/null +++ b/mmpose/configs/mmdet/strongsort/yolox_x_8xb4-80e_crowdhuman-mot20train_test-mot20test.py @@ -0,0 +1,108 @@ +_base_ = ['./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py'] + +data_root = 'data/MOT20/' + +img_scale = (1600, 896) # width, height + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=[ + dict(type='BatchSyncRandomResize', random_size_range=(640, 1152)) + ])) + +train_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + bbox_clip_border=True), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + bbox_clip_border=True), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + bbox_clip_border=True), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Resize', + scale=img_scale, + keep_ratio=True, + clip_object_border=True), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + 
dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type='CocoDataset', + data_root=data_root, + ann_file='annotations/train_cocoformat.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_train.json', + data_prefix=dict(img='train'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + dict( + type='CocoDataset', + data_root='data/crowdhuman', + ann_file='annotations/crowdhuman_val.json', + data_prefix=dict(img='val'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + metainfo=dict(classes=('pedestrian', )), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + ]), + ]), + pipeline=train_pipeline)) + +val_dataloader = dict( + dataset=dict( + data_root='data/MOT17', ann_file='annotations/train_cocoformat.json')) +test_dataloader = val_dataloader + +# evaluator +val_evaluator = dict(ann_file='data/MOT17/annotations/train_cocoformat.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/mmdet/swin/README.md b/mmpose/configs/mmdet/swin/README.md new file mode 100644 index 0000000000000000000000000000000000000000..99bcf6ed7102ac7cd9801a7350c7e4070b60cbf4 --- /dev/null +++ b/mmpose/configs/mmdet/swin/README.md @@ -0,0 +1,41 @@ +# Swin + +> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) + + + +## Abstract + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). 
Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. + +
+ +
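To make the abstract's "non-overlapping local windows" concrete, the sketch below shows the window-partitioning step that precedes self-attention in each Swin block, using `window_size=7` and `embed_dims=96` as in the Swin-T configs below. It is a simplified illustration, not the MMDetection backbone code.

```python
# Simplified illustration of Swin's window partitioning: a (B, H, W, C) feature
# map is cut into non-overlapping window_size x window_size patches, and
# self-attention is then computed inside each window independently.
import torch

def window_partition(x: torch.Tensor, window_size: int) -> torch.Tensor:
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C)
    return windows  # (num_windows * B, window_size, window_size, C)

feat = torch.randn(2, 56, 56, 96)                    # stage-1 resolution, embed_dims=96
print(window_partition(feat, window_size=7).shape)   # torch.Size([128, 7, 7, 96])
```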
+ +## Results and Models + +### Mask R-CNN + +| Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :---------: | :-----: | :--------------: | :--: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Swin-T | ImageNet-1K | 1x | no | no | 7.6 | | 42.7 | 39.3 | [config](./mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | no | 10.2 | | 46.0 | 41.6 | [config](./mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | yes | 7.8 | | 46.0 | 41.7 | [config](./mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006.log.json) | +| Swin-S | ImageNet-1K | 3x | yes | yes | 11.9 | | 48.2 | 43.2 | [config](./mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808.log.json) | + +### Notice + +Please follow the example +of `retinanet_swin-t-p4-w7_fpn_1x_coco.py` when you want to combine Swin Transformer with +the one-stage detector. Because there is a layer norm at the outs of Swin Transformer, you must set `start_level` as 0 in FPN, so we have to set the `out_indices` of backbone as `[1,2,3]`. 
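In config terms, the notice above boils down to the backbone/neck overrides below, abridged from `retinanet_swin-t-p4-w7_fpn_1x_coco.py` in this directory (only the fields the notice talks about are shown; the elided Swin-T settings are unchanged):

```python
# Abridged view of retinanet_swin-t-p4-w7_fpn_1x_coco.py: the backbone skips
# stage 0 (out_indices starts at 1), so the FPN must start from level 0 of the
# three feature maps it actually receives.
model = dict(
    backbone=dict(
        type='SwinTransformer',
        out_indices=(1, 2, 3),        # only the indices consumed by the FPN
        # ... remaining Swin-T settings unchanged ...
    ),
    neck=dict(
        in_channels=[192, 384, 768],  # channels of Swin-T stages 1-3
        start_level=0,
        num_outs=5))
```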
+ +## Citation + +```latex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/swin/mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco.py b/mmpose/configs/mmdet/swin/mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4a3e8ad900553c38d11ddc7747cbc0f244f6b4c7 --- /dev/null +++ b/mmpose/configs/mmdet/swin/mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py' +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + depths=[2, 2, 18, 2], + init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5471caa139c0b7670f995501347ddf80383e9268 --- /dev/null +++ b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,60 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + type='MaskRCNN', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[96, 192, 384, 768])) + +max_epochs = 12 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + }), + optimizer=dict( + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05)) diff --git a/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..622087ba7164fda53a70eb927b9258572b7c8ef0 --- /dev/null +++ b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py @@ -0,0 +1,3 @@ +_base_ = './mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' +# Enable automatic-mixed-precision training with AmpOptimWrapper. 
+optim_wrapper = dict(type='AmpOptimWrapper') diff --git a/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7024b73249ca8c77da89ab9e4653757f36a1d1d2 --- /dev/null +++ b/mmpose/configs/mmdet/swin/mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py @@ -0,0 +1,99 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa + +model = dict( + type='MaskRCNN', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[96, 192, 384, 768])) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[[ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + scales=[(400, 1333), (500, 1333), (600, 1333)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + keep_ratio=True) + ]]), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +max_epochs = 36 +train_cfg = dict(max_epochs=max_epochs) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[27, 33], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ }), + optimizer=dict( + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05)) diff --git a/mmpose/configs/mmdet/swin/metafile.yml b/mmpose/configs/mmdet/swin/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..763f9300d44bcc3f9348951f3640ada171c3ce05 --- /dev/null +++ b/mmpose/configs/mmdet/swin/metafile.yml @@ -0,0 +1,120 @@ +Models: + - Name: mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask-rcnn_swin-s-p4-w7_fpn_amp-ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 11.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask-rcnn_swin-t-p4-w7_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 12 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 
GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 diff --git a/mmpose/configs/mmdet/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py b/mmpose/configs/mmdet/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2f40a87e8cf8593edd92f024d0bb0ed43a87b4fb --- /dev/null +++ b/mmpose/configs/mmdet/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/timm_example/README.md b/mmpose/configs/mmdet/timm_example/README.md new file mode 100644 index 0000000000000000000000000000000000000000..848f8d3c269cc0de2fad5fa60a62ed44bfd9b29e --- /dev/null +++ b/mmpose/configs/mmdet/timm_example/README.md @@ -0,0 +1,62 @@ +# Timm Example + +> [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models) + + + +## Abstract + +Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results. + + + +## Results and Models + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------: | :------: | +| R-50 | pytorch | 1x | | | | [config](./retinanet_timm-tv-resnet50_fpn_1x_coco.py) | | +| EfficientNet-B1 | - | 1x | | | | [config](./retinanet_timm-efficientnet-b1_fpn_1x_coco.py) | | + +## Usage + +### Install additional requirements + +MMDetection supports timm backbones via `TIMMBackbone`, a wrapper class in MMPretrain. +Thus, you need to install `mmpretrain` in addition to timm. 
+If you have already installed requirements for mmdet, run + +```shell +pip install 'dataclasses; python_version<"3.7"' +pip install timm +pip install mmpretrain +``` + +See [this document](https://mmpretrain.readthedocs.io/en/latest/get_started.html#installation) for the details of MMPretrain installation. + +### Edit config + +- See example configs for basic usage. +- See the documents of [timm feature extraction](https://rwightman.github.io/pytorch-image-models/feature_extraction/#multi-scale-feature-maps-feature-pyramid) and [TIMMBackbone](https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.backbones.TIMMBackbone.html#mmpretrain.models.backbones.TIMMBackbone) for details. +- Which feature map is output depends on the backbone. + Please check `backbone out_channels` and `backbone out_strides` in your log, and modify `model.neck.in_channels` and `model.backbone.out_indices` if necessary. +- If you use Vision Transformer models that do not support `features_only=True`, add `custom_hooks = []` to your config to disable `NumClassCheckHook`. + +## Citation + +```latex +@misc{rw2019timm, + author = {Ross Wightman}, + title = {PyTorch Image Models}, + year = {2019}, + publisher = {GitHub}, + journal = {GitHub repository}, + doi = {10.5281/zenodo.4414861}, + howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} +} +``` diff --git a/mmpose/configs/mmdet/timm_example/retinanet_timm-efficientnet-b1_fpn_1x_coco.py b/mmpose/configs/mmdet/timm_example/retinanet_timm-efficientnet-b1_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b87dddf50f7179dc143b9ab9aecb07d09d4dea4b --- /dev/null +++ b/mmpose/configs/mmdet/timm_example/retinanet_timm-efficientnet-b1_fpn_1x_coco.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmpretrain +# import mmpretrain.models to trigger register_module in mmpretrain +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) + +model = dict( + backbone=dict( + _delete_=True, + type='mmpretrain.TIMMBackbone', + model_name='efficientnet_b1', + features_only=True, + pretrained=True, + out_indices=(1, 2, 3, 4)), + neck=dict(in_channels=[24, 40, 112, 320])) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/timm_example/retinanet_timm-tv-resnet50_fpn_1x_coco.py b/mmpose/configs/mmdet/timm_example/retinanet_timm-tv-resnet50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..74e43506959574abbf08feb44848f4bfa8d65719 --- /dev/null +++ b/mmpose/configs/mmdet/timm_example/retinanet_timm-tv-resnet50_fpn_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmpretrain +# import mmpretrain.models to trigger register_module in mmpretrain +custom_imports = dict( + imports=['mmpretrain.models'], allow_failed_imports=False) + +model = dict( + backbone=dict( + _delete_=True, + type='mmpretrain.TIMMBackbone', + model_name='tv_resnet50', # ResNet-50 with torchvision weights + features_only=True, + pretrained=True, + out_indices=(1, 2, 3, 4))) + +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/mmpose/configs/mmdet/tood/README.md b/mmpose/configs/mmdet/tood/README.md new file 
mode 100644 index 0000000000000000000000000000000000000000..9371d9d783ffdca321fa9befc3c93279d45673a7 --- /dev/null +++ b/mmpose/configs/mmdet/tood/README.md @@ -0,0 +1,40 @@ +# TOOD + +> [TOOD: Task-aligned One-stage Object Detection](https://arxiv.org/abs/2108.07755) + + + +## Abstract + +One-stage object detection is commonly implemented by optimizing two sub-tasks: object classification and localization, using heads with two parallel branches, which might lead to a certain level of spatial misalignment in predictions between the two tasks. In this work, we propose a Task-aligned One-stage Object Detection (TOOD) that explicitly aligns the two tasks in a learning-based manner. First, we design a novel Task-aligned Head (T-Head) which offers a better balance between learning task-interactive and task-specific features, as well as a greater flexibility to learn the alignment via a task-aligned predictor. Second, we propose Task Alignment Learning (TAL) to explicitly pull closer (or even unify) the optimal anchors for the two tasks during training via a designed sample assignment scheme and a task-aligned loss. Extensive experiments are conducted on MS-COCO, where TOOD achieves a 51.1 AP at single-model single-scale testing. This surpasses the recent one-stage detectors by a large margin, such as ATSS (47.7 AP), GFL (48.2 AP), and PAA (49.0 AP), with fewer parameters and FLOPs. Qualitative results also demonstrate the effectiveness of TOOD for better aligning the tasks of object classification and localization. + +
+ +
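TAL's sample assignment ranks anchors by a task-alignment metric that multiplies the classification score and the localization quality, commonly written as t = s^alpha * u^beta; the configs in this directory set `alpha=1` and `beta=6`. The sketch below only illustrates how that metric behaves and is not the MMDetection `TaskAlignedAssigner` code.

```python
# Illustrative sketch of the task-alignment metric t = s**alpha * u**beta,
# where s is the predicted classification score and u the IoU with the ground
# truth. With alpha=1 and beta=6 (as in the configs below), an anchor needs
# BOTH a confident score and an accurate box to rank highly.
def task_alignment(score: float, iou: float, alpha: float = 1.0, beta: float = 6.0) -> float:
    return (score ** alpha) * (iou ** beta)

# A well-classified but poorly localized anchor scores far lower than a
# jointly good one, which is what pushes the two sub-tasks to align.
print(task_alignment(score=0.9, iou=0.5))   # ~0.014
print(task_alignment(score=0.7, iou=0.9))   # ~0.372
```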
+ +## Results and Models + +| Backbone | Style | Anchor Type | Lr schd | Multi-scale Training | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------------: | :-----: | :----------: | :-----: | :------------------: | :------: | :------------: | :----: | :-------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Anchor-free | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425.log) | +| R-50 | pytorch | Anchor-based | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_anchor-based_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105.log) | +| R-50 | pytorch | Anchor-free | 2x | Y | 4.1 | | 44.5 | [config](./tood_r50_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231.log) | +| R-101 | pytorch | Anchor-free | 2x | Y | 6.0 | | 46.1 | [config](./tood_r101_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232.log) | +| R-101-dcnv2 | pytorch | Anchor-free | 2x | Y | 6.2 | | 49.3 | [config](./tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728.log) | +| X-101-64x4d | pytorch | Anchor-free | 2x | Y | 10.2 | | 47.6 | [config](./tood_x101-64x4d_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519.log) | +| X-101-64x4d-dcnv2 | pytorch | Anchor-free | 2x | Y | | | | [config](./tood_x101-64x4d-dconv-c4-c5_fpn_ms-2x_coco.py) | [model](<>) \| [log](<>) | + +\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ +\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ +\[3\] *`dcnv2` denotes deformable 
convolutional networks v2.* \\ + +## Citation + +```latex +@inproceedings{feng2021tood, + title={TOOD: Task-aligned One-stage Object Detection}, + author={Feng, Chengjian and Zhong, Yujie and Gao, Yu and Scott, Matthew R and Huang, Weilin}, + booktitle={ICCV}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/tood/metafile.yml b/mmpose/configs/mmdet/tood/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..d2bc08073a10ef153b9c97f4d2742e5f85015aa5 --- /dev/null +++ b/mmpose/configs/mmdet/tood/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: TOOD + Metadata: + Training Data: COCO + Training Techniques: + - SGD + Training Resources: 8x V100 GPUs + Architecture: + - TOOD + Paper: + URL: https://arxiv.org/abs/2108.07755 + Title: 'TOOD: Task-aligned One-stage Object Detection' + README: configs/tood/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7 + Version: v2.20.0 + +Models: + - Name: tood_r101_fpn_ms-2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101_fpn_ms-2x_coco.py + Metadata: + Training Memory (GB): 6.0 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth + + - Name: tood_x101-64x4d_fpn_ms-2x_coco + In Collection: TOOD + Config: configs/tood/tood_x101-64x4d_fpn_ms-2x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth + + - Name: tood_r101-dconv-c3-c5_fpn_ms-2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth + + - Name: tood_r50_fpn_anchor-based_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_anchor-based_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth + + - Name: tood_r50_fpn_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth + + - Name: tood_r50_fpn_ms-2x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_ms-2x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth diff --git a/mmpose/configs/mmdet/tood/tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py 
b/mmpose/configs/mmdet/tood/tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..45030a6832db39a329d0901dde4a5320f34a9b6e --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r101_fpn_ms-2x_coco.py' + +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + bbox_head=dict(num_dcn=2)) diff --git a/mmpose/configs/mmdet/tood/tood_r101_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/tood/tood_r101_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fc6ae5d942e05ac90162ca9ac67adb311d581e5b --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_r101_fpn_ms-2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r50_fpn_ms-2x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/tood/tood_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/tood/tood_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e4839d9d77e64d61b504ed8789bda225cc878da1 --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_r50_fpn_1x_coco.py @@ -0,0 +1,80 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='TOOD', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='TOODHead', + num_classes=80, + in_channels=256, + stacked_convs=6, + feat_channels=256, + anchor_type='anchor_free', + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + initial_loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + train_cfg=dict( + initial_epoch=4, + initial_assigner=dict(type='ATSSAssigner', topk=9), + assigner=dict(type='TaskAlignedAssigner', topk=13), + alpha=1, + beta=6, + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/tood/tood_r50_fpn_anchor-based_1x_coco.py b/mmpose/configs/mmdet/tood/tood_r50_fpn_anchor-based_1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..c7fbf6aff197b821de07f8d4a73f9c72e5f76288 --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_r50_fpn_anchor-based_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +model = dict(bbox_head=dict(anchor_type='anchor_based')) diff --git a/mmpose/configs/mmdet/tood/tood_r50_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/tood/tood_r50_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ffb296dccee30438977bac61b970f5844d647cfa --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_r50_fpn_ms-2x_coco.py @@ -0,0 +1,30 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +max_epochs = 24 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# training schedule for 2x +train_cfg = dict(max_epochs=max_epochs) + +# multi-scale training +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/tood/tood_x101-64x4d-dconv-c4-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/tood/tood_x101-64x4d-dconv-c4-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..43405196184715923bb22499958c74fe9bf4a2da --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_x101-64x4d-dconv-c4-c5_fpn_ms-2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_x101-64x4d_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + bbox_head=dict(num_dcn=2)) diff --git a/mmpose/configs/mmdet/tood/tood_x101-64x4d_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/tood/tood_x101-64x4d_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1651542c7562553f206ba763fb9a43838e042450 --- /dev/null +++ b/mmpose/configs/mmdet/tood/tood_x101-64x4d_fpn_ms-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './tood_r50_fpn_ms-2x_coco.py' + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/tridentnet/README.md b/mmpose/configs/mmdet/tridentnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b972b3a3c9b2de5409af9f76622e8947fd6eace1 --- /dev/null +++ b/mmpose/configs/mmdet/tridentnet/README.md @@ -0,0 +1,38 @@ +# TridentNet + +> [Scale-Aware Trident Networks for Object Detection](https://arxiv.org/abs/1901.01892) + + + +## Abstract + +Scale variation is one of the key challenges in object detection. In this work, we first present a controlled experiment to investigate the effect of receptive fields for scale variation in object detection. Based on the findings from the exploration experiments, we propose a novel Trident Network (TridentNet) aiming to generate scale-specific feature maps with a uniform representational power. 
We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. Then, we adopt a scale-aware training scheme to specialize each branch by sampling object instances of proper scales for training. As a bonus, a fast approximation version of TridentNet could achieve significant improvements without any additional parameters and computational cost compared with the vanilla detector. On the COCO dataset, our TridentNet with ResNet-101 backbone achieves state-of-the-art single-model results of 48.4 mAP. + +
+ +
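+
+The core idea above, parallel branches that share convolution weights but see different receptive fields, can be sketched in a few lines of PyTorch. This is an illustrative toy module, not the `TridentResNet` backbone registered in the configs below; the channel count and the three dilation rates (chosen to mirror `trident_dilations=(1, 2, 3)`) are assumptions made only for the example.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class TridentConvSketch(nn.Module):
+    """Toy trident block: one shared 3x3 weight applied with several
+    dilation rates, giving branches with identical parameters but
+    different receptive fields."""
+
+    def __init__(self, channels: int, dilations=(1, 2, 3)):
+        super().__init__()
+        self.dilations = dilations
+        self.weight = nn.Parameter(torch.empty(channels, channels, 3, 3))
+        nn.init.kaiming_normal_(self.weight, nonlinearity='relu')
+
+    def forward(self, x: torch.Tensor):
+        # padding=d keeps the spatial size unchanged for a 3x3 kernel
+        # with dilation d, so the branch outputs stay aligned.
+        return [
+            F.conv2d(x, self.weight, padding=d, dilation=d)
+            for d in self.dilations
+        ]
+
+
+# Example: three 1x256x64x64 maps computed from the same shared parameters.
+feats = TridentConvSketch(256)(torch.randn(1, 256, 64, 64))
+```
+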
+
+## Results and Models
+
+We report the test results using only one branch for inference.
+
+| Backbone | Style | mstrain | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download |
+| :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :------: |
+| R-50 | caffe | N | 1x | | | 37.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838.log.json) |
+| R-50 | caffe | Y | 1x | | | 37.6 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839.log.json) |
+| R-50 | caffe | Y | 3x | | | 40.3 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539.log.json) |
+
+**Note**
+
+Similar to [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet), we haven't implemented the Scale-aware Training Scheme in section 4.2 of the paper.
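+
+The single-branch inference mentioned above is expressed purely in the config: `num_branch` keeps all three dilated branches during training, while `test_branch_idx` selects the branch used at test time. A minimal override sketch, reusing only fields that already appear in the `tridentnet_r50-caffe_1x_coco.py` config shipped below:
+
+```python
+# Sketch: train with all three trident branches, run inference with branch 1.
+# These fields mirror the shipped tridentnet_r50-caffe_1x_coco.py config.
+_base_ = './tridentnet_r50-caffe_1x_coco.py'
+
+model = dict(
+    backbone=dict(num_branch=3, test_branch_idx=1),
+    roi_head=dict(num_branch=3, test_branch_idx=1))
+```
+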
+ +## Citation + +```latex +@InProceedings{li2019scale, + title={Scale-Aware Trident Networks for Object Detection}, + author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` diff --git a/mmpose/configs/mmdet/tridentnet/metafile.yml b/mmpose/configs/mmdet/tridentnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0081c5be02986efbfdad9f199aa8ccd4b599d0f --- /dev/null +++ b/mmpose/configs/mmdet/tridentnet/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: TridentNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - TridentNet Block + Paper: + URL: https://arxiv.org/abs/1901.01892 + Title: 'Scale-Aware Trident Networks for Object Detection' + README: configs/tridentnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/detectors/trident_faster_rcnn.py#L6 + Version: v2.8.0 + +Models: + - Name: tridentnet_r50-caffe_1x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50-caffe_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth + + - Name: tridentnet_r50-caffe_ms-1x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth + + - Name: tridentnet_r50-caffe_ms-3x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth diff --git a/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_1x_coco.py b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..26a4c12316ee80c7dfae1624af3f4146dba0a414 --- /dev/null +++ b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50-caffe-c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='TridentFasterRCNN', + backbone=dict( + type='TridentResNet', + trident_dilations=(1, 2, 3), + num_branch=3, + test_branch_idx=1, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1), + train_cfg=dict( + rpn_proposal=dict(max_per_img=500), + rcnn=dict( + sampler=dict(num=128, pos_fraction=0.5, + add_gt_as_proposals=False)))) diff --git a/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..806d20b90c96be9357eccd9f9ca8c880b0716cae --- /dev/null +++ b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py @@ -0,0 +1,15 @@ +_base_ = 'tridentnet_r50-caffe_1x_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4de249c60c234a9d301658594f7b072b0b48017b --- /dev/null +++ b/mmpose/configs/mmdet/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py @@ -0,0 +1,18 @@ +_base_ = 'tridentnet_r50-caffe_ms-1x_coco.py' + +# learning rate +max_epochs = 36 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/mmpose/configs/mmdet/v3det/README.md b/mmpose/configs/mmdet/v3det/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36879316f4fe0066707fecb95af4329852fe55fc --- /dev/null +++ b/mmpose/configs/mmdet/v3det/README.md @@ -0,0 +1,86 @@ +

+
+# V3Det: Vast Vocabulary Visual Detection Dataset
+
+Jiaqi Wang*, Pan Zhang*, Tao Chu*, Yuhang Cao*, Yujie Zhou, Tong Wu, Bin Wang, Conghui He, Dahua Lin
+
+(* equal contribution)
+
+Accepted to ICCV 2023 (Oral)
+
+Paper, Dataset
+
+ + + +## Abstract + +Recent advances in detecting arbitrary objects in the real world are trained and evaluated on object detection datasets with a relatively restricted vocabulary. To facilitate the development of more general visual object detection, we propose V3Det, a vast vocabulary visual detection dataset with precisely annotated bounding boxes on massive images. V3Det has several appealing properties: 1) Vast Vocabulary: It contains bounding boxes of objects from 13,204 categories on real-world images, which is 10 times larger than the existing large vocabulary object detection dataset, e.g., LVIS. 2) Hierarchical Category Organization: The vast vocabulary of V3Det is organized by a hierarchical category tree which annotates the inclusion relationship among categories, encouraging the exploration of category relationships in vast and open vocabulary object detection. 3) Rich Annotations: V3Det comprises precisely annotated objects in 243k images and professional descriptions of each category written by human experts and a powerful chatbot. By offering a vast exploration space, V3Det enables extensive benchmarks on both vast and open vocabulary object detection, leading to new observations, practices, and insights for future research. It has the potential to serve as a cornerstone dataset for developing more general visual perception systems. V3Det is available at https://v3det.openxlab.org.cn/. + +## Prepare Dataset + +Please download and prepare V3Det Dataset at [V3Det Homepage](https://v3det.openxlab.org.cn/) and [V3Det Github](https://github.com/V3Det/V3Det). + +The data includes a training set, a validation set, comprising 13,204 categories. The training set consists of 183,354 images, while the validation set has 29,821 images. The data organization is: + +``` +data/ + V3Det/ + images/ + / + |────.png + ... + ... 
+ annotations/ + |────v3det_2023_v1_category_tree.json # Category tree + |────category_name_13204_v3det_2023_v1.txt # Category name + |────v3det_2023_v1_train.json # Train set + |────v3det_2023_v1_val.json # Validation set +``` + +## Results and Models + +| Backbone | Model | Lr schd | box AP | Config | Download | +| :------: | :-------------: | :-----: | :----: | :----------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | +| R-50 | Faster R-CNN | 2x | 25.4 | [config](./faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| R-50 | Cascade R-CNN | 2x | 31.6 | [config](./cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| R-50 | FCOS | 2x | 9.4 | [config](./fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| R-50 | Deformable-DETR | 50e | 34.4 | [config](./deformable-detr-refine-twostage_r50_8xb4_sample1e-3_v3det_50e.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight/Deformable_DETR_V3Det_R50) | +| R-50 | DINO | 36e | 33.5 | [config](./dino-4scale_r50_8xb2_sample1e-3_v3det_36e.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight/DINO_V3Det_R50) | +| Swin-B | Faster R-CNN | 2x | 37.6 | [config](./faster_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//faster_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| Swin-B | Cascade R-CNN | 2x | 42.5 | [config](./cascade_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//cascade_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| Swin-B | FCOS | 2x | 21.0 | [config](./fcos_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight//fcos_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x) | +| Swin-B | Deformable-DETR | 50e | 42.5 | [config](./deformable-detr-refine-twostage_swin_16xb2_sample1e-3_v3det_50e.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight/Deformable_DETR_V3Det_SwinB) | +| Swin-B | DINO | 36e | 42.0 | [config](./dino-4scale_swin_16xb1_sample1e-3_v3det_36e.py) | [model](https://download.openxlab.org.cn/models/V3Det/V3Det/weight/DINO_V3Det_SwinB) | + +## Citation + +```latex +@inproceedings{wang2023v3det, + title = {V3Det: Vast Vocabulary Visual Detection Dataset}, + author = {Wang, Jiaqi and Zhang, Pan and Chu, Tao and Cao, Yuhang and Zhou, Yujie and Wu, Tong and Wang, Bin and He, Conghui and Lin, Dahua}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2023} +} +``` diff --git a/mmpose/configs/mmdet/v3det/cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..567c31bd0e986e071b50ff2aac9cb896d4daf6fd --- /dev/null +++ b/mmpose/configs/mmdet/v3det/cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,171 @@ +_base_ = [ + 
'../_base_/models/cascade-rcnn_r50_fpn.py', '../_base_/datasets/v3det.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + rpn_head=dict( + loss_bbox=dict(_delete_=True, type='L1Loss', loss_weight=1.0)), + roi_head=dict(bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=13204, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + cls_predictor_cfg=dict( + type='NormedLinear', tempearture=50, bias=True), + loss_cls=dict( + type='CrossEntropyCustomLoss', + num_classes=13204, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=13204, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + cls_predictor_cfg=dict( + type='NormedLinear', tempearture=50, bias=True), + loss_cls=dict( + type='CrossEntropyCustomLoss', + num_classes=13204, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=13204, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + cls_predictor_cfg=dict( + type='NormedLinear', tempearture=50, bias=True), + loss_cls=dict( + type='CrossEntropyCustomLoss', + num_classes=13204, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(nms_pre=4000, max_per_img=2000), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1, + perm_repeat_gt_cfg=dict(iou_thr=0.7, perm_range=0.01)), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1, + perm_repeat_gt_cfg=dict(iou_thr=0.7, perm_range=0.01)), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1, + perm_repeat_gt_cfg=dict(iou_thr=0.7, perm_range=0.01)), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=300))) +# dataset settings +train_dataloader = dict(batch_size=4, num_workers=8) + +# training schedule for 1x +max_iter = 68760 * 2 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=max_iter) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 2048, + by_epoch=False, + begin=0, + end=5000), + dict( + type='MultiStepLR', + 
begin=0, + end=max_iter, + by_epoch=False, + milestones=[45840 * 2, 63030 * 2], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(_delete_=True, type='AdamW', lr=1e-4 * 1, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=5730 * 2)) +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) diff --git a/mmpose/configs/mmdet/v3det/cascade_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/cascade_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..f6493323ba8d92d2628fb4784f5a12dd564460be --- /dev/null +++ b/mmpose/configs/mmdet/v3det/cascade_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,27 @@ +_base_ = [ + './cascade_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py', +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[128, 256, 512, 1024])) diff --git a/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_r50_8xb4_sample1e-3_v3det_50e.py b/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_r50_8xb4_sample1e-3_v3det_50e.py new file mode 100644 index 0000000000000000000000000000000000000000..97544a27edfd75eef4ba25fd12a122f03b392c1f --- /dev/null +++ b/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_r50_8xb4_sample1e-3_v3det_50e.py @@ -0,0 +1,108 @@ +_base_ = '../deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py' # noqa + +model = dict( + bbox_head=dict(num_classes=13204), + test_cfg=dict(max_per_img=300), +) + +data_root = 'data/V3Det/' +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] + +train_dataloader = dict( + _delete_=True, + batch_size=4, + num_workers=4, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='V3DetDataset', + data_root=data_root, + ann_file='annotations/v3det_2023_v1_train.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=train_pipeline, + backend_args=None))) +val_dataloader = dict( + dataset=dict( + type='V3DetDataset', + data_root=data_root, + ann_file='annotations/v3det_2023_v1_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + ann_file=data_root + 'annotations/v3det_2023_v1_val.json', + use_mp_eval=True, + proposal_nums=[300]) +test_evaluator = val_evaluator + +# training schedule for 50e +# when using RFS, bs32, each epoch ~ 5730 iter +max_iter = 286500 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=max_iter / 5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[229200], # 40e + gamma=0.1) +] + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', by_epoch=False, interval=5730, + max_keep_ckpts=3)) + +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) diff --git a/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_swin_16xb2_sample1e-3_v3det_50e.py b/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_swin_16xb2_sample1e-3_v3det_50e.py new file mode 100644 index 0000000000000000000000000000000000000000..e640cd604a97813a70588d5ffe23701543ab0087 --- /dev/null +++ b/mmpose/configs/mmdet/v3det/deformable-detr-refine-twostage_swin_16xb2_sample1e-3_v3det_50e.py @@ -0,0 +1,27 @@ +_base_ = 'deformable-detr-refine-twostage_r50_8xb4_sample1e-3_v3det_50e.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[256, 512, 1024]), +) + +train_dataloader = dict(batch_size=2, num_workers=2) diff --git a/mmpose/configs/mmdet/v3det/dino-4scale_r50_8xb2_sample1e-3_v3det_36e.py b/mmpose/configs/mmdet/v3det/dino-4scale_r50_8xb2_sample1e-3_v3det_36e.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e6e6be0715512b111171c4b60cca7433f8ca34 --- /dev/null +++ b/mmpose/configs/mmdet/v3det/dino-4scale_r50_8xb2_sample1e-3_v3det_36e.py @@ -0,0 +1,109 @@ +_base_ = '../dino/dino-4scale_r50_8xb2-36e_coco.py' + +model = dict( + bbox_head=dict(num_classes=13204), + test_cfg=dict(max_per_img=300), +) + +data_root = 'data/V3Det/' +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomChoice', + transforms=[ + [ + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), 
(576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ], + [ + dict( + type='RandomChoiceResize', + # The radio of all image in train dataset < 7 + # follow the original implement + scales=[(400, 4200), (500, 4200), (600, 4200)], + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='RandomChoiceResize', + scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + keep_ratio=True) + ] + ]), + dict(type='PackDetInputs') +] +train_dataloader = dict( + _delete_=True, + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type='V3DetDataset', + data_root=data_root, + ann_file='annotations/v3det_2023_v1_train.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=False), + pipeline=train_pipeline, + backend_args=None))) +val_dataloader = dict( + dataset=dict( + type='V3DetDataset', + data_root=data_root, + ann_file='annotations/v3det_2023_v1_val.json', + data_prefix=dict(img=''))) +test_dataloader = val_dataloader + +val_evaluator = dict( + ann_file=data_root + 'annotations/v3det_2023_v1_val.json', + use_mp_eval=True, + proposal_nums=[300]) +test_evaluator = val_evaluator + +# training schedule for 36e +# when using RFS, bs16, each epoch ~ 11460 iter +max_iter = 412560 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=max_iter / 5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[343800], # 30e + gamma=0.1) +] + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', + by_epoch=False, + interval=11460, + max_keep_ckpts=3)) + +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) diff --git a/mmpose/configs/mmdet/v3det/dino-4scale_swin_16xb1_sample1e-3_v3det_36e.py b/mmpose/configs/mmdet/v3det/dino-4scale_swin_16xb1_sample1e-3_v3det_36e.py new file mode 100644 index 0000000000000000000000000000000000000000..100c4ba4b8cb2c0ac3e44f5e9ddcfc37bbfe6b55 --- /dev/null +++ b/mmpose/configs/mmdet/v3det/dino-4scale_swin_16xb1_sample1e-3_v3det_36e.py @@ -0,0 +1,27 @@ +_base_ = 'dino-4scale_r50_8xb2_sample1e-3_v3det_36e.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[256, 512, 1024]), +) + +train_dataloader = dict(batch_size=1) diff --git a/mmpose/configs/mmdet/v3det/faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new 
file mode 100644 index 0000000000000000000000000000000000000000..3d306fb094806d75ec614b52a43bf6614d13eed4 --- /dev/null +++ b/mmpose/configs/mmdet/v3det/faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,72 @@ +_base_ = [ + '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/v3det.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + roi_head=dict( + bbox_head=dict( + num_classes=13204, + reg_class_agnostic=True, + cls_predictor_cfg=dict( + type='NormedLinear', tempearture=50, bias=True), + loss_cls=dict( + type='CrossEntropyCustomLoss', + num_classes=13204, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(nms_pre=4000, max_per_img=2000), + rcnn=dict( + assigner=dict( + perm_repeat_gt_cfg=dict(iou_thr=0.7, perm_range=0.01)))), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=300))) +# dataset settings +train_dataloader = dict(batch_size=4, num_workers=8) + +# training schedule for 2x +max_iter = 68760 * 2 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=max_iter) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 2048, + by_epoch=False, + begin=0, + end=5000), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[45840 * 2, 63030 * 2], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(_delete_=True, type='AdamW', lr=1e-4 * 1, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=5730 * 2)) +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) diff --git a/mmpose/configs/mmdet/v3det/faster_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/faster_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b1110811230b4bda27da9fd2e58067c7326c52 --- /dev/null +++ b/mmpose/configs/mmdet/v3det/faster_rcnn_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,27 @@ +_base_ = [ + './faster_rcnn_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py', +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[128, 256, 512, 1024])) diff --git a/mmpose/configs/mmdet/v3det/fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..b78e38c93cb0fdedff3948f1ce7b5b7787efcaea --- /dev/null +++ b/mmpose/configs/mmdet/v3det/fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,116 @@ +_base_ = [ + '../_base_/datasets/v3det.py', '../_base_/schedules/schedule_2x.py', + '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='FCOS', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=13204, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + cls_predictor_cfg=dict(type='NormedLinear', tempearture=50, bias=True), + loss_cls=dict( + type='FocalCustomLoss', + use_sigmoid=True, + num_classes=13204, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1, + perm_repeat_gt_cfg=dict(iou_thr=0.7, perm_range=0.01)), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.0001, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=300)) +# dataset settings + +backend_args = None + +train_dataloader = dict(batch_size=2, num_workers=8) + +# training schedule for 
2x +max_iter = 68760 * 2 * 2 +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=max_iter, + val_interval=max_iter) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 2048, + by_epoch=False, + begin=0, + end=5000 * 2), + dict( + type='MultiStepLR', + begin=0, + end=max_iter, + by_epoch=False, + milestones=[45840 * 2 * 2, 63030 * 2 * 2], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + _delete_=True, type='AdamW', lr=1e-4 * 0.25, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=5730 * 2)) +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False) + +find_unused_parameters = True diff --git a/mmpose/configs/mmdet/v3det/fcos_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py b/mmpose/configs/mmdet/v3det/fcos_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca952a28fc08ae9b14ad30308eff823b1bba55e --- /dev/null +++ b/mmpose/configs/mmdet/v3det/fcos_swinb_fpn_8x4_sample1e-3_mstrain_v3det_2x.py @@ -0,0 +1,27 @@ +_base_ = [ + './fcos_r50_fpn_8x4_sample1e-3_mstrain_v3det_2x.py', +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[128, 256, 512, 1024], force_grad_on_level=True)) diff --git a/mmpose/configs/mmdet/vfnet/README.md b/mmpose/configs/mmdet/vfnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..73b5c07be9e9eb3419fd363a5becf5f3c2b91641 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/README.md @@ -0,0 +1,48 @@ +# VarifocalNet + +> [VarifocalNet: An IoU-aware Dense Object Detector](https://arxiv.org/abs/2008.13367) + + + +## Abstract + +Accurately ranking the vast number of candidate detections is crucial for dense object detectors to achieve high performance. Prior work uses the classification score or a combination of classification and predicted localization scores to rank candidates. However, neither option results in a reliable ranking, thus degrading detection performance. In this paper, we propose to learn an Iou-aware Classification Score (IACS) as a joint representation of object presence confidence and localization accuracy. We show that dense object detectors can achieve a more accurate ranking of candidate detections based on the IACS. We design a new loss function, named Varifocal Loss, to train a dense object detector to predict the IACS, and propose a new star-shaped bounding box feature representation for IACS prediction and bounding box refinement. 
Combining these two new components and a bounding box refinement branch, we build an IoU-aware dense object detector based on the FCOS+ATSS architecture, which we call VarifocalNet or VFNet for short. Extensive experiments on MS COCO show that our VFNet consistently surpasses the strong baseline by ∼2.0 AP with different backbones. Our best model VFNet-X-1200 with Res2Net-101-DCN achieves a single-model single-scale AP of 55.1 on COCO test-dev, which is state-of-the-art among various object detectors. + +
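+
+The Varifocal Loss referenced above supervises the classifier with an IoU-aware target: positives regress their ground-truth IoU and are weighted by it, while negatives are down-weighted focal-style. Below is a minimal sketch of that weighting, assuming sigmoid logits and an IoU target tensor; it illustrates the idea rather than reproducing the registered `VarifocalLoss` implementation, and `alpha=0.75`, `gamma=2.0` simply mirror the values in `vfnet_r50_fpn_1x_coco.py` below.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def varifocal_loss_sketch(logits: torch.Tensor,
+                          iou_targets: torch.Tensor,
+                          alpha: float = 0.75,
+                          gamma: float = 2.0) -> torch.Tensor:
+    """Illustrative Varifocal Loss: binary cross-entropy towards the
+    IoU-aware target, weighted by the target IoU for positives and by
+    alpha * p^gamma for negatives (iou_targets == 0)."""
+    pred = logits.sigmoid()
+    pos = (iou_targets > 0).float()
+    weight = iou_targets * pos + alpha * pred.pow(gamma) * (1 - pos)
+    bce = F.binary_cross_entropy_with_logits(
+        logits, iou_targets, reduction='none')
+    return (weight * bce).sum()
+
+
+# Example with 5 predictions: two positives carrying their IoU, three negatives.
+logits = torch.randn(5)
+targets = torch.tensor([0.83, 0.61, 0.0, 0.0, 0.0])
+loss = varifocal_loss_sketch(logits, targets)
+```
+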
+ +
+ +## Introduction + +**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). + +## Results and Models + +| Backbone | Style | DCN | MS train | Lr schd | Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | +| :---------: | :-----: | :-: | :------: | :-----: | :------------: | :----------: | :---------------: | :---------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](./vfnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json) | +| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](./vfnet_r50_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json) | +| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](./vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](./vfnet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json) | +| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](./vfnet_r101_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json) | +| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](./vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](./vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](./vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | + +**Notes:** + +- The MS-train scale range is 1333x\[480:960\] (`range` mode) and the inference scale keeps 1333x800. +- DCN means using `DCNv2` in both backbone and head. +- Inference time will be updated soon. +- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet) + +## Citation + +```latex +@article{zhang2020varifocalnet, + title={VarifocalNet: An IoU-aware Dense Object Detector}, + author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko}, + journal={arXiv preprint arXiv:2008.13367}, + year={2020} +} +``` diff --git a/mmpose/configs/mmdet/vfnet/metafile.yml b/mmpose/configs/mmdet/vfnet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..1b791d01d50ad8a28bff225fa1d3f5af8d348207 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/metafile.yml @@ -0,0 +1,116 @@ +Collections: + - Name: VFNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - Varifocal Loss + Paper: + URL: https://arxiv.org/abs/2008.13367 + Title: 'VarifocalNet: An IoU-aware Dense Object Detector' + README: configs/vfnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.6.0/mmdet/models/detectors/vfnet.py#L6 + Version: v2.6.0 + +Models: + - Name: vfnet_r50_fpn_1x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth + + - Name: vfnet_r50_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth + + - Name: vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.0 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth + + - Name: vfnet_r101_fpn_1x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth + + - Name: vfnet_r101_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth + + - Name: vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth + + - Name: vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth + + - Name: vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd67a3bcce3bbb66531997133880d65af0c856a --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r101-mdconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_1x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b296a07959e43517d792f36f356404a232fb0dc3 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git 
a/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..37a7bacb5e409a75ae2cd71fc022837f09537aa7 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_2x_coco.py @@ -0,0 +1,20 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) +# learning policy +max_epochs = 24 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..62f064b7473f4e6fec3ac50962240ac1f828753f --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r101_fpn_ms-2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..08adf927599b7759dea0e2d14c37ce716482b301 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + bbox_head=dict(dcn_on_last_conv=True)) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_1x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..99bc3b5f4c78c7a7cda11e20f209ea40af7dfd80 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_1x_coco.py @@ -0,0 +1,104 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='VFNet', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='VFNetHead', + num_classes=80, + in_channels=256, + stacked_convs=3, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + center_sampling=False, + dcn_on_last_conv=False, + use_atss=True, + use_vfl=True, + loss_cls=dict( + type='VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.5), + loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + 
allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# data setting +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.01), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=None) +# learning rate +max_epochs = 12 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8eed298e81967582420ac45a241b2726c47f6a --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_r50_fpn_ms-2x_coco.py @@ -0,0 +1,36 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', scale=[(1333, 480), (1333, 960)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader +# learning policy +max_epochs = 24 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(max_epochs=max_epochs) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_res2net-101_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_res2net-101_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..94288e8e80e5be2c6e8effd38e30e239cd1e3c5f --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_res2net-101_fpn_ms-2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './vfnet_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git 
a/mmpose/configs/mmdet/vfnet/vfnet_res2net101-mdconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_res2net101-mdconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..269330d3d8c218e51c3e65b550e4afc3296f2ec4 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_res2net101-mdconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..465da0cbdf4c4ae34d648349f4f9fa2d3fb13fe6 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d-mdconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..486bcfe5ebd85f8c4ac3b211694e7dd9d13aa302 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_x101-32x4d_fpn_ms-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..14a070e73ff54d6833aced096e2d94da4171ca42 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d-mdconv-c3-c5_fpn_ms-2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d_fpn_ms-2x_coco.py b/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d_fpn_ms-2x_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..92e3f71df6818a5653ec9c0475c277d89a1adb47 --- /dev/null +++ b/mmpose/configs/mmdet/vfnet/vfnet_x101-64x4d_fpn_ms-2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50_fpn_ms-2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/mmpose/configs/mmdet/wider_face/README.md b/mmpose/configs/mmdet/wider_face/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1904506c64a893f2bfd3881c7e95bd7100fcc6f4 --- /dev/null +++ b/mmpose/configs/mmdet/wider_face/README.md @@ -0,0 +1,57 @@ +# WIDER FACE + +> [WIDER FACE: A Face Detection Benchmark](https://arxiv.org/abs/1511.06523) + + + +## Abstract + +Face detection is one of the most studied topics in the computer vision community. Much of the progresses have been made by the availability of face detection benchmark datasets. We show that there is a gap between current face detection performance and the real world requirements. To facilitate future face detection research, we introduce the WIDER FACE dataset, which is 10 times larger than existing datasets. The dataset contains rich annotations, including occlusions, poses, event categories, and face bounding boxes. Faces in the proposed dataset are extremely challenging due to large variations in scale, pose and occlusion, as shown in Fig. 1. Furthermore, we show that WIDER FACE dataset is an effective training source for face detection. We benchmark several representative detection systems, providing an overview of state-of-the-art performance and propose a solution to deal with large scale variation. Finally, we discuss common failure cases that worth to be further investigated. + +
+ +
+ +## Introduction + +To use the WIDER Face dataset you need to download it +and extract to the `data/WIDERFace` folder. Annotation in the VOC format +can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git). +You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders +to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`. +Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`. +The directory should be like this: + +``` +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── WIDERFace +│ │ ├── WIDER_train +│ | │ ├──0--Parade +│ | │ ├── ... +│ | │ ├── Annotations +│ │ ├── WIDER_val +│ | │ ├──0--Parade +│ | │ ├── ... +│ | │ ├── Annotations +│ │ ├── val.txt +│ │ ├── train.txt + +``` + +After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or +create your own config based on the presented one. + +## Citation + +```latex +@inproceedings{yang2016wider, + Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou}, + Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + Title = {WIDER FACE: A Face Detection Benchmark}, + Year = {2016} +} +``` diff --git a/mmpose/configs/mmdet/wider_face/retinanet_r50_fpn_1x_widerface.py b/mmpose/configs/mmdet/wider_face/retinanet_r50_fpn_1x_widerface.py new file mode 100644 index 0000000000000000000000000000000000000000..78067255f8f69f9d193e8d3ae2fe8a685e4defe1 --- /dev/null +++ b/mmpose/configs/mmdet/wider_face/retinanet_r50_fpn_1x_widerface.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/wider_face.py', '../_base_/schedules/schedule_1x.py', + '../_base_/default_runtime.py' +] +# model settings +model = dict(bbox_head=dict(num_classes=1)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) diff --git a/mmpose/configs/mmdet/wider_face/ssd300_8xb32-24e_widerface.py b/mmpose/configs/mmdet/wider_face/ssd300_8xb32-24e_widerface.py new file mode 100644 index 0000000000000000000000000000000000000000..02c3c927f78ff022b03bf180789ce91d6061ec9e --- /dev/null +++ b/mmpose/configs/mmdet/wider_face/ssd300_8xb32-24e_widerface.py @@ -0,0 +1,64 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_2x.py' +] +model = dict(bbox_head=dict(num_classes=1)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean={{_base_.model.data_preprocessor.mean}}, + to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}}, + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(300, 300), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='Resize', scale=(300, 300), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 
'img_shape', + 'scale_factor')) +] + +dataset_type = 'WIDERFaceDataset' +data_root = 'data/WIDERFace/' +train_dataloader = dict( + batch_size=32, num_workers=8, dataset=dict(pipeline=train_pipeline)) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict(type='MultiStepLR', by_epoch=True, milestones=[16, 20], gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.012, momentum=0.9, weight_decay=5e-4), + clip_grad=dict(max_norm=35, norm_type=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=256) diff --git a/mmpose/configs/mmdet/yolact/README.md b/mmpose/configs/mmdet/yolact/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e884ad65e7181503efd129e7444391e7ea8e2e51 --- /dev/null +++ b/mmpose/configs/mmdet/yolact/README.md @@ -0,0 +1,75 @@ +# YOLACT + +> [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) + + + +## Abstract + +We present a simple, fully-convolutional model for real-time instance segmentation that achieves 29.8 mAP on MS COCO at 33.5 fps evaluated on a single Titan Xp, which is significantly faster than any previous competitive approach. Moreover, we obtain this result after training on only one GPU. We accomplish this by breaking instance segmentation into two parallel subtasks: (1) generating a set of prototype masks and (2) predicting per-instance mask coefficients. Then we produce instance masks by linearly combining the prototypes with the mask coefficients. We find that because this process doesn't depend on repooling, this approach produces very high-quality masks and exhibits temporal stability for free. Furthermore, we analyze the emergent behavior of our prototypes and show they learn to localize instances on their own in a translation variant manner, despite being fully-convolutional. Finally, we also propose Fast NMS, a drop-in 12 ms faster replacement for standard NMS that only has a marginal performance penalty. + +
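A rough, self-contained sketch of the mask-assembly step described above (shapes are illustrative and this is not the mmdet implementation); `k = 32` matches `num_protos=32` in the configs below:

```python
import numpy as np

# k prototype masks of size H x W, plus one k-dim coefficient vector per detected instance.
k, H, W, num_instances = 32, 138, 138, 5            # illustrative sizes only
prototypes = np.random.rand(k, H, W)                 # prototype branch output
coefficients = np.random.randn(num_instances, k)     # per-instance mask coefficients

# Each instance mask is a linear combination of the prototypes with its coefficients,
# followed by a sigmoid; the full model additionally crops to the box and thresholds.
masks = np.tensordot(coefficients, prototypes, axes=([1], [0]))   # (num_instances, H, W)
masks = 1.0 / (1.0 + np.exp(-masks))
```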
+ +
+ +## Introduction + +A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper: + +- [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) + + + +For a real-time demo, check out our ICCV video: +[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0pMfmo8qfpQ/0.jpg)](https://www.youtube.com/watch?v=0pMfmo8qfpQ) + +## Evaluation + +Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's `val`: + +| Image Size | GPU x BS | Backbone | \*FPS | mAP | Weights | Configs | Download | +| :--------: | :------: | :-----------: | :---: | :--: | :-----: | :--------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | +| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](./yolact_r50_1xb8-55e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth) | +| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](./yolact_r50_8xb8-55e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth) | +| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](./yolact_r101_1xb8-55e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth) | + +\*Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account. Data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, image rendering are not included. + +## Training + +All the aforementioned models are trained with a single GPU. It typically takes ~12GB VRAM when using resnet-101 as the backbone. If you want to try multiple GPUs training, you may have to modify the configuration files accordingly, such as adjusting the training schedule and freezing batch norm. + +```Shell +# Trains using the resnet-101 backbone with a batch size of 8 on a single GPU. +./tools/dist_train.sh configs/yolact/yolact_r101.py 1 +``` + +## Testing + +Please refer to [mmdetection/docs/getting_started.md](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html#test-existing-models). 
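Beyond the test scripts, a quick sanity check can also be run through the high-level Python API. A minimal sketch, assuming mmdet 3.x is installed; the config and checkpoint paths are placeholders for one of the configs in this folder and the matching weight file from the table above:

```python
from mmdet.apis import inference_detector, init_detector

# Placeholder paths: point these at a config from this folder and the
# corresponding checkpoint downloaded from the table above.
config_file = 'configs/yolact/yolact_r101_1xb8-55e_coco.py'
checkpoint_file = 'yolact_r101_1x8_coco_20200908-4cbe9101.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')   # any test image
print(result.pred_instances)                     # boxes, labels, scores and masks
```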
+ +## Citation + +If you use YOLACT or this code base in your work, please cite + +```latex +@inproceedings{yolact-iccv2019, + author = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee}, + title = {YOLACT: {Real-time} Instance Segmentation}, + booktitle = {ICCV}, + year = {2019}, +} +``` + + diff --git a/mmpose/configs/mmdet/yolact/metafile.yml b/mmpose/configs/mmdet/yolact/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..9ca76b3d3910f497e97275d0f25b1b1c3062d12b --- /dev/null +++ b/mmpose/configs/mmdet/yolact/metafile.yml @@ -0,0 +1,81 @@ +Collections: + - Name: YOLACT + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.02689 + Title: 'YOLACT: Real-time Instance Segmentation' + README: configs/yolact/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/yolact.py#L9 + Version: v2.5.0 + +Models: + - Name: yolact_r50_1x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r50_1xb8-55e_coco.py + Metadata: + Training Resources: 1x V100 GPU + Batch Size: 8 + Epochs: 55 + inference time (ms/im): + - value: 23.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 29.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth + + - Name: yolact_r50_8x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r50_8xb8-55e_coco.py + Metadata: + Batch Size: 64 + Epochs: 55 + inference time (ms/im): + - value: 23.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 28.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth + + - Name: yolact_r101_1x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r101_1xb8-55e_coco.py + Metadata: + Training Resources: 1x V100 GPU + Batch Size: 8 + Epochs: 55 + inference time (ms/im): + - value: 29.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 30.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth diff --git a/mmpose/configs/mmdet/yolact/yolact_r101_1xb8-55e_coco.py b/mmpose/configs/mmdet/yolact/yolact_r101_1xb8-55e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e6ffe29627ff5bd24b8e53be8d7defaa9eb91df7 --- /dev/null +++ b/mmpose/configs/mmdet/yolact/yolact_r101_1xb8-55e_coco.py @@ -0,0 +1,7 @@ +_base_ = './yolact_r50_1xb8-55e_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/mmpose/configs/mmdet/yolact/yolact_r50_1xb8-55e_coco.py b/mmpose/configs/mmdet/yolact/yolact_r50_1xb8-55e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b7dabf1548a733cbf18b8007ae2fa9033a340af6 --- /dev/null +++ b/mmpose/configs/mmdet/yolact/yolact_r50_1xb8-55e_coco.py @@ -0,0 +1,170 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', '../_base_/default_runtime.py' +] +img_norm_cfg = 
dict( + mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) +# model settings +input_size = 550 +model = dict( + type='YOLACT', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=img_norm_cfg['mean'], + std=img_norm_cfg['std'], + bgr_to_rgb=img_norm_cfg['to_rgb'], + pad_mask=True), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, # do not freeze stem + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, # update the statistics of bn + zero_init_residual=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5, + upsample_cfg=dict(mode='bilinear')), + bbox_head=dict( + type='YOLACTHead', + num_classes=80, + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=3, + scales_per_octave=1, + base_sizes=[8, 16, 32, 64, 128], + ratios=[0.5, 1.0, 2.0], + strides=[550.0 / x for x in [69, 35, 18, 9, 5]], + centers=[(550 * 0.5 / x, 550 * 0.5 / x) + for x in [69, 35, 18, 9, 5]]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + reduction='none', + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), + num_head_convs=1, + num_protos=32, + use_ohem=True), + mask_head=dict( + type='YOLACTProtonet', + in_channels=256, + num_protos=32, + num_classes=80, + max_masks_to_train=100, + loss_mask_weight=6.125, + with_seg_branch=True, + loss_segm=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), # YOLACT should use PseudoSampler + # smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + mask_thr=0.5, + iou_thr=0.5, + top_k=200, + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=4, + batch_sampler=None, + dataset=dict(pipeline=train_pipeline)) 
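# Single-GPU recipe: batch_size=8 on one GPU, matching auto_scale_lr base_batch_size=8 below;
# the 8-GPU variant (yolact_r50_8xb8-55e_coco.py) only overrides the optimizer, LR schedule
# and auto_scale_lr.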
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +max_epochs = 55 +# training schedule for 55e +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 42, 49, 52], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)) + +custom_hooks = [ + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +env_cfg = dict(cudnn_benchmark=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (1 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/mmpose/configs/mmdet/yolact/yolact_r50_8xb8-55e_coco.py b/mmpose/configs/mmdet/yolact/yolact_r50_8xb8-55e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e39c285da10ef4821343ebf3c0d0d4c094a97198 --- /dev/null +++ b/mmpose/configs/mmdet/yolact/yolact_r50_8xb8-55e_coco.py @@ -0,0 +1,23 @@ +_base_ = 'yolact_r50_1xb8-55e_coco.py' + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(lr=8e-3), + clip_grad=dict(max_norm=35, norm_type=2)) +# learning rate +max_epochs = 55 +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[20, 42, 49, 52], + gamma=0.1) +] +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/yolo/README.md b/mmpose/configs/mmdet/yolo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9cb47bcc81a1221dcb4a31b278e7bd62eebf1307 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/README.md @@ -0,0 +1,55 @@ +# YOLOv3 + +> [YOLOv3: An Incremental Improvement](https://arxiv.org/abs/1804.02767) + + + +## Abstract + +We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster. When we look at the old .5 IOU mAP detection metric YOLOv3 is quite good. It achieves 57.9 mAP@50 in 51 ms on a Titan X, compared to 57.5 mAP@50 in 198 ms by RetinaNet, similar performance but 3.8x faster. + +
+ +
+ +## Results and Models + +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :---: | :-----: | :------: | :------------: | :----: | :---------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DarkNet-53 | 320 | 273e | 2.7 | 63.9 | 27.9 | [config](./yolov3_d53_8xb8-320-273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-20200819_172101.log.json) | +| DarkNet-53 | 416 | 273e | 3.8 | 61.2 | 30.9 | [config](./yolov3_d53_8xb8-ms-416-273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-20200819_173424.log.json) | +| DarkNet-53 | 608 | 273e | 7.4 | 48.1 | 33.7 | [config](./yolov3_d53_8xb8-ms-608-273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020.log.json) | + +## Mixed Precision Training + +We also train YOLOv3 with mixed precision training. 
+ +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :---: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DarkNet-53 | 608 | 273e | 4.7 | 48.1 | 33.8 | [config](./yolov3_d53_8xb8-amp-ms-608-273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542.log.json) | + +## Lightweight models + +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :---: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MobileNetV2 | 416 | 300e | 5.3 | | 23.9 | [config](./yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823.log.json) | +| MobileNetV2 | 320 | 300e | 3.2 | | 22.2 | [config](./yolov3_mobilenetv2_8xb24-320-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349.log.json) | + +Notice: We reduce the number of channels to 96 in both head and neck. It can reduce the flops and parameters, which makes these models more suitable for edge devices. + +## Credit + +This implementation originates from the project of Haoyu Wu(@wuhy08) at Western Digital. 
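Concretely, the 96-channel note above corresponds to the following slice of the MobileNetV2 configs included later in this diff (condensed excerpt, not a complete config):

```python
# Condensed from yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py; the DarkNet-53 configs
# instead use neck out_channels=[512, 256, 128] and head out_channels=[1024, 512, 256].
model = dict(
    neck=dict(
        type='YOLOV3Neck',
        num_scales=3,
        in_channels=[320, 96, 32],    # MobileNetV2 feature channels
        out_channels=[96, 96, 96]),   # slimmed to 96 channels
    bbox_head=dict(
        type='YOLOV3Head',
        num_classes=80,
        in_channels=[96, 96, 96],
        out_channels=[96, 96, 96]))
```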
+ +## Citation + +```latex +@misc{redmon2018yolov3, + title={YOLOv3: An Incremental Improvement}, + author={Joseph Redmon and Ali Farhadi}, + year={2018}, + eprint={1804.02767}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/mmpose/configs/mmdet/yolo/metafile.yml b/mmpose/configs/mmdet/yolo/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..627e70c4d368728d3632f4fda6b68475c3a0fa66 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/metafile.yml @@ -0,0 +1,124 @@ +Collections: + - Name: YOLOv3 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - DarkNet + Paper: + URL: https://arxiv.org/abs/1804.02767 + Title: 'YOLOv3: An Incremental Improvement' + README: configs/yolo/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/yolo.py#L8 + Version: v2.4.0 + +Models: + - Name: yolov3_d53_320_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_8xb8-320-273e_coco.py + Metadata: + Training Memory (GB): 2.7 + inference time (ms/im): + - value: 15.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (320, 320) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 27.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth + + - Name: yolov3_d53_mstrain-416_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_8xb8-ms-416-273e_coco.py + Metadata: + Training Memory (GB): 3.8 + inference time (ms/im): + - value: 16.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (416, 416) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 30.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth + + - Name: yolov3_d53_mstrain-608_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_8xb8-ms-608-273e_coco.py + Metadata: + Training Memory (GB): 7.4 + inference time (ms/im): + - value: 20.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (608, 608) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 33.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth + + - Name: yolov3_d53_fp16_mstrain-608_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_8xb8-amp-ms-608-273e_coco.py + Metadata: + Training Memory (GB): 4.7 + inference time (ms/im): + - value: 20.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (608, 608) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 33.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth + + - Name: yolov3_mobilenetv2_8xb24-320-300e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py + Metadata: + Training Memory (GB): 3.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 22.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth + + - Name: yolov3_mobilenetv2_8xb24-ms-416-300e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py + Metadata: + Training Memory (GB): 5.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 23.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth diff --git a/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-320-273e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-320-273e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d08dd7706e5ba5bec5fc9e8da6fab120ed813d --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-320-273e_coco.py @@ -0,0 +1,29 @@ +_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' + +input_size = (320, 320) +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + # `mean` and `to_rgb` should be the same with the `preprocess_cfg` + dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=input_size, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=input_size, keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-amp-ms-608-273e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-amp-ms-608-273e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..173d8ee22227b3c3f4aa0488cb4e6f131d7dbee4 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-amp-ms-608-273e_coco.py @@ -0,0 +1,3 @@ +_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' +# fp16 settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') diff --git a/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-416-273e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-416-273e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ca0127e83edaeb8d5851ed089f6bd6d7385a1f86 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-416-273e_coco.py @@ -0,0 +1,28 @@ +_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + # `mean` and `to_rgb` should be the same with the `preprocess_cfg` + dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', 
backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(416, 416), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-608-273e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-608-273e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d4a36dfdaaf9b9e013882a6c28d42cca5942be20 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_d53_8xb8-ms-608-273e_coco.py @@ -0,0 +1,167 @@ +_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[0, 0, 0], + std=[255., 255., 255.], + bgr_to_rgb=True, + pad_size_divisor=32) +model = dict( + type='YOLOV3', + data_preprocessor=data_preprocessor, + backbone=dict( + type='Darknet', + depth=53, + out_indices=(3, 4, 5), + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')), + neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[1024, 512, 256], + out_channels=[512, 256, 128]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[512, 256, 128], + out_channels=[1024, 512, 256], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=data_preprocessor['mean'], + to_rgb=data_preprocessor['bgr_to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='RandomResize', scale=[(320, 320), (608, 608)], keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackDetInputs') +] 
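# The RandomResize over [(320, 320), (608, 608)] above is the multi-scale ('ms') training
# referred to in the README tables; evaluation below uses a fixed (608, 608) resize.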
+test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(608, 608), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +train_cfg = dict(max_epochs=273, val_interval=7) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning policy +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=2000), + dict(type='MultiStepLR', by_epoch=True, milestones=[218, 246], gamma=0.1) +] + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=7)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..07b393734329fd3ed5f4bd11fbc15b4abf7846bb --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py @@ -0,0 +1,42 @@ +_base_ = ['./yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'] + +# yapf:disable +model = dict( + bbox_head=dict( + anchor_generator=dict( + base_sizes=[[(220, 125), (128, 222), (264, 266)], + [(35, 87), (102, 96), (60, 170)], + [(10, 15), (24, 36), (72, 42)]]))) +# yapf:enable + +input_size = (320, 320) +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + # `mean` and `to_rgb` should be the same with the `preprocess_cfg` + dict( + type='Expand', + mean=[123.675, 116.28, 103.53], + to_rgb=True, + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=input_size, keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=input_size, keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py b/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9a161b66fe92666e904a9580ab5a1ff16d630ab7 --- /dev/null +++ b/mmpose/configs/mmdet/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py @@ -0,0 +1,176 @@ +_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32) +model = dict( + type='YOLOV3', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(2, 4, 6), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), + neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[320, 96, 32], + out_channels=[96, 96, 96]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[96, 96, 96], + out_channels=[96, 96, 96], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + 
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=data_preprocessor['mean'], + to_rgb=data_preprocessor['bgr_to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(416, 416), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=24, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type='RepeatDataset', # use RepeatDataset to speed up training + times=10, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=24, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +train_cfg = dict(max_epochs=30) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=False, + begin=0, + end=4000), + dict(type='MultiStepLR', by_epoch=True, milestones=[24, 28], gamma=0.1) +] + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (24 samples per GPU) +auto_scale_lr = dict(base_batch_size=192) diff --git a/mmpose/configs/mmdet/yolof/README.md b/mmpose/configs/mmdet/yolof/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b9167f6e6e34a64022b82b212e4bc81808dc3395 --- /dev/null +++ b/mmpose/configs/mmdet/yolof/README.md @@ -0,0 +1,35 @@ +# YOLOF + +> [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) + + + +## Abstract + +This paper revisits feature pyramids networks (FPN) for one-stage detectors and points out that the success of FPN is due to its divide-and-conquer solution to the optimization problem in object detection rather than multi-scale feature fusion. From the perspective of optimization, we introduce an alternative way to address the problem instead of adopting the complex feature pyramids - {\\em utilizing only one-level feature for detection}. Based on the simple and efficient solution, we present You Only Look One-level Feature (YOLOF). In our method, two key components, Dilated Encoder and Uniform Matching, are proposed and bring considerable improvements. Extensive experiments on the COCO benchmark prove the effectiveness of the proposed model. Our YOLOF achieves comparable results with its feature pyramids counterpart RetinaNet while being 2.5× faster. Without transformer layers, YOLOF can match the performance of DETR in a single-level feature manner with 7× less training epochs. With an image size of 608×608, YOLOF achieves 44.3 mAP running at 60 fps on 2080Ti, which is 13% faster than YOLOv4. + +
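In mmdet config terms (see `yolof_r50-c5_8xb8-1x_coco.py` later in this diff), using only one-level feature means the backbone exposes just C5 and the FPN is replaced by the proposed Dilated Encoder; a condensed excerpt:

```python
# Condensed excerpt, not a complete config.
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        out_indices=(3, )),           # single output level: C5 only
    neck=dict(
        type='DilatedEncoder',        # replaces the multi-level FPN
        in_channels=2048,
        out_channels=512,
        block_mid_channels=128,
        num_residual_blocks=4,
        block_dilations=[2, 4, 6, 8]))
```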
+ +
+ +## Results and Models + +| Backbone | Style | Epoch | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :---: | :---: | :-----: | :------: | :----: | :--------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C5 | caffe | Y | 1x | 8.3 | 37.5 | [config](./yolof_r50-c5_8xb8-1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427.log.json) | + +**Note**: + +1. We find that the performance is unstable and may fluctuate by about 0.3 mAP. mAP 37.4 ~ 37.7 is acceptable in YOLOF_R_50_C5_1x. Such fluctuation can also be found in the [original implementation](https://github.com/chensnathan/YOLOF). +2. In addition to instability issues, sometimes there are large loss fluctuations and NAN, so there may still be problems with this project, which will be improved subsequently. + +## Citation + +```latex +@inproceedings{chen2021you, + title={You Only Look One-level Feature}, + author={Chen, Qiang and Wang, Yingming and Yang, Tong and Zhang, Xiangyu and Cheng, Jian and Sun, Jian}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/yolof/metafile.yml b/mmpose/configs/mmdet/yolof/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..b3b7b7f8d5d3d7faec0cd04984ede59a99d06f38 --- /dev/null +++ b/mmpose/configs/mmdet/yolof/metafile.yml @@ -0,0 +1,32 @@ +Collections: + - Name: YOLOF + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Dilated Encoder + - ResNet + Paper: + URL: https://arxiv.org/abs/2103.09460 + Title: 'You Only Look One-level Feature' + README: configs/yolof/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/yolof.py#L6 + Version: v2.12.0 + +Models: + - Name: yolof_r50_c5_8x8_1x_coco + In Collection: YOLOF + Config: configs/yolof/yolof_r50-c5_8xb8-1x_coco.py + Metadata: + Training Memory (GB): 8.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth diff --git a/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-1x_coco.py b/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5ea228e3e3270e07a4e5b171ab544c704fb172f3 --- /dev/null +++ b/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-1x_coco.py @@ -0,0 +1,116 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='YOLOF', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, 
+ style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_caffe')), + neck=dict( + type='DilatedEncoder', + in_channels=2048, + out_channels=512, + block_mid_channels=128, + num_residual_blocks=4, + block_dilations=[2, 4, 6, 8]), + bbox_head=dict( + type='YOLOFHead', + num_classes=80, + in_channels=512, + reg_decoded_bbox=True, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[1, 2, 4, 8, 16], + strides=[32]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1., 1., 1., 1.], + add_ctr_clamp=True, + ctr_clamp=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001), + paramwise_cfg=dict( + norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)})) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.00066667, + by_epoch=False, + begin=0, + end=1500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='RandomShift', prob=0.5, max_shift_px=32), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-iter-1x_coco.py b/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-iter-1x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..466a820099e3ac1760371e8352a89f93fbeef5ee --- /dev/null +++ b/mmpose/configs/mmdet/yolof/yolof_r50-c5_8xb8-iter-1x_coco.py @@ -0,0 +1,32 @@ +_base_ = './yolof_r50-c5_8xb8-1x_coco.py' + +# We implemented the iter-based config according to the source code. +# COCO dataset has 117266 images after filtering. We use 8 gpu and +# 8 batch size training, so 22500 is equivalent to +# 22500/(117266/(8x8))=12.3 epoch, 15000 is equivalent to 8.2 epoch, +# 20000 is equivalent to 10.9 epoch. Due to lr(0.12) is large, +# the iter-based and epoch-based setting have about 0.2 difference on +# the mAP evaluation value. 
+ +train_cfg = dict( + _delete_=True, + type='IterBasedTrainLoop', + max_iters=22500, + val_interval=4500) + +# learning rate policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=22500, + by_epoch=False, + milestones=[15000, 20000], + gamma=0.1) +] +train_dataloader = dict(sampler=dict(type='InfiniteSampler')) +default_hooks = dict(checkpoint=dict(by_epoch=False, interval=2500)) + +log_processor = dict(by_epoch=False) diff --git a/mmpose/configs/mmdet/yolox/README.md b/mmpose/configs/mmdet/yolox/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0cde192676db90e8dbd92de80b55d540493e17e5 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/README.md @@ -0,0 +1,39 @@ +# YOLOX + +> [YOLOX: Exceeding YOLO Series in 2021](https://arxiv.org/abs/2107.08430) + + + +## Abstract + +In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. + +
+ +
+ +## Results and Models + +| Backbone | size | Mem (GB) | box AP | Config | Download | +| :--------: | :--: | :------: | :----: | :--------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOX-tiny | 416 | 3.5 | 32.0 | [config](./yolox_tiny_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) | +| YOLOX-s | 640 | 7.6 | 40.5 | [config](./yolox_s_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) | +| YOLOX-l | 640 | 19.9 | 49.4 | [config](./yolox_l_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | +| YOLOX-x | 640 | 28.1 | 50.9 | [config](./yolox_x_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | + +**Note**: + +1. The test score threshold is 0.001, and the box AP indicates the best AP. +2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. +3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). 
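
The released configs and checkpoints in the table above can be loaded directly through the MMDetection Python API for a quick sanity check. The snippet below is a minimal sketch, assuming an MMDetection 3.x installation, paths relative to this repository's root, and a placeholder input image `demo.jpg`:

```python
from mmdet.apis import inference_detector, init_detector

# Config shipped in this repo and the released YOLOX-s checkpoint from the
# table above (adjust the config path to your checkout layout).
config = 'mmpose/configs/mmdet/yolox/yolox_s_8xb8-300e_coco.py'
checkpoint = ('https://download.openmmlab.com/mmdetection/v2.0/yolox/'
              'yolox_s_8x8_300e_coco/'
              'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth')

model = init_detector(config, checkpoint, device='cpu')

# Single-image inference; the result is a DetDataSample whose pred_instances
# carry the predicted bboxes, labels and scores.
result = inference_detector(model, 'demo.jpg')
print(result.pred_instances.bboxes.shape)
print(result.pred_instances.scores[:5])
```

Full COCO evaluation of the numbers above is normally run through MMDetection's standard `tools/test.py` entry point with the same config and checkpoint rather than through the API.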
+ +## Citation + +```latex +@article{yolox2021, + title={{YOLOX}: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` diff --git a/mmpose/configs/mmdet/yolox/metafile.yml b/mmpose/configs/mmdet/yolox/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..2f64450e94cae436a05f46da67d3a1264235ffbd --- /dev/null +++ b/mmpose/configs/mmdet/yolox/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: YOLOX + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - Cosine Annealing Lr Updater + Training Resources: 8x TITANXp GPUs + Architecture: + - CSPDarkNet + - PAFPN + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'YOLOX: Exceeding YOLO Series in 2021' + README: configs/yolox/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 + Version: v2.15.1 + + +Models: + - Name: yolox_s_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_s_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth + - Name: yolox_l_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_l_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 19.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth + - Name: yolox_x_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_x_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 28.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth + - Name: yolox_tiny_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_tiny_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 3.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 32.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth diff --git a/mmpose/configs/mmdet/yolox/yolox_l_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_l_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4b287bad595db65df69b7d6f80163bd4a49e44 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_l_8xb8-300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8xb8-300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=1.0, widen_factor=1.0), + neck=dict( + in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), + bbox_head=dict(in_channels=256, feat_channels=256)) diff --git a/mmpose/configs/mmdet/yolox/yolox_m_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_m_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d82f9e98f1fcd4a1c6089807adc3cca2b48d6b5e --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_m_8xb8-300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8xb8-300e_coco.py' + +# model settings +model = dict( + 
backbone=dict(deepen_factor=0.67, widen_factor=0.75), + neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), + bbox_head=dict(in_channels=192, feat_channels=192), +) diff --git a/mmpose/configs/mmdet/yolox/yolox_nano_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_nano_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3f7a1c5ab066439c78ffa005a2a60c9057223849 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_nano_8xb8-300e_coco.py @@ -0,0 +1,11 @@ +_base_ = './yolox_tiny_8xb8-300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True), + bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True)) diff --git a/mmpose/configs/mmdet/yolox/yolox_s_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_s_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3e324eb5b99202fd42c8d67847a1be1c165b4057 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_s_8xb8-300e_coco.py @@ -0,0 +1,250 @@ +_base_ = [ + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', + './yolox_tta.py' +] + +img_scale = (640, 640) # width, height + +# model settings +model = dict( + type='YOLOX', + data_preprocessor=dict( + type='DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=10) + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(2, 3, 4), + use_depthwise=False, + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + ), + neck=dict( + type='YOLOXPAFPN', + in_channels=[128, 256, 512], + out_channels=128, + num_csp_blocks=1, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + bbox_head=dict( + type='YOLOXHead', + num_classes=80, + in_channels=128, + feat_channels=128, + stacked_convs=2, + strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)), + train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. 
+ test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) + +# dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + # According to the official implementation, multi-scale + # training is not considered here but in the + # 'mmdet/models/detectors/yolox.py'. + # Resize and Pad are for the last 15 epochs when Mosaic, + # RandomAffine, and MixUp are closed by YOLOXModeSwitchHook. + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + # If the image is three-channel, the pad value needs + # to be set separately for each channel. + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +train_dataset = dict( + # use MultiImageMixDataset wrapper to support mosaic and mixup + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_cfg=dict(filter_empty_gt=False, min_size=32), + backend_args=backend_args), + pipeline=train_pipeline) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=train_dataset) +val_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +# training settings +max_epochs = 300 +num_last_epochs = 15 +interval = 10 + +train_cfg = dict(max_epochs=max_epochs, val_interval=interval) + +# optimizer +# default 8 gpu +base_lr = 0.01 +optim_wrapper = dict( + 
type='OptimWrapper', + optimizer=dict( + type='SGD', lr=base_lr, momentum=0.9, weight_decay=5e-4, + nesterov=True), + paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) + +# learning rate +param_scheduler = [ + dict( + # use quadratic formula to warm up 5 epochs + # and lr is updated by iteration + # TODO: fix default scope in get function + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + # use cosine lr from 5 to 285 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=5, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last 15 epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +default_hooks = dict( + checkpoint=dict( + interval=interval, + max_keep_ckpts=3 # only keep latest 3 checkpoints + )) + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + priority=49) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpose/configs/mmdet/yolox/yolox_tiny_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_tiny_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..86f7e9a6191066ab9b672d548b93a29e64746f29 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_tiny_8xb8-300e_coco.py @@ -0,0 +1,54 @@ +_base_ = './yolox_s_8xb8-300e_coco.py' + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=10) + ]), + backbone=dict(deepen_factor=0.33, widen_factor=0.375), + neck=dict(in_channels=[96, 192, 384], out_channels=96), + bbox_head=dict(in_channels=96, feat_channels=96)) + +img_scale = (640, 640) # width, height + +train_pipeline = [ + dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.5, 1.5), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + # Resize and Pad are for the last 15 epochs when Mosaic and + # RandomAffine are closed by YOLOXModeSwitchHook. 
+ dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), + dict(type='Resize', scale=(416, 416), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/configs/mmdet/yolox/yolox_tta.py b/mmpose/configs/mmdet/yolox/yolox_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..e65244be6e1bb70393d111ef4d25334d3b2ce8a6 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_tta.py @@ -0,0 +1,36 @@ +tta_model = dict( + type='DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=100)) + +img_scales = [(640, 640), (320, 320), (960, 960)] +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=None), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale=s, keep_ratio=True) + for s in img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) + ], + [ + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + ], + [dict(type='LoadAnnotations', with_bbox=True)], + [ + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/mmpose/configs/mmdet/yolox/yolox_x_8xb8-300e_coco.py b/mmpose/configs/mmdet/yolox/yolox_x_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..34828e0363a2f282af59da74e805e59772dfeb69 --- /dev/null +++ b/mmpose/configs/mmdet/yolox/yolox_x_8xb8-300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8xb8-300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + neck=dict( + in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), + bbox_head=dict(in_channels=320, feat_channels=320)) diff --git a/mmpose/configs/wholebody_2d_keypoint/README.md b/mmpose/configs/wholebody_2d_keypoint/README.md new file mode 100644 index 0000000000000000000000000000000000000000..362a6a89764acec6db1a4ef8216352c1fbbe697e --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/README.md @@ -0,0 +1,19 @@ +# 2D Human Whole-Body Pose Estimation + +2D human whole-body pose estimation aims to localize dense landmarks on the entire human body including face, hands, body, and feet. + +Existing approaches can be categorized into top-down and bottom-up approaches. + +Top-down methods divide the task into two stages: human detection and whole-body pose estimation. They perform human detection first, followed by single-person whole-body pose estimation given human bounding boxes. + +Bottom-up approaches (e.g. AE) first detect all the whole-body keypoints and then group/associate them into person instances. 
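
To make the top-down pipeline described above concrete, the sketch below chains an MMDetection person detector with an MMPose whole-body model. This is only an illustrative sketch assuming MMDetection 3.x and MMPose 1.x APIs; the config paths and checkpoint URLs are taken from elsewhere in this repository, and `demo.jpg` is a placeholder input.

```python
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_topdown, init_model

# Stage 1: person detection (COCO class id 0) with a detector config from
# this repository; the checkpoint URL is the released YOLOX-s model.
det_model = init_detector(
    'mmpose/configs/mmdet/yolox/yolox_s_8xb8-300e_coco.py',
    'https://download.openmmlab.com/mmdetection/v2.0/yolox/'
    'yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth',
    device='cpu')
det = inference_detector(det_model, 'demo.jpg').pred_instances
keep = (det.labels == 0) & (det.scores > 0.5)
person_bboxes = det.bboxes[keep].cpu().numpy()  # (N, 4) boxes in xyxy format

# Stage 2: whole-body pose estimation (133 keypoints) inside each person box.
pose_model = init_model(
    'mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/'
    'rtmpose-l_8xb64-270e_coco-wholebody-256x192.py',
    'https://download.openmmlab.com/mmpose/v1/projects/rtmpose/'
    'rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth',
    device='cpu')
pose_samples = inference_topdown(pose_model, 'demo.jpg', person_bboxes)
# One PoseDataSample per detected person; keypoints have shape (1, 133, 2).
for sample in pose_samples:
    print(sample.pred_instances.keypoints.shape)
```

Recent MMPose releases also provide an `MMPoseInferencer` wrapper that bundles both stages behind a single call; the demo documentation linked below covers it.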
+ +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_wholebody_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_wholebody_pose_demo.md) to run demos. + +
diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/README.md b/mmpose/configs/wholebody_2d_keypoint/dwpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d85cb48c5388a56df14209aa007703ff819136e1 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/README.md @@ -0,0 +1,63 @@ +# DWPose + +Whole-body pose estimation localizes the human body, hand, face, and foot keypoints in an image. This task is challenging due to multi-scale body parts, fine-grained localization for low-resolution regions, and data scarcity. Meanwhile, applying a highly efficient and accurate pose estimator to widely human-centric understanding and generation tasks is urgent. In this work, we present a two-stage pose **D**istillation for **W**hole-body **P**ose estimators, named **DWPose**, to improve their effectiveness and efficiency. The first-stage distillation designs a weight-decay strategy while utilizing a teacher's intermediate feature and final logits with both visible and invisible keypoints to supervise the student from scratch. The second stage distills the student model itself to further improve performance. Different from the previous self-knowledge distillation, this stage finetunes the student's head with only 20% training time as a plug-and-play training strategy. For data limitations, we explore the UBody dataset that contains diverse facial expressions and hand gestures for real-life applications. Comprehensive experiments show the superiority of our proposed simple yet effective methods. We achieve new state-of-the-art performance on COCO-WholeBody, significantly boosting the whole-body AP of RTMPose-l from 64.8% to 66.5%, even surpassing RTMPose-x teacher with 65.3% AP. We release a series of models with different sizes, from tiny to large, for satisfying various downstream tasks. + +## Results and Models + +### COCO-WholeBody Dataset + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +- DWPose Models are supported by [DWPose](https://github.com/IDEA-Research/DWPose) +- Models are trained and distilled on the following datasets: + - [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) + - [UBody](https://github.com/IDEA-Research/OSX) + +| Config | S1 Dis_config | S2 Dis_config | Input Size | Whole AP | Whole AR | FLOPS
(G) | ORT-Latency<br>(ms)<br>(i7-11700) | TRT-FP16-Latency<br>(ms)
(GTX 1660Ti) | Download | +| :----------- | :-----------------: | :-----------------: | :--------: | :------: | :------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------: | +| [DWPose-t](../rtmpose/ubody/rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py) | [DW l-t](../dwpose/ubody/s1_dis/dwpose_l_dis_t_coco-ubody-256x192.py) | [DW t-t](../dwpose/ubody/s2_dis/dwpose_t-tt_coco-ubody-256x192.py) | 256x192 | 48.5 | 58.4 | 0.5 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-ucoco_dw-ucoco_270e-256x192-dcf277bf_20230728.pth) | +| [DWPose-s](../rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py) | [DW l-s](../dwpose/ubody/s1_dis/dwpose_l_dis_s_coco-ubody-256x192.py) | [DW s-s](../dwpose/ubody/s2_dis/dwpose_s-ss_coco-ubody-256x192.py) | 256x192 | 53.8 | 63.2 | 0.9 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-ucoco_dw-ucoco_270e-256x192-3fd922c8_20230728.pth) | +| [DWPose-m](../rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py) | [DW l-m](../dwpose/ubody/s1_dis/dwpose_l_dis_m_coco-ubody-256x192.py) | [DW m-m](../dwpose/ubody/s2_dis/dwpose_m-mm_coco-ubody-256x192.py) | 256x192 | 60.6 | 69.5 | 2.22 | 13.50 | 4.00 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ucoco_dw-ucoco_270e-256x192-c8b76419_20230728.pth) | +| [DWPose-l](../rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py) | [DW x-l](../dwpose/ubody/s1_dis/dwpose_x_dis_l_coco-ubody-256x192.py) | [DW l-l](../dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-256x192.py) | 256x192 | 63.1 | 71.7 | 4.52 | 23.41 | 5.67 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-256x192-4d6dfc62_20230728.pth) | +| [DWPose-l](../rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py) | [DW x-l](../dwpose/ubody/s1_dis/dwpose_x_dis_l_coco-ubody-384x288.py) | [DW l-l](../dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-384x288.py) | 384x288 | 66.5 | 74.3 | 10.07 | 44.58 | 7.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-384x288-2438fd99_20230728.pth) | + +## Train a model + +### Train DWPose with the first stage distillation + +``` +bash tools/dist_train.sh configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/rtmpose_x_dis_l_coco-ubody-384x288.py 8 +``` + +### Tansfer the S1 distillation models into regular models + +``` +# first stage distillation +python pth_transfer.py $dis_ckpt $new_pose_ckpt +``` + +⭐Before S2 distillation, you should add your model path into 'teacher_pretrained' of your S2 dis_config. 
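
For example, after the first-stage run above has finished and `pth_transfer.py` has written the converted teacher checkpoint into its work directory, the S2 dis_config references it roughly like this (an illustrative excerpt; the exact filename and path depend on your own `work_dirs` layout):

```python
# Excerpt of an S2 dis_config (e.g. dwpose_l-ll_coco-ubody-384x288.py): the
# teacher weights point at the checkpoint produced by pth_transfer.py.
model = dict(
    teacher_pretrained='work_dirs/'
    'dwpose_x_dis_l_coco-ubody-384x288/dw-x-l_ucoco_384.pth',
)
```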
+ +### Train DWPose with the second stage distillation + +``` +bash tools/dist_train.sh configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-384x288.py 8 +``` + +### Tansfer the S2 distillation models into regular models + +``` +# second stage distillation +python pth_transfer.py $dis_ckpt $new_pose_ckpt --two_dis +``` + +## Citation + +``` +@article{yang2023effective, + title={Effective Whole-body Pose Estimation with Two-stages Distillation}, + author={Yang, Zhendong and Zeng, Ailing and Yuan, Chun and Li, Yu}, + journal={arXiv preprint arXiv:2307.15880}, + year={2023} +} +``` diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_l_dis_m_coco-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_l_dis_m_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..422871acbb08f9ecbb67144c3a76166151b37387 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_l_dis_m_coco-256x192.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-l_8xb64-270e_coco-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-m_8xb64-270e_coco-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=768, + teacher_channels=1024, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_x_dis_l_coco-384x288.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_x_dis_l_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..150cb2bbe62ba7117b79ecbd3cceec3f6a8f64bf --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s1_dis/dwpose_x_dis_l_coco-384x288.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-x_simcc-coco-wholebody_pt-body7_270e-384x288-401dfc90_20230629.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-x_8xb32-270e_coco-wholebody-384x288.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-l_8xb32-270e_coco-wholebody-384x288.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', 
+ use_this=fea, + student_channels=1024, + teacher_channels=1280, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_l-ll_coco-384x288.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_l-ll_coco-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..6c63f99b0cee942b7667ac193d1b46e8c8b00196 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_l-ll_coco-384x288.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_x_dis_l_coco-384x288/dw-x-l_coco_384.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-l_8xb32-270e_coco-wholebody-384x288.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-l_8xb32-270e_coco-wholebody-384x288.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_m-mm_coco-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_m-mm_coco-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..943ec60184aa6b2b264eabc219385c93080a04bc --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/coco-wholebody/s2_dis/dwpose_m-mm_coco-256x192.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_l_dis_m_coco-256x192/dw-l-m_coco_256.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-m_8xb64-270e_coco-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/' + 'rtmpose-m_8xb64-270e_coco-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git 
a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_m_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_m_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a917b96e855b869844b45f3cc02910ce6b1d52 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_m_coco-ubody-256x192.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_ucoco_256x192-95bb32f5_20230822.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=768, + teacher_channels=1024, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_s_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_s_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c90a0ea6a7693928565840b250063663e54cf3bb --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_s_coco-ubody-256x192.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_ucoco_256x192-95bb32f5_20230822.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=512, + teacher_channels=1024, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_t_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_t_coco-ubody-256x192.py new file mode 100644 index 
0000000000000000000000000000000000000000..01618f146a0a150a7fea67e4c0313087ae688312 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_l_dis_t_coco-ubody-256x192.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_ucoco_256x192-95bb32f5_20230822.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=384, + teacher_channels=1024, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_x_dis_l_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_x_dis_l_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..85a287324b647c6b19dde1486093b68940df72ff --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/dwpose_x_dis_l_coco-ubody-256x192.py @@ -0,0 +1,48 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-x_ucoco_256x192-05f5bcb7_20230822.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-x_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=1024, + teacher_channels=1280, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/rtmpose_x_dis_l_coco-ubody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/rtmpose_x_dis_l_coco-ubody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..acde64a03a6b1f09689766eae75548c09f9b26a7 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s1_dis/rtmpose_x_dis_l_coco-ubody-384x288.py @@ -0,0 +1,48 @@ +_base_ = [ + 
'../../../rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py' # noqa: E501 +] + +# model settings +find_unused_parameters = False + +# config settings +fea = True +logit = True + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + teacher_pretrained='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-x_ucoco_384x288-f5b50679_20230822.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-x_8xb32-270e_coco-ubody-wholebody-384x288.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='FeaLoss', + name='loss_fea', + use_this=fea, + student_channels=1024, + teacher_channels=1280, + alpha_fea=0.00007, + ) + ]), + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=0.1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +) +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f456a2b9d76d430ae0d894b62c8de6436b6827 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-256x192.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_x_dis_l_coco-ubody-256x192/dw-x-l_ucoco_256.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..3815fad1e2558c7f44b63ad2170021007287e6cb --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_l-ll_coco-ubody-384x288.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + 
teacher_pretrained='work_dirs/' + 'dwpose_x_dis_l_coco-ubody-384x288/dw-x-l_ucoco_384.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_m-mm_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_m-mm_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6834ffca3593604ad2b550ad7c0c8e5481553d --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_m-mm_coco-ubody-256x192.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_l_dis_m_coco-ubody-256x192/dw-l-m_ucoco_256.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_s-ss_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_s-ss_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..24a4a94642af4e6858c369787fd22b7d530cba51 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_s-ss_coco-ubody-256x192.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_l_dis_s_coco-ubody-256x192/dw-l-s_ucoco_256.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + 
use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_t-tt_coco-ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_t-tt_coco-ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c322ece2662b943d399afb0854fa6766478e24 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/dwpose/ubody/s2_dis/dwpose_t-tt_coco-ubody-256x192.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../../rtmpose/ubody/rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py' # noqa: E501 +] + +# model settings +find_unused_parameters = True + +# dis settings +second_dis = True + +# config settings +logit = True + +train_cfg = dict(max_epochs=60, val_interval=10) + +# method details +model = dict( + _delete_=True, + type='DWPoseDistiller', + two_dis=second_dis, + teacher_pretrained='work_dirs/' + 'dwpose_l_dis_t_coco-ubody-256x192/dw-l-t_ucoco_256.pth', # noqa: E501 + teacher_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + student_cfg='configs/wholebody_2d_keypoint/rtmpose/ubody/' + 'rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py', # noqa: E501 + distill_cfg=[ + dict(methods=[ + dict( + type='KDLoss', + name='loss_logit', + use_this=logit, + weight=1, + ) + ]), + ], + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + train_cfg=train_cfg, +) + +optim_wrapper = dict(clip_grad=dict(max_norm=1., norm_type=2)) diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/README.md b/mmpose/configs/wholebody_2d_keypoint/rtmpose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ac40c016aa67b65eff87aa87f92cdc10be66e452 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/README.md @@ -0,0 +1,18 @@ +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. 
+ +## Results and Models + +### COCO-WholeBody Dataset + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | Whole AP | Whole AR | Details and Download | +| :-------: | :--------: | :------: | :------: | :---------------------------------------------------------------------: | +| RTMPose-m | 256x192 | 0.582 | 0.674 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | +| RTMPose-l | 256x192 | 0.611 | 0.700 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | +| RTMPose-l | 384x288 | 0.648 | 0.730 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb1024-270e_cocktail14-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb1024-270e_cocktail14-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..59351d5f4a1920834a1217b7279b70512f151a00 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb1024-270e_cocktail14-256x192.py @@ -0,0 +1,615 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 10 +base_lr = 5e-4 +train_batch_size = 1024 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=8192) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-256x192-4d6dfc62_20230728.pth' # noqa + )), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[256, 512, 1024], + out_channels=None, + out_indices=( + 1, + 2, + ), + num_csp_blocks=2, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + head=dict( + type='RTMWHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + 
pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=True, + label_beta=10., + mask=list(range(23, 91)), + mask_weight=0.5, + ), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping + +aic_coco133 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12), + (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)] + +crowdpose_coco133 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16)] + +mpii_coco133 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco133 = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco133 = [(i, i) + for i in range(17)] + [(20, 17), (21, 20), (22, 18), (23, 21), + (24, 19), + (25, 22)] + [(i, i - 3) + for i in range(26, 136)] + +posetrack_coco133 = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +humanart_coco133 = [(i, i) for i in range(17)] + [(17, 99), (18, 120), + (19, 17), (20, 20)] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + 
mapping=aic_coco133) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_coco133) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_coco133) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_coco133) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_coco133) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_coco133) + ], +) + +dataset_humanart = dict( + type='HumanArt21Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart.json', + filter_cfg=dict(scenes=['real_human']), + data_prefix=dict(img='pose/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=humanart_coco133) + ]) + +ubody_scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +ubody_datasets = [] +for scene in ubody_scenes: + each = dict( + type='UBody2dDataset', + data_root=data_root, + data_mode=data_mode, + ann_file=f'Ubody/annotations/{scene}/train_annotations.json', + data_prefix=dict(img='pose/UBody/images/'), + pipeline=[], + sample_interval=10) + ubody_datasets.append(each) + +dataset_ubody = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/ubody2d.py'), + datasets=ubody_datasets, + pipeline=[], + test_mode=False, +) + +face_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale', padding=1.25), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +wflw_coco133 = [(i * 2, 23 + i) + for i in range(17)] + [(33 + i, 40 + i) for i in range(5)] + [ + (42 + i, 45 + i) for i in range(5) + ] + [(51 + i, 50 + i) + for i in range(9)] + [(60, 59), (61, 60), (63, 61), + (64, 62), (65, 63), (67, 64), + (68, 65), (69, 66), (71, 67), + (72, 68), (73, 69), + (75, 70)] + [(76 + i, 71 + i) + for i in range(20)] +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + 
mapping=wflw_coco133), *face_pipeline + ], +) + +mapping_300w_coco133 = [(i, 23 + i) for i in range(68)] +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mapping_300w_coco133), *face_pipeline + ], +) + +cofw_coco133 = [(0, 40), (2, 44), (4, 42), (1, 49), (3, 45), (6, 47), (8, 59), + (10, 62), (9, 68), (11, 65), (18, 54), (19, 58), (20, 53), + (21, 56), (22, 71), (23, 77), (24, 74), (25, 85), (26, 89), + (27, 80), (28, 31)] +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=cofw_coco133), *face_pipeline + ], +) + +lapa_coco133 = [(i * 2, 23 + i) for i in range(17)] + [ + (33 + i, 40 + i) for i in range(5) +] + [(42 + i, 45 + i) for i in range(5)] + [ + (51 + i, 50 + i) for i in range(4) +] + [(58 + i, 54 + i) for i in range(5)] + [(66, 59), (67, 60), (69, 61), + (70, 62), (71, 63), (73, 64), + (75, 65), (76, 66), (78, 67), + (79, 68), (80, 69), + (82, 70)] + [(84 + i, 71 + i) + for i in range(20)] +dataset_lapa = dict( + type='LapaDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=lapa_coco133), *face_pipeline + ], +) + +dataset_wb = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_coco, dataset_halpe, dataset_ubody], + pipeline=[], + test_mode=False, +) + +dataset_body = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_posetrack, + dataset_humanart, + ], + pipeline=[], + test_mode=False, +) + +dataset_face = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_wflw, + dataset_300w, + dataset_cofw, + dataset_lapa, + ], + pipeline=[], + test_mode=False, +) + +hand_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +interhand_left = [(21, 95), (22, 94), (23, 93), (24, 92), (25, 99), (26, 98), + (27, 97), (28, 96), (29, 103), (30, 102), (31, 101), + (32, 100), (33, 107), (34, 106), (35, 105), (36, 104), + (37, 111), (38, 110), (39, 109), (40, 108), (41, 91)] +interhand_right = [(i - 21, j + 21) for i, j in interhand_left] +interhand_coco133 = interhand_right + interhand_left + +dataset_interhand2d = dict( + type='InterHand2DDoubleDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='interhand26m/annotations/all/InterHand2.6M_train_data.json', + camera_param_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_camera.json', + joint_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_joint_3d.json', + data_prefix=dict(img='interhand2.6m/images/train/'), + sample_interval=10, + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=interhand_coco133, + ), 
*hand_pipeline + ], +) + +dataset_hand = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_interhand2d], + pipeline=[], + test_mode=False, +) + +train_datasets = [dataset_wb, dataset_body, dataset_face, dataset_hand] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=4, + pin_memory=False, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/detection/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb320-270e_cocktail14-384x288.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb320-270e_cocktail14-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..a687f89ef62fa5b673c273d6702617619d7a3482 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb320-270e_cocktail14-384x288.py @@ -0,0 +1,617 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 10 +base_lr = 5e-4 +train_batch_size = 320 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=2560) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False, + decode_visibility=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-256x192-4d6dfc62_20230728.pth' # noqa + )), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[256, 512, 1024], + out_channels=None, + out_indices=( + 1, + 2, + ), + num_csp_blocks=2, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + head=dict( + type='RTMWHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=True, + label_beta=10., + mask=list(range(23, 91)), + mask_weight=0.5, + ), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping + +aic_coco133 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12), + (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)] + +crowdpose_coco133 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16)] + +mpii_coco133 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco133 = [ + (3, 6), + (4, 
5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco133 = [(i, i) + for i in range(17)] + [(20, 17), (21, 20), (22, 18), (23, 21), + (24, 19), + (25, 22)] + [(i, i - 3) + for i in range(26, 136)] + +posetrack_coco133 = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +humanart_coco133 = [(i, i) for i in range(17)] + [(17, 99), (18, 120), + (19, 17), (20, 20)] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_coco133) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_coco133) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_coco133) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_coco133) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_coco133) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_coco133) + ], +) + +dataset_humanart = dict( + type='HumanArt21Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart.json', + filter_cfg=dict(scenes=['real_human']), + data_prefix=dict(img='pose/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=humanart_coco133) + ]) + +ubody_scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +ubody_datasets = [] +for scene in ubody_scenes: + each = dict( + type='UBody2dDataset', + data_root=data_root, + data_mode=data_mode, + ann_file=f'Ubody/annotations/{scene}/train_annotations.json', + 
data_prefix=dict(img='pose/UBody/images/'), + pipeline=[], + sample_interval=10) + ubody_datasets.append(each) + +dataset_ubody = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/ubody2d.py'), + datasets=ubody_datasets, + pipeline=[], + test_mode=False, +) + +face_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale', padding=1.25), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +wflw_coco133 = [(i * 2, 23 + i) + for i in range(17)] + [(33 + i, 40 + i) for i in range(5)] + [ + (42 + i, 45 + i) for i in range(5) + ] + [(51 + i, 50 + i) + for i in range(9)] + [(60, 59), (61, 60), (63, 61), + (64, 62), (65, 63), (67, 64), + (68, 65), (69, 66), (71, 67), + (72, 68), (73, 69), + (75, 70)] + [(76 + i, 71 + i) + for i in range(20)] +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=wflw_coco133), *face_pipeline + ], +) + +mapping_300w_coco133 = [(i, 23 + i) for i in range(68)] +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mapping_300w_coco133), *face_pipeline + ], +) + +cofw_coco133 = [(0, 40), (2, 44), (4, 42), (1, 49), (3, 45), (6, 47), (8, 59), + (10, 62), (9, 68), (11, 65), (18, 54), (19, 58), (20, 53), + (21, 56), (22, 71), (23, 77), (24, 74), (25, 85), (26, 89), + (27, 80), (28, 31)] +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=cofw_coco133), *face_pipeline + ], +) + +lapa_coco133 = [(i * 2, 23 + i) for i in range(17)] + [ + (33 + i, 40 + i) for i in range(5) +] + [(42 + i, 45 + i) for i in range(5)] + [ + (51 + i, 50 + i) for i in range(4) +] + [(58 + i, 54 + i) for i in range(5)] + [(66, 59), (67, 60), (69, 61), + (70, 62), (71, 63), (73, 64), + (75, 65), (76, 66), (78, 67), + (79, 68), (80, 69), + (82, 70)] + [(84 + i, 71 + i) + for i in range(20)] +dataset_lapa = dict( + type='LapaDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=lapa_coco133), *face_pipeline + ], +) + +dataset_wb = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_coco, dataset_halpe, dataset_ubody], + pipeline=[], + test_mode=False, +) + +dataset_body = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_posetrack, + dataset_humanart, + ], + pipeline=[], + test_mode=False, +) + +dataset_face = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_wflw, + dataset_300w, + dataset_cofw, + dataset_lapa, + ], + pipeline=[], 
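+    # left empty on purpose: each face sub-dataset above already runs its own
+    # KeypointConverter + face_pipeline steps, and the shared train_pipeline is
+    # applied once by the top-level CombinedDataset in train_dataloader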
+ test_mode=False, +) + +hand_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +interhand_left = [(21, 95), (22, 94), (23, 93), (24, 92), (25, 99), (26, 98), + (27, 97), (28, 96), (29, 103), (30, 102), (31, 101), + (32, 100), (33, 107), (34, 106), (35, 105), (36, 104), + (37, 111), (38, 110), (39, 109), (40, 108), (41, 91)] +interhand_right = [(i - 21, j + 21) for i, j in interhand_left] +interhand_coco133 = interhand_right + interhand_left + +dataset_interhand2d = dict( + type='InterHand2DDoubleDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='interhand26m/annotations/all/InterHand2.6M_train_data.json', + camera_param_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_camera.json', + joint_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_joint_3d.json', + data_prefix=dict(img='interhand2.6m/images/train/'), + sample_interval=10, + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=interhand_coco133, + ), *hand_pipeline + ], +) + +dataset_hand = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_interhand2d], + pipeline=[], + test_mode=False, +) + +train_datasets = [dataset_wb, dataset_body, dataset_face, dataset_hand] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=4, + pin_memory=False, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/detection/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-m_8xb1024-270e_cocktail14-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-m_8xb1024-270e_cocktail14-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..fc9d90e5cd61a313c940ec9846a3d784f417dc69 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-m_8xb1024-270e_cocktail14-256x192.py @@ -0,0 +1,615 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 10 +base_lr 
= 5e-4 +train_batch_size = 1024 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=8192) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-ucoco_dw-ucoco_270e-256x192-c8b76419_20230728.pth' # noqa + )), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[192, 384, 768], + out_channels=None, + out_indices=( + 1, + 2, + ), + num_csp_blocks=2, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + head=dict( + type='RTMWHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=True, + label_beta=10., + mask=list(range(23, 91)), + mask_weight=0.5, + ), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='PackPoseInputs') +] +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping + +aic_coco133 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12), + (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)] + +crowdpose_coco133 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16)] + +mpii_coco133 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco133 = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco133 = [(i, i) + for i in range(17)] + [(20, 17), (21, 20), (22, 18), (23, 21), + (24, 19), + (25, 22)] + [(i, i - 3) + for i in range(26, 136)] + +posetrack_coco133 = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +humanart_coco133 = [(i, i) for i in range(17)] + [(17, 99), (18, 120), + (19, 17), (20, 20)] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_coco133) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_coco133) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_coco133) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_coco133) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_coco133) + ], +) + +dataset_posetrack = dict( + 
type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_coco133) + ], +) + +dataset_humanart = dict( + type='HumanArt21Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart.json', + filter_cfg=dict(scenes=['real_human']), + data_prefix=dict(img='pose/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=humanart_coco133) + ]) + +ubody_scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +ubody_datasets = [] +for scene in ubody_scenes: + each = dict( + type='UBody2dDataset', + data_root=data_root, + data_mode=data_mode, + ann_file=f'Ubody/annotations/{scene}/train_annotations.json', + data_prefix=dict(img='pose/UBody/images/'), + pipeline=[], + sample_interval=10) + ubody_datasets.append(each) + +dataset_ubody = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/ubody2d.py'), + datasets=ubody_datasets, + pipeline=[], + test_mode=False, +) + +face_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale', padding=1.25), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +wflw_coco133 = [(i * 2, 23 + i) + for i in range(17)] + [(33 + i, 40 + i) for i in range(5)] + [ + (42 + i, 45 + i) for i in range(5) + ] + [(51 + i, 50 + i) + for i in range(9)] + [(60, 59), (61, 60), (63, 61), + (64, 62), (65, 63), (67, 64), + (68, 65), (69, 66), (71, 67), + (72, 68), (73, 69), + (75, 70)] + [(76 + i, 71 + i) + for i in range(20)] +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=wflw_coco133), *face_pipeline + ], +) + +mapping_300w_coco133 = [(i, 23 + i) for i in range(68)] +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mapping_300w_coco133), *face_pipeline + ], +) + +cofw_coco133 = [(0, 40), (2, 44), (4, 42), (1, 49), (3, 45), (6, 47), (8, 59), + (10, 62), (9, 68), (11, 65), (18, 54), (19, 58), (20, 53), + (21, 56), (22, 71), (23, 77), (24, 74), (25, 85), (26, 89), + (27, 80), (28, 31)] +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=cofw_coco133), *face_pipeline + ], +) + +lapa_coco133 = [(i * 2, 23 + i) for i in range(17)] + [ + (33 + i, 40 + i) for i in range(5) +] + [(42 + i, 45 + i) for i in range(5)] + [ + (51 + i, 50 + i) for i in range(4) +] + [(58 + i, 54 + i) for i in range(5)] + [(66, 59), (67, 60), (69, 61), + (70, 62), (71, 63), (73, 64), + (75, 65), (76, 66), (78, 67), + (79, 68), (80, 69), + 
(82, 70)] + [(84 + i, 71 + i) + for i in range(20)] +dataset_lapa = dict( + type='LapaDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=lapa_coco133), *face_pipeline + ], +) + +dataset_wb = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_coco, dataset_halpe, dataset_ubody], + pipeline=[], + test_mode=False, +) + +dataset_body = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_posetrack, + dataset_humanart, + ], + pipeline=[], + test_mode=False, +) + +dataset_face = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_wflw, + dataset_300w, + dataset_cofw, + dataset_lapa, + ], + pipeline=[], + test_mode=False, +) + +hand_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +interhand_left = [(21, 95), (22, 94), (23, 93), (24, 92), (25, 99), (26, 98), + (27, 97), (28, 96), (29, 103), (30, 102), (31, 101), + (32, 100), (33, 107), (34, 106), (35, 105), (36, 104), + (37, 111), (38, 110), (39, 109), (40, 108), (41, 91)] +interhand_right = [(i - 21, j + 21) for i, j in interhand_left] +interhand_coco133 = interhand_right + interhand_left + +dataset_interhand2d = dict( + type='InterHand2DDoubleDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='interhand26m/annotations/all/InterHand2.6M_train_data.json', + camera_param_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_camera.json', + joint_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_joint_3d.json', + data_prefix=dict(img='interhand2.6m/images/train/'), + sample_interval=10, + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=interhand_coco133, + ), *hand_pipeline + ], +) + +dataset_hand = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_interhand2d], + pipeline=[], + test_mode=False, +) + +train_datasets = [dataset_wb, dataset_body, dataset_face, dataset_hand] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=4, + pin_memory=False, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/detection/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', 
max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb320-270e_cocktail14-384x288.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb320-270e_cocktail14-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..115dc9408b7cda685a387fb058f37298e67f28fe --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb320-270e_cocktail14-384x288.py @@ -0,0 +1,617 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 10 +base_lr = 5e-4 +train_batch_size = 320 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=2560) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False, + decode_visibility=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_simcc-ucoco_pt-aic-coco_270e-384x288-f5b50679_20230822.pth' # noqa + )), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[320, 640, 1280], + out_channels=None, + out_indices=( + 1, + 2, + ), + num_csp_blocks=2, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + head=dict( + type='RTMWHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=True, + label_beta=10., + mask=list(range(23, 91)), 
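+            # indices 23-90 are the 68 face keypoints of the 133-keypoint
+            # COCO-WholeBody layout; mask_weight below halves their loss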
+ mask_weight=0.5, + ), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping + +aic_coco133 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12), + (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)] + +crowdpose_coco133 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16)] + +mpii_coco133 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco133 = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco133 = [(i, i) + for i in range(17)] + [(20, 17), (21, 20), (22, 18), (23, 21), + (24, 19), + (25, 22)] + [(i, i - 3) + for i in range(26, 136)] + +posetrack_coco133 = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +humanart_coco133 = [(i, i) for i in range(17)] + [(17, 99), (18, 120), + (19, 17), (20, 20)] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_coco133) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_coco133) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_coco133) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_coco133) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_coco133) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_coco133) + ], +) + +dataset_humanart = dict( + type='HumanArt21Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart.json', + filter_cfg=dict(scenes=['real_human']), + data_prefix=dict(img='pose/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=humanart_coco133) + ]) + +ubody_scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +ubody_datasets = [] +for scene in ubody_scenes: + each = dict( + type='UBody2dDataset', + data_root=data_root, + data_mode=data_mode, + ann_file=f'Ubody/annotations/{scene}/train_annotations.json', + data_prefix=dict(img='pose/UBody/images/'), + pipeline=[], + sample_interval=10) + ubody_datasets.append(each) + +dataset_ubody = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/ubody2d.py'), + datasets=ubody_datasets, + pipeline=[], + test_mode=False, +) + +face_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale', padding=1.25), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +wflw_coco133 = [(i * 2, 23 + i) + for i in range(17)] + [(33 + i, 40 + i) for i in range(5)] + [ + (42 + i, 45 + i) for i in range(5) + ] + [(51 + i, 50 + i) + for i in range(9)] + [(60, 59), (61, 60), (63, 61), + (64, 62), (65, 63), (67, 64), + (68, 65), (69, 66), (71, 67), + (72, 68), (73, 69), + (75, 70)] + [(76 + i, 71 + i) + for i in range(20)] +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=wflw_coco133), *face_pipeline + ], +) + +mapping_300w_coco133 = [(i, 23 + i) for i in range(68)] +dataset_300w = dict( + 
type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mapping_300w_coco133), *face_pipeline + ], +) + +cofw_coco133 = [(0, 40), (2, 44), (4, 42), (1, 49), (3, 45), (6, 47), (8, 59), + (10, 62), (9, 68), (11, 65), (18, 54), (19, 58), (20, 53), + (21, 56), (22, 71), (23, 77), (24, 74), (25, 85), (26, 89), + (27, 80), (28, 31)] +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=cofw_coco133), *face_pipeline + ], +) + +lapa_coco133 = [(i * 2, 23 + i) for i in range(17)] + [ + (33 + i, 40 + i) for i in range(5) +] + [(42 + i, 45 + i) for i in range(5)] + [ + (51 + i, 50 + i) for i in range(4) +] + [(58 + i, 54 + i) for i in range(5)] + [(66, 59), (67, 60), (69, 61), + (70, 62), (71, 63), (73, 64), + (75, 65), (76, 66), (78, 67), + (79, 68), (80, 69), + (82, 70)] + [(84 + i, 71 + i) + for i in range(20)] +dataset_lapa = dict( + type='LapaDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=lapa_coco133), *face_pipeline + ], +) + +dataset_wb = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_coco, dataset_halpe, dataset_ubody], + pipeline=[], + test_mode=False, +) + +dataset_body = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_posetrack, + dataset_humanart, + ], + pipeline=[], + test_mode=False, +) + +dataset_face = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_wflw, + dataset_300w, + dataset_cofw, + dataset_lapa, + ], + pipeline=[], + test_mode=False, +) + +hand_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +interhand_left = [(21, 95), (22, 94), (23, 93), (24, 92), (25, 99), (26, 98), + (27, 97), (28, 96), (29, 103), (30, 102), (31, 101), + (32, 100), (33, 107), (34, 106), (35, 105), (36, 104), + (37, 111), (38, 110), (39, 109), (40, 108), (41, 91)] +interhand_right = [(i - 21, j + 21) for i, j in interhand_left] +interhand_coco133 = interhand_right + interhand_left + +dataset_interhand2d = dict( + type='InterHand2DDoubleDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='interhand26m/annotations/all/InterHand2.6M_train_data.json', + camera_param_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_camera.json', + joint_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_joint_3d.json', + data_prefix=dict(img='interhand2.6m/images/train/'), + sample_interval=10, + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=interhand_coco133, + ), *hand_pipeline + ], +) + +dataset_hand = dict( + type='CombinedDataset', + 
metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_interhand2d], + pipeline=[], + test_mode=False, +) + +train_datasets = [dataset_wb, dataset_body, dataset_face, dataset_hand] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=4, + pin_memory=False, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/detection/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb704-270e_cocktail14-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb704-270e_cocktail14-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..750ad46d3d1c6982837fa75ca3083245a492a9bd --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb704-270e_cocktail14-256x192.py @@ -0,0 +1,615 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 10 +base_lr = 5e-4 +train_batch_size = 704 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.1), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=5632) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CSPNeXt', + 
arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_simcc-ucoco_pt-aic-coco_270e-256x192-05f5bcb7_20230822.pth' # noqa + )), + neck=dict( + type='CSPNeXtPAFPN', + in_channels=[320, 640, 1280], + out_channels=None, + out_indices=( + 1, + 2, + ), + num_csp_blocks=2, + expand_ratio=0.5, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU', inplace=True)), + head=dict( + type='RTMWHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=True, + label_beta=10., + mask=list(range(23, 91)), + mask_weight=0.5, + ), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping + +aic_coco133 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12), + (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)] + +crowdpose_coco133 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16)] + +mpii_coco133 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco133 = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + 
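+# Note on the *_coco133 tables above and below: each entry is a
+# (source_index, target_index) pair consumed by `KeypointConverter`, which
+# re-indexes a source dataset's keypoints onto the 133-keypoint
+# COCO-WholeBody layout; target joints without a mapping are simply left
+# unlabeled for that dataset. Illustrative sketch of the remapping (an
+# assumption for clarity, not the actual mmpose implementation):
+#     converted = np.zeros((133, 2)); visible = np.zeros(133)
+#     for src, dst in aic_coco133:
+#         converted[dst] = keypoints[src]
+#         visible[dst] = 1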
+halpe_coco133 = [(i, i) + for i in range(17)] + [(20, 17), (21, 20), (22, 18), (23, 21), + (24, 19), + (25, 22)] + [(i, i - 3) + for i in range(26, 136)] + +posetrack_coco133 = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +humanart_coco133 = [(i, i) for i in range(17)] + [(17, 99), (18, 120), + (19, 17), (20, 20)] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_coco133) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_coco133) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_coco133) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_coco133) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_coco133) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_coco133) + ], +) + +dataset_humanart = dict( + type='HumanArt21Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart.json', + filter_cfg=dict(scenes=['real_human']), + data_prefix=dict(img='pose/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=humanart_coco133) + ]) + +ubody_scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +ubody_datasets = [] +for scene in ubody_scenes: + each = dict( + type='UBody2dDataset', + data_root=data_root, + data_mode=data_mode, + ann_file=f'Ubody/annotations/{scene}/train_annotations.json', + data_prefix=dict(img='pose/UBody/images/'), + pipeline=[], + sample_interval=10) + ubody_datasets.append(each) + +dataset_ubody = dict( + 
type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/ubody2d.py'), + datasets=ubody_datasets, + pipeline=[], + test_mode=False, +) + +face_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale', padding=1.25), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +wflw_coco133 = [(i * 2, 23 + i) + for i in range(17)] + [(33 + i, 40 + i) for i in range(5)] + [ + (42 + i, 45 + i) for i in range(5) + ] + [(51 + i, 50 + i) + for i in range(9)] + [(60, 59), (61, 60), (63, 61), + (64, 62), (65, 63), (67, 64), + (68, 65), (69, 66), (71, 67), + (72, 68), (73, 69), + (75, 70)] + [(76 + i, 71 + i) + for i in range(20)] +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=wflw_coco133), *face_pipeline + ], +) + +mapping_300w_coco133 = [(i, 23 + i) for i in range(68)] +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mapping_300w_coco133), *face_pipeline + ], +) + +cofw_coco133 = [(0, 40), (2, 44), (4, 42), (1, 49), (3, 45), (6, 47), (8, 59), + (10, 62), (9, 68), (11, 65), (18, 54), (19, 58), (20, 53), + (21, 56), (22, 71), (23, 77), (24, 74), (25, 85), (26, 89), + (27, 80), (28, 31)] +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=cofw_coco133), *face_pipeline + ], +) + +lapa_coco133 = [(i * 2, 23 + i) for i in range(17)] + [ + (33 + i, 40 + i) for i in range(5) +] + [(42 + i, 45 + i) for i in range(5)] + [ + (51 + i, 50 + i) for i in range(4) +] + [(58 + i, 54 + i) for i in range(5)] + [(66, 59), (67, 60), (69, 61), + (70, 62), (71, 63), (73, 64), + (75, 65), (76, 66), (78, 67), + (79, 68), (80, 69), + (82, 70)] + [(84 + i, 71 + i) + for i in range(20)] +dataset_lapa = dict( + type='LapaDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=lapa_coco133), *face_pipeline + ], +) + +dataset_wb = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_coco, dataset_halpe, dataset_ubody], + pipeline=[], + test_mode=False, +) + +dataset_body = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_posetrack, + dataset_humanart, + ], + pipeline=[], + test_mode=False, +) + +dataset_face = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[ + dataset_wflw, + dataset_300w, + dataset_cofw, + dataset_lapa, + ], + pipeline=[], + test_mode=False, +) + +hand_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict( 
+ type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[1.5, 2.0], + rotate_factor=0), +] + +interhand_left = [(21, 95), (22, 94), (23, 93), (24, 92), (25, 99), (26, 98), + (27, 97), (28, 96), (29, 103), (30, 102), (31, 101), + (32, 100), (33, 107), (34, 106), (35, 105), (36, 104), + (37, 111), (38, 110), (39, 109), (40, 108), (41, 91)] +interhand_right = [(i - 21, j + 21) for i, j in interhand_left] +interhand_coco133 = interhand_right + interhand_left + +dataset_interhand2d = dict( + type='InterHand2DDoubleDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='interhand26m/annotations/all/InterHand2.6M_train_data.json', + camera_param_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_camera.json', + joint_file='interhand26m/annotations/all/' + 'InterHand2.6M_train_joint_3d.json', + data_prefix=dict(img='interhand2.6m/images/train/'), + sample_interval=10, + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=interhand_coco133, + ), *hand_pipeline + ], +) + +dataset_hand = dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=[dataset_interhand2d], + pipeline=[], + test_mode=False, +) + +train_datasets = [dataset_wb, dataset_body, dataset_face, dataset_hand] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=4, + pin_memory=False, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/detection/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.md b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.md new file mode 100644 index 0000000000000000000000000000000000000000..b53522226a1f345adbad0750624bf7046246f1d7 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.md @@ -0,0 +1,80 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
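+The config names below follow the usual MMPose pattern: for example,
+`rtmw-x_8xb704-270e_cocktail14-256x192` denotes the RTMW-x model trained for
+270 epochs on the Cocktail14 mixture with a per-GPU batch size of 704 across
+8 GPUs at a 256x192 input. As a quick sanity check, the config can be loaded
+and inspected with `mmengine` (a minimal sketch; the path assumes the configs
+are resolved relative to the `mmpose` directory of this repository):
+
+```python
+from mmengine.config import Config
+
+cfg = Config.fromfile(
+    'configs/wholebody_2d_keypoint/rtmpose/cocktail14/'
+    'rtmw-x_8xb704-270e_cocktail14-256x192.py')
+print(cfg.model.head.type, cfg.train_dataloader.batch_size)  # RTMWHead 704
+```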
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
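+The checkpoints listed in the table below can be exercised end-to-end with
+MMPose's high-level inferencer. The snippet is a minimal sketch, assuming an
+environment with `mmpose` installed; `demo.jpg` is a placeholder input image:
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+# Config path and checkpoint URL taken from the rtmw-x 256x192 row below.
+inferencer = MMPoseInferencer(
+    pose2d='configs/wholebody_2d_keypoint/rtmpose/cocktail14/'
+    'rtmw-x_8xb704-270e_cocktail14-256x192.py',
+    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/rtmw/'
+    'rtmw-x_simcc-cocktail14_pt-ucoco_270e-256x192-13a2546d_20231208.pth')
+result = next(inferencer('demo.jpg'))  # the inferencer yields one dict per input
+```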
+ +- `Cocktail14` denotes model trained on 14 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) + - [COCO-Wholebody](https://github.com/jin-s13/COCO-WholeBody/) + - [UBody](https://github.com/IDEA-Research/OSX) + - [Human-Art](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#human-art-dataset) + - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) + - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) + - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) + - [LaPa](https://github.com/JDAI-CV/lapa-dataset) + - [InterHand](https://mks0601.github.io/InterHand2.6M/) + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------------------------: | :-: | +| [rtmw-m](/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-m_8xb1024-270e_cocktail14-256x192.py) | 256x192 | 0.676 | 0.747 | 0.671 | 0.794 | 0.783 | 0.854 | 0.491 | 0.604 | 0.582 | 0.673 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-l-m_simcc-cocktail14_270e-256x192-20231122.pth) | - | +| [rtmw-l](/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb1024-270e_cocktail14-256x192.py) | 256x192 | 0.743 | 0.807 | 0.763 | 0.868 | 0.834 | 0.889 | 0.598 | 0.701 | 0.660 | 0.746 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-x-l_simcc-cocktail14_270e-256x192-20231122.pth) | - | +| [rtmw-x](/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb704-270e_cocktail14-256x192.py) | 256x192 | 0.746 | 0.808 | 0.770 | 0.869 | 0.844 | 0.896 | 0.610 | 0.710 | 0.672 | 0.752 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-x_simcc-cocktail14_pt-ucoco_270e-256x192-13a2546d_20231208.pth) | - | +| [rtmw-l](/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb320-270e_cocktail14-384x288.py) | 384x288 | 0.761 | 0.824 | 0.793 | 0.885 | 0.884 | 0.921 | 0.663 | 0.752 | 0.701 | 0.780 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-x-l_simcc-cocktail14_270e-384x288-20231122.pth) | - | +| [rtmw-x](/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb320-270e_cocktail14-384x288.py) | 384x288 | 0.763 | 0.826 | 0.796 | 0.888 | 0.884 | 0.923 | 0.664 | 0.755 | 0.702 | 0.781 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-x_simcc-cocktail14_pt-ucoco_270e-384x288-f840f204_20231122.pth) | - | diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.yml b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.yml new file mode 100644 index 
0000000000000000000000000000000000000000..799a966dc87c732f2239db5148bb676a8bb0e692 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw_cocktail14.yml @@ -0,0 +1,108 @@ +Models: +- Config: configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-m_8xb1024-270e_cocktail14-256x192.py + In Collection: RTMPose + Alias: wholebody + Metadata: + Architecture: &id001 + - RTMPose + Training Data: COCO-WholeBody + Name: rtmw-m_8xb1024-270e_cocktail14-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.676 + Body AR: 0.747 + Face AP: 0.783 + Face AR: 0.854 + Foot AP: 0.671 + Foot AR: 0.794 + Hand AP: 0.491 + Hand AR: 0.604 + Whole AP: 0.582 + Whole AR: 0.673 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-l-m_simcc-cocktail14_270e-256x192-20231122.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb1024-270e_cocktail14-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmw-l_8xb1024-270e_cocktail14-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.743 + Body AR: 0.807 + Face AP: 0.834 + Face AR: 0.889 + Foot AP: 0.763 + Foot AR: 0.868 + Hand AP: 0.598 + Hand AR: 0.701 + Whole AP: 0.660 + Whole AR: 0.746 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-x-l_simcc-cocktail14_270e-256x192-20231122.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb704-270e_cocktail14-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmw-x_8xb704-270e_cocktail14-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.746 + Body AR: 0.808 + Face AP: 0.844 + Face AR: 0.896 + Foot AP: 0.770 + Foot AR: 0.869 + Hand AP: 0.610 + Hand AR: 0.710 + Whole AP: 0.672 + Whole AR: 0.752 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-x_simcc-cocktail14_pt-ucoco_270e-256x192-13a2546d_20231208.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-l_8xb320-270e_cocktail14-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmw-l_8xb320-270e_cocktail14-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.761 + Body AR: 0.824 + Face AP: 0.884 + Face AR: 0.921 + Foot AP: 0.793 + Foot AR: 0.885 + Hand AP: 0.663 + Hand AR: 0.752 + Whole AP: 0.701 + Whole AR: 0.780 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-dw-x-l_simcc-cocktail14_270e-384x288-20231122.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/cocktail14/rtmw-x_8xb320-270e_cocktail14-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmw-x_8xb320-270e_cocktail14-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.763 + Body AR: 0.826 + Face AP: 0.884 + Face AR: 0.923 + Foot AP: 0.796 + Foot AR: 0.888 + Hand AP: 0.664 + Hand AR: 0.755 + Whole AP: 0.702 + Whole AR: 0.781 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmw/rtmw-x_simcc-cocktail14_pt-ucoco_270e-384x288-f840f204_20231122.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py 
b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..39a6ff79d784df9518a1f457d129c6a89cfc97ca --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + 
min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..9f32f25777af9d6bc8b668f61bfab76b29d9eea0 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + 
end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + 
max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..8c8c92d5f792a7516a603d326bb8e138bfe212b6 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py @@ -0,0 +1,232 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + 
init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + 
bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..55b11c419ae49b9f8e8e9a579ff89057c6b0ba0f --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py @@ -0,0 +1,233 @@ +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + 
decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md new file 
mode 100644 index 0000000000000000000000000000000000000000..e43c0b3750028497554c8c283f676985c8d9f03b --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md @@ -0,0 +1,62 @@ + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
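+For the checkpoints in the table below, a lower-level alternative to the
+high-level inferencer is the topdown API. This is a sketch under the
+assumption that person boxes are already available (for example from the
+detection results file used for evaluation); `demo.jpg` and the xyxy box are
+placeholders:
+
+```python
+from mmpose.apis import inference_topdown, init_model
+
+# Config path and checkpoint URL taken from the rtmpose-m 256x192 row below.
+config = ('configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/'
+          'rtmpose-m_8xb64-270e_coco-wholebody-256x192.py')
+checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
+              'rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth')
+model = init_model(config, checkpoint, device='cpu')
+pose_samples = inference_topdown(model, 'demo.jpg', bboxes=[[50, 50, 250, 450]])
+keypoints = pose_samples[0].pred_instances.keypoints  # (1, 133, 2)
+```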
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [rtmpose-m](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.673 | 0.750 | 0.615 | 0.752 | 0.813 | 0.871 | 0.475 | 0.589 | 0.582 | 0.674 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.json) | +| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.695 | 0.769 | 0.658 | 0.785 | 0.833 | 0.887 | 0.519 | 0.628 | 0.611 | 0.700 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.json) | +| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 0.712 | 0.781 | 0.693 | 0.811 | 0.882 | 0.919 | 0.579 | 0.677 | 0.648 | 0.730 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..0c1fa437f51a6297370e1535d2ddda8cd5e762d7 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml @@ -0,0 +1,66 @@ +Models: +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py + In Collection: RTMPose + Alias: wholebody + Metadata: + Architecture: &id001 + - RTMPose + Training Data: COCO-WholeBody + Name: rtmpose-m_8xb64-270e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.673 + Body AR: 0.750 + Face AP: 0.813 + Face AR: 0.871 + Foot AP: 0.615 + Foot AR: 0.752 + Hand AP: 0.475 + Hand AR: 0.589 + Whole AP: 0.582 + Whole AR: 0.674 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmpose-l_8xb64-270e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.695 + Body AR: 0.769 + Face AP: 0.833 + Face AR: 0.887 + Foot AP: 0.658 + Foot AR: 0.785 + Hand AP: 0.519 + Hand AR: 
0.628 + Whole AP: 0.611 + Whole AR: 0.700 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmpose-l_8xb32-270e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.712 + Body AR: 0.781 + Face AP: 0.882 + Face AR: 0.919 + Foot AP: 0.693 + Foot AR: 0.811 + Hand AP: 0.579 + Hand AR: 0.677 + Whole AP: 0.648 + Whole AR: 0.730 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..203766402c5189559095290f49b1c376d444a63e --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py @@ -0,0 +1,256 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=(9, 12), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + 
label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + 
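+# The hooks below implement the two-stage recipe shared by these configs:
+# EMAHook maintains an exponential-moving-average copy of the weights during
+# training, and mmdet.PipelineSwitchHook swaps the heavy augmentation pipeline
+# for the milder `train_pipeline_stage2` for the final `stage2_num_epochs`
+# epochs (here the last 30 of 270).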
+# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..66c42ad8a80a48ee4784bcffb769ebbb157545f3 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py @@ -0,0 +1,256 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 
'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - 
stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..0856fbbe9bf361df1e28d9690d0ea05f5c70ebc8 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py @@ -0,0 +1,256 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + 
pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..66562ee8671b2d79e0a20d9903dcb1aa41aad3e2 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py @@ -0,0 +1,256 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + 
sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..beb10b16f315961e2ce7b8dd6506bd3717ea7023 --- 
/dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py @@ -0,0 +1,256 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 
1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb32-270e_coco-ubody-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb32-270e_coco-ubody-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..695f64089720ce87e765785d04815110930f00bd --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb32-270e_coco-ubody-wholebody-384x288.py @@ -0,0 +1,260 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 
+train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + 
dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb64-270e_coco-ubody-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb64-270e_coco-ubody-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..30f1015394dffdbd8d0c313375e50c1fb472da07 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/rtmpose/ubody/rtmpose-x_8xb64-270e_coco-ubody-wholebody-256x192.py @@ -0,0 +1,260 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa: E501 + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +backend_args = dict(backend='local') + +scenes = [ + 'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/README.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..71837c998b09214497ca46b4a19c28e2ec4897fb --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/README.md @@ -0,0 +1,35 @@ +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
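Editor's note: the paragraph above describes the two-stage, heatmap-based pipeline but leaves the final step implicit — each keypoint is recovered from its heatmap peak and mapped back into the original image through the detected person box. The sketch below is a rough, self-contained illustration of that decoding step (plain NumPy; function names and shapes are assumptions for this sketch, not mmpose's actual decoder, which adds sub-pixel refinements such as the DARK/UDP post-processing used by some configs in this directory).

```python
import numpy as np

# Rough illustration (not mmpose code) of top-down heatmap decoding: take the
# per-keypoint heatmap peak and map it back to image coordinates through the
# detected person box. Real decoders add sub-pixel refinement (e.g. DARK/UDP).

def decode_heatmaps(heatmaps, bbox_xywh):
    """heatmaps: (K, H, W) array for one person crop; bbox_xywh: its box in the image."""
    x0, y0, bw, bh = bbox_xywh
    K, H, W = heatmaps.shape
    keypoints = np.zeros((K, 2), dtype=np.float32)
    scores = np.zeros(K, dtype=np.float32)
    for k, hm in enumerate(heatmaps):
        py, px = np.unravel_index(np.argmax(hm), hm.shape)   # peak (row, col)
        keypoints[k] = (x0 + (px + 0.5) / W * bw,            # heatmap grid -> image x
                        y0 + (py + 0.5) / H * bh)            # heatmap grid -> image y
        scores[k] = hm[py, px]
    return keypoints, scores

# Toy usage: 133 whole-body "heatmaps" at the 48x64 resolution used by the UDP
# configs that follow, for a person box of 100x200 px located at (50, 40).
kpts, conf = decode_heatmaps(np.random.rand(133, 64, 48), (50, 40, 100, 200))
print(kpts.shape, conf.shape)   # (133, 2) (133,)
```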
+ +
+ +## Results and Models + +### COCO-WholeBody Dataset + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | Whole AP | Whole AR | Details and Download | +| :-----------------: | :--------: | :------: | :------: | :-----------------------------------------------------------------------------: | +| HRNet-w48+Dark+ | 384x288 | 0.661 | 0.743 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | +| HRNet-w32+Dark | 256x192 | 0.582 | 0.671 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | +| HRNet-w48 | 256x192 | 0.579 | 0.681 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | +| CSPNeXt-m | 256x192 | 0.567 | 0.641 | [cspnext_udp_coco-wholebody.md](./coco-wholebody/cspnext_udp_coco-wholebody.md) | +| HRNet-w32 | 256x192 | 0.549 | 0.646 | [hrnet_ubody-coco-wholebody.md](./ubody2d/hrnet_ubody-coco-wholebody.md) | +| ResNet-152 | 256x192 | 0.548 | 0.661 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| HRNet-w32 | 256x192 | 0.536 | 0.636 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | +| ResNet-101 | 256x192 | 0.531 | 0.645 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| S-ViPNAS-Res50+Dark | 256x192 | 0.528 | 0.632 | [vipnas_dark_coco-wholebody.md](./coco-wholebody/vipnas_dark_coco-wholebody.md) | +| ResNet-50 | 256x192 | 0.521 | 0.633 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| S-ViPNAS-Res50 | 256x192 | 0.495 | 0.607 | [vipnas_coco-wholebody.md](./coco-wholebody/vipnas_coco-wholebody.md) | + +### UBody2D Dataset + +Result on UBody val set, computed with gt keypoints. + +| Model | Input Size | Whole AP | Whole AR | Details and Download | +| :-------: | :--------: | :------: | :------: | :----------------------------------------------------------------------: | +| HRNet-w32 | 256x192 | 0.690 | 0.729 | [hrnet_ubody-coco-wholebody.md](./ubody2d/hrnet_ubody-coco-wholebody.md) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..7182e7a3ed0f235cad12e512008689606ddb8d5c --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,212 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model 
settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..05fae649b8fe7d698255255531e878d954734edd --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,212 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..1fc4a78dfbda287d33c6edd16d9d36944992f365 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md @@ -0,0 +1,56 @@ + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
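For quick qualitative checks, the config and checkpoint listed in the results table below can be loaded through mmpose's high-level inferencer. A minimal sketch, assuming the mmpose 1.x `MMPoseInferencer` API, execution from the repository root, and a placeholder image path (`demo.jpg`):

```python
# Whole-body inference sketch for the CSPNeXt-m UDP model listed below.
# Assumptions: mmpose 1.x is installed, the working directory contains the
# `configs/` tree, and 'demo.jpg' is a placeholder test image.
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(
    pose2d='configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
           'cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py',
    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
                   'cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth')

# The inferencer yields one result per input image; each predicted instance
# carries 133 whole-body keypoints.
result = next(inferencer('demo.jpg'))
print(result['predictions'])
```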
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_cspnext_m_udp](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.687 | 0.735 | 0.680 | 0.763 | 0.697 | 0.755 | 0.460 | 0.543 | 0.567 | 0.641 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..ebdcc7146ef7969da2fdd26926c92268a9abae90 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml @@ -0,0 +1,24 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - UDP + - CSPNeXt + Training Data: COCO-WholeBody + Name: cspnext-m_udp_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.687 + Body AR: 0.735 + Face AP: 0.697 + Face AR: 0.755 + Foot AP: 0.680 + Foot AR: 0.763 + Hand AP: 0.46 + Hand AR: 0.567 + Whole AP: 0.567 + Whole AR: 0.641 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..53f240bc528f9c81e65620fb33dbc4546519001c --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md @@ -0,0 +1,41 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
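The checkpoints in the table below can also be driven through the lower-level top-down API when person boxes are already available. A minimal sketch for the HRNet-w32 256x192 entry, assuming the mmpose 1.x `init_model` / `inference_topdown` functions, execution from the repository root, and a placeholder image and bounding box:

```python
# Top-down inference sketch for the HRNet-w32 256x192 model listed below.
# Assumptions: mmpose 1.x, working directory contains `configs/`, and the
# image path plus the person box (xyxy pixels) are placeholders.
import numpy as np
from mmpose.apis import init_model, inference_topdown

config = ('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
          'td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/hrnet/'
              'hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth')
model = init_model(config, checkpoint, device='cpu')

bboxes = np.array([[100., 50., 400., 600.]])      # one person box (placeholder)
results = inference_topdown(model, 'demo.jpg', bboxes=bboxes)
keypoints = results[0].pred_instances.keypoints   # shape (1, 133, 2)
```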
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.678 | 0.755 | 0.543 | 0.661 | 0.630 | 0.708 | 0.467 | 0.566 | 0.536 | 0.636 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_20200918.log.json) | +| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.700 | 0.772 | 0.585 | 0.691 | 0.726 | 0.783 | 0.515 | 0.603 | 0.586 | 0.673 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288_20200922.log.json) | +| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.701 | 0.776 | 0.675 | 0.787 | 0.656 | 0.743 | 0.535 | 0.639 | 0.579 | 0.681 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192_20200922.log.json) | +| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.722 | 0.791 | 0.696 | 0.801 | 0.776 | 0.834 | 0.587 | 0.678 | 0.632 | 0.717 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_20200922.log.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..929bd0535671b25499624cf41487008d4be27ab2 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml @@ -0,0 +1,86 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.678 + Body AR: 0.755 + Face AP: 0.630 + Face AR: 0.708 + Foot AP: 0.543 + Foot AR: 0.661 + Hand AP: 0.467 + Hand AR: 0.566 + Whole AP: 0.536 + Whole AR: 0.636 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py + In 
Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.700 + Body AR: 0.772 + Face AP: 0.726 + Face AR: 0.783 + Foot AP: 0.585 + Foot AR: 0.691 + Hand AP: 0.515 + Hand AR: 0.603 + Whole AP: 0.586 + Whole AR: 0.673 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.701 + Body AR: 0.776 + Face AP: 0.656 + Face AR: 0.743 + Foot AP: 0.675 + Foot AR: 0.787 + Hand AP: 0.535 + Hand AR: 0.639 + Whole AP: 0.579 + Whole AR: 0.681 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.722 + Body AR: 0.791 + Face AP: 0.776 + Face AR: 0.834 + Foot AP: 0.696 + Foot AR: 0.801 + Hand AP: 0.587 + Hand AR: 0.678 + Whole AP: 0.632 + Whole AR: 0.717 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..b215b3c5f25b595ca30ae54b743c907a53629084 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md @@ -0,0 +1,58 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
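The `dark` configs referenced in the table below differ from the plain heatmap configs only in the codec settings: DarkPose's distribution-aware (unbiased) heatmap encoding and decoding is switched on via `unbiased=True`. A minimal sketch that makes the difference visible, assuming mmengine is installed and the full config tree is available relative to the working directory:

```python
# Compare the plain and DARK HRNet-w32 256x192 configs: the only change is
# the MSRAHeatmap codec running in unbiased (DARK) mode.
# Assumption: run from the directory that contains the `configs/` tree.
from mmengine.config import Config

plain = Config.fromfile(
    'configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
    'td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py')
dark = Config.fromfile(
    'configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
    'td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py')

print(plain.codec)  # MSRAHeatmap, sigma=2, no 'unbiased' key
print(dark.codec)   # same settings plus unbiased=True
```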
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_hrnet_w32_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.693 | 0.764 | 0.564 | 0.674 | 0.737 | 0.809 | 0.503 | 0.602 | 0.582 | 0.671 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark_20200922.log.json) | +| [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.742 | 0.807 | 0.707 | 0.806 | 0.841 | 0.892 | 0.602 | 0.694 | 0.661 | 0.743 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark_20200918.log.json) | + +Note: `+` means the model is first pre-trained on original COCO dataset, and then fine-tuned on COCO-WholeBody dataset. We find this will lead to better performance. diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..d0e2bd69542be8c482aee9498b9369e4151440c8 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml @@ -0,0 +1,45 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.693 + Body AR: 0.764 + Face AP: 0.737 + Face AR: 0.809 + Foot AP: 0.564 + Foot AR: 0.674 + Hand AP: 0.503 + Hand AR: 0.602 + Whole AP: 0.582 + Whole AR: 0.671 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.742 + Body AR: 0.807 + Face AP: 0.841 + Face AR: 0.892 + Foot AP: 0.707 + Foot AR: 0.806 + Hand AP: 0.602 + Hand AR: 0.694 + Whole AP: 0.661 + Whole AR: 0.743 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md 
b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..e4a189833b6b731d0efe5a5e6b9426c9a78ce1b3 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md @@ -0,0 +1,43 @@ + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
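All numbers in the table below are obtained with detector boxes rather than ground-truth boxes; the val dataloaders point at `COCO_val2017_detections_AP_H_56_person.json` for this purpose. A minimal sketch of feeding such detections to the top-down API for the ResNet-50 256x192 model, assuming mmpose 1.x, the standard COCO detection-result format (`[x, y, w, h]` boxes), and placeholder data paths:

```python
# Run SimpleBaseline (ResNet-50) on detector boxes from the person detection
# file referenced by the configs. Assumptions: mmpose 1.x, detection json in
# the standard COCO result format, and COCO val2017 images under data/coco/.
import json
import numpy as np
from mmpose.apis import init_model, inference_topdown

model = init_model(
    'configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
    'td-hm_res50_8xb64-210e_coco-wholebody-256x192.py',
    'https://download.openmmlab.com/mmpose/top_down/resnet/'
    'res50_coco_wholebody_256x192-9e37ed88_20201004.pth',
    device='cpu')

with open('data/coco/person_detection_results/'
          'COCO_val2017_detections_AP_H_56_person.json') as f:
    detections = json.load(f)

# Take the person boxes of a single image and convert xywh -> xyxy.
image_id = detections[0]['image_id']
boxes = np.array([d['bbox'] for d in detections if d['image_id'] == image_id])
boxes[:, 2:] += boxes[:, :2]

img = f'data/coco/val2017/{image_id:012d}.jpg'
results = inference_topdown(model, img, bboxes=boxes)  # one data sample per box
print(len(results), 'pose instances predicted')
```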
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.652 | 0.738 | 0.615 | 0.749 | 0.606 | 0.715 | 0.460 | 0.584 | 0.521 | 0.633 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.666 | 0.747 | 0.634 | 0.763 | 0.731 | 0.811 | 0.536 | 0.646 | 0.574 | 0.670 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288_20201004.log.json) | +| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.669 | 0.753 | 0.637 | 0.766 | 0.611 | 0.722 | 0.463 | 0.589 | 0.531 | 0.645 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.692 | 0.770 | 0.680 | 0.799 | 0.746 | 0.820 | 0.548 | 0.657 | 0.597 | 0.693 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288_20201004.log.json) | +| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.682 | 0.764 | 0.661 | 0.787 | 0.623 | 0.728 | 0.481 | 0.607 | 0.548 | 0.661 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.704 | 0.780 | 0.693 | 0.813 | 0.751 | 0.824 | 0.559 | 0.666 | 0.610 | 0.705 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288_20201004.log.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..0e8db24f6acb4166776e7cbf4ba2109b1f2a28a3 --- /dev/null +++ 
b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml @@ -0,0 +1,128 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + Training Data: COCO-WholeBody + Name: td-hm_res50_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.652 + Body AR: 0.738 + Face AP: 0.606 + Face AR: 0.715 + Foot AP: 0.615 + Foot AR: 0.749 + Hand AP: 0.46 + Hand AR: 0.584 + Whole AP: 0.521 + Whole AR: 0.633 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res50_8xb64-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.666 + Body AR: 0.747 + Face AP: 0.731 + Face AR: 0.811 + Foot AP: 0.634 + Foot AR: 0.763 + Hand AP: 0.536 + Hand AR: 0.646 + Whole AP: 0.574 + Whole AR: 0.67 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res101_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.669 + Body AR: 0.753 + Face AP: 0.611 + Face AR: 0.722 + Foot AP: 0.637 + Foot AR: 0.766 + Hand AP: 0.463 + Hand AR: 0.589 + Whole AP: 0.531 + Whole AR: 0.645 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res101_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.692 + Body AR: 0.77 + Face AP: 0.746 + Face AR: 0.82 + Foot AP: 0.68 + Foot AR: 0.799 + Hand AP: 0.548 + Hand AR: 0.657 + Whole AP: 0.598 + Whole AR: 0.691 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res152_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.682 + Body AR: 0.764 + Face AP: 0.623 + Face AR: 0.728 + Foot AP: 0.661 + Foot AR: 0.787 + Hand AP: 0.481 + Hand AR: 0.607 + Whole AP: 0.548 + Whole AR: 0.661 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: 
*id001 + Training Data: COCO-WholeBody + Name: td-hm_res152_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.704 + Body AR: 0.78 + Face AP: 0.751 + Face AR: 0.824 + Foot AP: 0.693 + Foot AR: 0.813 + Hand AP: 0.559 + Hand AR: 0.666 + Whole AP: 0.61 + Whole AR: 0.705 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..2595e3fc13e6913a01af45fa8d7b9c6377511ddb --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..727fa9472ec9c446cb572e6c4fcd49976bf3916b --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset 
settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..ffee1d1383e4757b79ed0ea4461c69d7b4247b15 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + 
num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..892b4b7936123840c3192e87491123e5f11b3f7f --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + 
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..d587dbc45bf2f90a3912e263d63d0dc64205298a --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py @@ -0,0 +1,150 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, 
val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git 
a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..63175b99ea3e604fb87e1e45ef921aee2e7a1b16 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py @@ -0,0 +1,154 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..c0d8187ab47b54d445d3f125da596f381c494309 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..42e98575fba714ab65f3f19f226b5c06c2898a93 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..10c16eb71f9ac28ea6746e85d51ed526dd035abe --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..43ec5fb67c23df4e5e3d1c93072c41e0d08b88a6 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..e568c78b175bf3cc3364235c671d04944d84c53f --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py new file mode 100644 index 0000000000000000000000000000000000000000..6869d17ba998b7918133eefcf98fc3344e729a26 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py @@ -0,0 +1,121 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..cad9c539bef73cce6fc8e48e9d91489ea9f72270 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,122 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=133, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) 
+val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..d34ea50db64b6a2716469ebf872045b9308fc413 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,126 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=133, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..822e4c698a54a82a62fd30f6cc891f814d024930 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,123 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ViPNAS_ResNet', + depth=50, + ), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..15b152fe96d3d60806c3461a04a0a4b5c66b3c96 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py @@ -0,0 +1,127 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ViPNAS_ResNet', + depth=50, + ), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..63fc0aed8af576808a7c6ee7dac89c3235d2ebfc --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md @@ -0,0 +1,38 @@ + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [S-ViPNAS-MobileNetV3](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.619 | 0.700 | 0.477 | 0.608 | 0.585 | 0.689 | 0.386 | 0.505 | 0.473 | 0.578 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_20211205.log.json) | +| [S-ViPNAS-Res50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.643 | 0.726 | 0.553 | 0.694 | 0.587 | 0.698 | 0.410 | 0.529 | 0.495 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_20211112.log.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..28148364075f0ea14c844965a82db732620fd3f2 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml @@ -0,0 +1,44 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + Training Data: COCO-WholeBody + Name: td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.619 + Body AR: 0.7 + Face AP: 0.585 + Face AR: 0.689 + Foot AP: 0.477 + Foot AR: 0.608 + Hand AP: 0.386 + Hand AR: 0.505 + Whole AP: 0.473 + Whole AR: 0.578 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.643 + Body AR: 0.726 + Face AP: 0.587 + Face AR: 0.698 + Foot AP: 0.553 + Foot AR: 0.694 + Hand AP: 0.41 + Hand AR: 0.529 + Whole AP: 0.495 + Whole AR: 0.607 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..e39c66e913ab44ae58e511e987397ebae5e30fe1 --- /dev/null +++ 
b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md @@ -0,0 +1,55 @@ + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [S-ViPNAS-MobileNetV3_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.632 | 0.710 | 0.530 | 0.660 | 0.672 | 0.771 | 0.404 | 0.519 | 0.508 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark_20211205.log.json) | +| [S-ViPNAS-Res50_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.650 | 0.732 | 0.550 | 0.686 | 0.684 | 0.783 | 0.437 | 0.554 | 0.528 | 0.632 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark_20211112.log.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml new file mode 100644 index 0000000000000000000000000000000000000000..5449af0ccd67ea433218a166b421888df3698ef8 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml @@ -0,0 +1,45 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + - DarkPose + Training Data: COCO-WholeBody + Name: td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.632 + Body AR: 0.71 + Face AP: 0.672 + Face AR: 0.771 + Foot AP: 0.53 + Foot AR: 0.66 + Hand AP: 0.404 + Hand AR: 0.519 + Whole AP: 0.508 + Whole AR: 0.607 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.65 + Body AR: 0.732 + Face AP: 0.684 + Face AR: 0.783 + Foot AP: 0.55 + Foot AR: 0.686 + Hand AP: 0.437 + Hand AR: 0.554 + Whole AP: 0.528 + Whole AR: 0.632 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_coco-wholebody.yml b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_coco-wholebody.yml new file mode 100644 index 
0000000000000000000000000000000000000000..d51126cab856fe4120692788a455dbc660d6aa73 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_coco-wholebody.yml @@ -0,0 +1,23 @@ +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: UBody-COCO-WholeBody + Name: td-hm_hrnet-w32_8xb64-210e_ubody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.678 + Body AR: 0.755 + Face AP: 0.630 + Face AR: 0.708 + Foot AP: 0.543 + Foot AR: 0.661 + Hand AP: 0.467 + Hand AR: 0.566 + Whole AP: 0.536 + Whole AR: 0.636 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/wholebody_2d_keypoint/ubody/td-hm_hrnet-w32_8xb64-210e_ubody-coco-256x192-7c227391_20230807.pth diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_ubody-coco-wholebody.md b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_ubody-coco-wholebody.md new file mode 100644 index 0000000000000000000000000000000000000000..bd62073847c80d25392564465a69e106c22bbf12 --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_ubody-coco-wholebody.md @@ -0,0 +1,38 @@ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+UBody (CVPR'2023) + +```bibtex +@inproceedings{lin2023one, + title={One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer}, + author={Lin, Jing and Zeng, Ailing and Wang, Haoqian and Zhang, Lei and Li, Yu}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + year={2023} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/ubody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.685 | 0.759 | 0.564 | 0.675 | 0.625 | 0.705 | 0.516 | 0.609 | 0.549 | 0.646 | [ckpt](https://download.openmmlab.com/mmpose/v1/wholebody_2d_keypoint/ubody/td-hm_hrnet-w32_8xb64-210e_ubody-coco-256x192-7c227391_20230807.pth) | [log](https://download.openmmlab.com/mmpose/v1/wholebody_2d_keypoint/ubody/td-hm_hrnet-w32_8xb64-210e_ubody-coco-256x192-7c227391_20230807.json) | diff --git a/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py new file mode 100644 index 0000000000000000000000000000000000000000..055484d0097a1f1538cc67de6062f19067c84a7c --- /dev/null +++ b/mmpose/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py @@ -0,0 +1,173 @@ +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'UBody2dDataset' +data_mode = 'topdown' +data_root = 'data/UBody/' + +scenes = [ + 'Magic_show', 
'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', + 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', + 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference' +] + +train_datasets = [ + dict( + type='CocoWholeBodyDataset', + data_root='data/coco/', + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=[]) +] + +for scene in scenes: + train_dataset = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file=f'annotations/{scene}/train_annotations.json', + data_prefix=dict(img='images/'), + pipeline=[], + sample_interval=10) + train_datasets.append(train_dataset) + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), + datasets=train_datasets, + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoWholeBodyDataset', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='data/coco/val2017/'), + pipeline=val_pipeline, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + test_mode=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/mmpose/datasets/__init__.py b/mmpose/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b90a12db4937ffca9ff103b1e5a0c7604de52e0b --- /dev/null +++ b/mmpose/datasets/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import build_dataset +from .dataset_wrappers import CombinedDataset +from .datasets import * # noqa +from .samplers import MultiSourceSampler +from .transforms import * # noqa + +__all__ = ['build_dataset', 'CombinedDataset', 'MultiSourceSampler'] diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5a236ff49b70b86149d318cbccdfd5af5a6450 --- /dev/null +++ b/mmpose/datasets/builder.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import platform +import random + +import numpy as np +import torch +from mmengine import build_from_cfg, is_seq_of +from mmengine.dataset import ConcatDataset, RepeatDataset + +from mmpose.registry import DATASETS + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + base_soft_limit = rlimit[0] + hard_limit = rlimit[1] + soft_limit = min(max(4096, base_soft_limit), hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + + +def _concat_dataset(cfg, default_args=None): + types = cfg['type'] + ann_files = cfg['ann_file'] + img_prefixes = cfg.get('img_prefix', None) + dataset_infos = cfg.get('dataset_info', None) + + num_joints = cfg['data_cfg'].get('num_joints', None) + dataset_channel = cfg['data_cfg'].get('dataset_channel', None) + + datasets = [] + num_dset = len(ann_files) + for i in range(num_dset): + cfg_copy = copy.deepcopy(cfg) + cfg_copy['ann_file'] = ann_files[i] + + if isinstance(types, (list, tuple)): + cfg_copy['type'] = types[i] + if isinstance(img_prefixes, (list, tuple)): + cfg_copy['img_prefix'] = img_prefixes[i] + if isinstance(dataset_infos, (list, tuple)): + cfg_copy['dataset_info'] = dataset_infos[i] + + if isinstance(num_joints, (list, tuple)): + cfg_copy['data_cfg']['num_joints'] = num_joints[i] + + if is_seq_of(dataset_channel, list): + cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i] + + datasets.append(build_dataset(cfg_copy, default_args)) + + return ConcatDataset(datasets) + + +def build_dataset(cfg, default_args=None): + """Build a dataset from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + default_args (dict, optional): Default initialization arguments. + Default: None. + + Returns: + Dataset: The constructed dataset. + """ + + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [build_dataset(c, default_args) for c in cfg['datasets']]) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif isinstance(cfg.get('ann_file'), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + return dataset + + +def worker_init_fn(worker_id, num_workers, rank, seed): + """Init the random seed for various workers.""" + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) diff --git a/mmpose/datasets/dataset_wrappers.py b/mmpose/datasets/dataset_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..5f1bd31afe496bffea3d146f534a87026cdc4bef --- /dev/null +++ b/mmpose/datasets/dataset_wrappers.py @@ -0,0 +1,203 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from copy import deepcopy +from typing import Any, Callable, List, Optional, Tuple, Union, Dict + +import numpy as np +from mmengine.dataset import BaseDataset +from mmengine.registry import build_from_cfg + +from mmpose.registry import DATASETS +from .datasets.utils import parse_pose_metainfo + + +@DATASETS.register_module() +class CombinedDataset(BaseDataset): + """A wrapper of combined dataset. 
+ + Args: + metainfo (dict): The meta information of combined dataset. + datasets (list): The configs of datasets to be combined. + pipeline (list, optional): Processing pipeline. Defaults to []. + sample_ratio_factor (list, optional): A list of sampling ratio + factors for each dataset. Defaults to None + """ + + def __init__(self, + metainfo: dict, + datasets: list, + pipeline: List[Union[dict, Callable]] = [], + sample_ratio_factor: Optional[List[float]] = None, + dataset_ratio_factor: Optional[List[float]] = None, + keypoints_mapping: Optional[List[Dict]] = None, + **kwargs): + + self.datasets = [] + self.resample = sample_ratio_factor is not None + + self.keypoints_mapping = keypoints_mapping + self.num_joints = None + if self.keypoints_mapping is not None: + self.num_joints = 0 + for mapping in self.keypoints_mapping: + self.num_joints = max(self.num_joints, max(mapping.values()) +1) + + + for cfg in datasets: + dataset = build_from_cfg(cfg, DATASETS) + self.datasets.append(dataset) + + # For each dataset, select its random subset based on the sample_ratio_factor + if dataset_ratio_factor is not None: + for i, dataset in enumerate(self.datasets): + dataset_len = len(dataset) + random_subset = np.random.choice( + dataset_len, + int(dataset_len * dataset_ratio_factor[i]), + replace=False, + ) + self.datasets[i] = dataset.get_subset( + random_subset.flatten().tolist(), + ) + + self._lens = [len(dataset) for dataset in self.datasets] + if self.resample: + assert len(sample_ratio_factor) == len(datasets), f'the length ' \ + f'of `sample_ratio_factor` {len(sample_ratio_factor)} does ' \ + f'not match the length of `datasets` {len(datasets)}' + assert min(sample_ratio_factor) >= 0.0, 'the ratio values in ' \ + '`sample_ratio_factor` should not be negative.' + self._lens_ori = self._lens + self._lens = [ + round(l * sample_ratio_factor[i]) + for i, l in enumerate(self._lens_ori) + ] + + self._len = sum(self._lens) + + super(CombinedDataset, self).__init__(pipeline=pipeline, **kwargs) + self._metainfo = parse_pose_metainfo(metainfo) + + print("CombinedDataset initialized\n\tlen: {}\n\tlens: {}".format(self._len, self._lens)) + + @property + def metainfo(self): + return deepcopy(self._metainfo) + + def __len__(self): + return self._len + + def _get_subset_index(self, index: int) -> Tuple[int, int]: + """Given a data sample's global index, return the index of the sub- + dataset the data sample belongs to, and the local index within that + sub-dataset. + + Args: + index (int): The global data sample index + + Returns: + tuple[int, int]: + - subset_index (int): The index of the sub-dataset + - local_index (int): The index of the data sample within + the sub-dataset + """ + if index >= len(self) or index < -len(self): + raise ValueError( + f'index({index}) is out of bounds for dataset with ' + f'length({len(self)}).') + + if index < 0: + index = index + len(self) + + subset_index = 0 + while index >= self._lens[subset_index]: + index -= self._lens[subset_index] + subset_index += 1 + + if self.resample: + gap = (self._lens_ori[subset_index] - + 1e-4) / self._lens[subset_index] + index = round(gap * index + np.random.rand() * gap - 0.5) + + return subset_index, index + + def prepare_data(self, idx: int) -> Any: + """Get data processed by ``self.pipeline``.The source dataset is + depending on the index. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. 
+ """ + + data_info = self.get_data_info(idx) + + # the assignment of 'dataset' should not be performed within the + # `get_data_info` function. Otherwise, it can lead to the mixed + # data augmentation process getting stuck. + data_info['dataset'] = self + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get annotation by index. + + Args: + idx (int): Global index of ``CombinedDataset``. + Returns: + dict: The idx-th annotation of the datasets. + """ + subset_idx, sample_idx = self._get_subset_index(idx) + # Get data sample processed by ``subset.pipeline`` + data_info = self.datasets[subset_idx][sample_idx] + + if 'dataset' in data_info: + data_info.pop('dataset') + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices' + ] + + for key in metainfo_keys: + data_info[key] = deepcopy(self._metainfo[key]) + + # Map keypoints based on the dataset keypoint mapping + if self.keypoints_mapping is not None: + mapping = self.keypoints_mapping[subset_idx] + + keypoints = data_info['keypoints'] + N, K, D = keypoints.shape + keypoints_visibility = data_info.get('keypoints_visibility', np.zeros((N, K))) + keypoints_visible = data_info.get('keypoints_visible', np.zeros((N, K))) + + mapped_keypoints = np.zeros((N, self.num_joints, 2)) + mapped_visibility = np.zeros((N, self.num_joints)) + mapped_visible = np.zeros((N, self.num_joints)) + + map_idx = np.stack( + [list(mapping.keys()), list(mapping.values())], axis=1) + mapped_keypoints[:, map_idx[:, 1], :] = data_info['keypoints'][:, map_idx[:, 0], :] + mapped_visibility[:, map_idx[:, 1]] = keypoints_visibility[:, map_idx[:, 0]] + mapped_visible[:, map_idx[:, 1]] = keypoints_visible[:, map_idx[:, 0]] + + data_info['keypoints'] = mapped_keypoints.reshape((N, self.num_joints, 2) ) + data_info['keypoints_visibility'] = mapped_visibility.reshape((N, self.num_joints)) + data_info['keypoints_visible'] = mapped_visible.reshape((N, self.num_joints)) + + # print('data_info', data_info) + + return data_info + + def full_init(self): + """Fully initialize all sub datasets.""" + + if self._fully_initialized: + return + + for dataset in self.datasets: + dataset.full_init() + self._fully_initialized = True diff --git a/mmpose/datasets/datasets/__init__.py b/mmpose/datasets/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0709ab32f265fce2144703e96ff60d487a6d463 --- /dev/null +++ b/mmpose/datasets/datasets/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .animal import * # noqa: F401, F403 +from .base import * # noqa: F401, F403 +from .body import * # noqa: F401, F403 +from .body3d import * # noqa: F401, F403 +from .face import * # noqa: F401, F403 +from .fashion import * # noqa: F401, F403 +from .hand import * # noqa: F401, F403 +from .hand3d import * # noqa: F401, F403 +from .wholebody import * # noqa: F401, F403 +from .wholebody3d import * # noqa: F401, F403 diff --git a/mmpose/datasets/datasets/animal/__init__.py b/mmpose/datasets/datasets/animal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..669f08cddd0ca10756867af160c55303c8a8ac20 --- /dev/null +++ b/mmpose/datasets/datasets/animal/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .animalkingdom_dataset import AnimalKingdomDataset +from .animalpose_dataset import AnimalPoseDataset +from .ap10k_dataset import AP10KDataset +from .atrw_dataset import ATRWDataset +from .fly_dataset import FlyDataset +from .horse10_dataset import Horse10Dataset +from .locust_dataset import LocustDataset +from .macaque_dataset import MacaqueDataset +from .zebra_dataset import ZebraDataset + +__all__ = [ + 'AnimalPoseDataset', 'AP10KDataset', 'Horse10Dataset', 'MacaqueDataset', + 'FlyDataset', 'LocustDataset', 'ZebraDataset', 'ATRWDataset', + 'AnimalKingdomDataset' +] diff --git a/mmpose/datasets/datasets/animal/animalkingdom_dataset.py b/mmpose/datasets/datasets/animal/animalkingdom_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..35ccb8b67a5b607e91f5120b2bc6c21e3d3eba39 --- /dev/null +++ b/mmpose/datasets/datasets/animal/animalkingdom_dataset.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AnimalKingdomDataset(BaseCocoStyleDataset): + """Animal Kingdom dataset for animal pose estimation. + + "[CVPR2022] Animal Kingdom: + A Large and Diverse Dataset for Animal Behavior Understanding" + More details can be found in the `paper + `__ . + + Website: + + The dataset loads raw features and apply specified transforms + to return a dict containing the image tensors and other information. + + Animal Kingdom keypoint indexes:: + + 0: 'Head_Mid_Top', + 1: 'Eye_Left', + 2: 'Eye_Right', + 3: 'Mouth_Front_Top', + 4: 'Mouth_Back_Left', + 5: 'Mouth_Back_Right', + 6: 'Mouth_Front_Bottom', + 7: 'Shoulder_Left', + 8: 'Shoulder_Right', + 9: 'Elbow_Left', + 10: 'Elbow_Right', + 11: 'Wrist_Left', + 12: 'Wrist_Right', + 13: 'Torso_Mid_Back', + 14: 'Hip_Left', + 15: 'Hip_Right', + 16: 'Knee_Left', + 17: 'Knee_Right', + 18: 'Ankle_Left ', + 19: 'Ankle_Right', + 20: 'Tail_Top_Back', + 21: 'Tail_Mid_Back', + 22: 'Tail_End_Back + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. 
+ Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ak.py') diff --git a/mmpose/datasets/datasets/animal/animalpose_dataset.py b/mmpose/datasets/datasets/animal/animalpose_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0279cf9de0907626f2a6686170dc5e99aafa2d9d --- /dev/null +++ b/mmpose/datasets/datasets/animal/animalpose_dataset.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AnimalPoseDataset(BaseCocoStyleDataset): + """Animal-Pose dataset for animal pose estimation. + + "Cross-domain Adaptation For Animal Pose Estimation" ICCV'2019 + More details can be found in the `paper + `__ . + + Animal-Pose keypoints:: + + 0: 'L_Eye', + 1: 'R_Eye', + 2: 'L_EarBase', + 3: 'R_EarBase', + 4: 'Nose', + 5: 'Throat', + 6: 'TailBase', + 7: 'Withers', + 8: 'L_F_Elbow', + 9: 'R_F_Elbow', + 10: 'L_B_Elbow', + 11: 'R_B_Elbow', + 12: 'L_F_Knee', + 13: 'R_F_Knee', + 14: 'L_B_Knee', + 15: 'R_B_Knee', + 16: 'L_F_Paw', + 17: 'R_F_Paw', + 18: 'L_B_Paw', + 19: 'R_B_Paw' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. 
+ max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/animalpose.py') diff --git a/mmpose/datasets/datasets/animal/ap10k_dataset.py b/mmpose/datasets/datasets/animal/ap10k_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..de1efbc67f7be55c57532684174442a3f865d5fd --- /dev/null +++ b/mmpose/datasets/datasets/animal/ap10k_dataset.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AP10KDataset(BaseCocoStyleDataset): + """AP-10K dataset for animal pose estimation. + + "AP-10K: A Benchmark for Animal Pose Estimation in the Wild" + Neurips Dataset Track'2021. + More details can be found in the `paper + `__ . + + AP-10K keypoints:: + + 0: 'L_Eye', + 1: 'R_Eye', + 2: 'Nose', + 3: 'Neck', + 4: 'root of tail', + 5: 'L_Shoulder', + 6: 'L_Elbow', + 7: 'L_F_Paw', + 8: 'R_Shoulder', + 9: 'R_Elbow', + 10: 'R_F_Paw, + 11: 'L_Hip', + 12: 'L_Knee', + 13: 'L_B_Paw', + 14: 'R_Hip', + 15: 'R_Knee', + 16: 'R_B_Paw' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ap10k.py') diff --git a/mmpose/datasets/datasets/animal/atrw_dataset.py b/mmpose/datasets/datasets/animal/atrw_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..de5b1a09a0510969ea0a6d57c15e5bd13104b99b --- /dev/null +++ b/mmpose/datasets/datasets/animal/atrw_dataset.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class ATRWDataset(BaseCocoStyleDataset): + """ATRW dataset for animal pose estimation. + + "ATRW: A Benchmark for Amur Tiger Re-identification in the Wild" + ACM MM'2020. + More details can be found in the `paper + `__ . + + ATRW keypoints:: + + 0: "left_ear", + 1: "right_ear", + 2: "nose", + 3: "right_shoulder", + 4: "right_front_paw", + 5: "left_shoulder", + 6: "left_front_paw", + 7: "right_hip", + 8: "right_knee", + 9: "right_back_paw", + 10: "left_hip", + 11: "left_knee", + 12: "left_back_paw", + 13: "tail", + 14: "center" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/atrw.py') diff --git a/mmpose/datasets/datasets/animal/fly_dataset.py b/mmpose/datasets/datasets/animal/fly_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b614d9b9f77b1e2eb7f067ea6cfb21d788857554 --- /dev/null +++ b/mmpose/datasets/datasets/animal/fly_dataset.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class FlyDataset(BaseCocoStyleDataset): + """FlyDataset for animal pose estimation. + + "Fast animal pose estimation using deep neural networks" + Nature methods'2019. More details can be found in the `paper + `__ . + + Vinegar Fly keypoints:: + + 0: "head", + 1: "eyeL", + 2: "eyeR", + 3: "neck", + 4: "thorax", + 5: "abdomen", + 6: "forelegR1", + 7: "forelegR2", + 8: "forelegR3", + 9: "forelegR4", + 10: "midlegR1", + 11: "midlegR2", + 12: "midlegR3", + 13: "midlegR4", + 14: "hindlegR1", + 15: "hindlegR2", + 16: "hindlegR3", + 17: "hindlegR4", + 18: "forelegL1", + 19: "forelegL2", + 20: "forelegL3", + 21: "forelegL4", + 22: "midlegL1", + 23: "midlegL2", + 24: "midlegL3", + 25: "midlegL4", + 26: "hindlegL1", + 27: "hindlegL2", + 28: "hindlegL3", + 29: "hindlegL4", + 30: "wingL", + 31: "wingR" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/fly.py') diff --git a/mmpose/datasets/datasets/animal/horse10_dataset.py b/mmpose/datasets/datasets/animal/horse10_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0c25dba6a705045b731bddd176bf20a46c285764 --- /dev/null +++ b/mmpose/datasets/datasets/animal/horse10_dataset.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Horse10Dataset(BaseCocoStyleDataset): + """Horse10Dataset for animal pose estimation. + + "Pretraining boosts out-of-domain robustness for pose estimation" + WACV'2021. More details can be found in the `paper + `__ . + + Horse-10 keypoints:: + + 0: 'Nose', + 1: 'Eye', + 2: 'Nearknee', + 3: 'Nearfrontfetlock', + 4: 'Nearfrontfoot', + 5: 'Offknee', + 6: 'Offfrontfetlock', + 7: 'Offfrontfoot', + 8: 'Shoulder', + 9: 'Midshoulder', + 10: 'Elbow', + 11: 'Girth', + 12: 'Wither', + 13: 'Nearhindhock', + 14: 'Nearhindfetlock', + 15: 'Nearhindfoot', + 16: 'Hip', + 17: 'Stifle', + 18: 'Offhindhock', + 19: 'Offhindfetlock', + 20: 'Offhindfoot', + 21: 'Ischium' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/horse10.py') diff --git a/mmpose/datasets/datasets/animal/locust_dataset.py b/mmpose/datasets/datasets/animal/locust_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3ada76034db8e9cbc25d68ccd9a430ea62394c74 --- /dev/null +++ b/mmpose/datasets/datasets/animal/locust_dataset.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class LocustDataset(BaseCocoStyleDataset): + """LocustDataset for animal pose estimation. 
+ + "DeepPoseKit, a software toolkit for fast and robust animal + pose estimation using deep learning" Elife'2019. + More details can be found in the `paper + `__ . + + Desert Locust keypoints:: + + 0: "head", + 1: "neck", + 2: "thorax", + 3: "abdomen1", + 4: "abdomen2", + 5: "anttipL", + 6: "antbaseL", + 7: "eyeL", + 8: "forelegL1", + 9: "forelegL2", + 10: "forelegL3", + 11: "forelegL4", + 12: "midlegL1", + 13: "midlegL2", + 14: "midlegL3", + 15: "midlegL4", + 16: "hindlegL1", + 17: "hindlegL2", + 18: "hindlegL3", + 19: "hindlegL4", + 20: "anttipR", + 21: "antbaseR", + 22: "eyeR", + 23: "forelegR1", + 24: "forelegR2", + 25: "forelegR3", + 26: "forelegR4", + 27: "midlegR1", + 28: "midlegR2", + 29: "midlegR3", + 30: "midlegR4", + 31: "hindlegR1", + 32: "hindlegR2", + 33: "hindlegR3", + 34: "hindlegR4" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/locust.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Locust annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. 
It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # get bbox in shape [1, 4], formatted as xywh + # use the entire image which is 160x160 + bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_keypoints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/animal/macaque_dataset.py b/mmpose/datasets/datasets/animal/macaque_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..08da981a1a2299efaadaf727b3960e769999fc35 --- /dev/null +++ b/mmpose/datasets/datasets/animal/macaque_dataset.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MacaqueDataset(BaseCocoStyleDataset): + """MacaquePose dataset for animal pose estimation. + + "MacaquePose: A novel 'in the wild' macaque monkey pose dataset + for markerless motion capture" bioRxiv'2020. + More details can be found in the `paper + `__ . + + Macaque keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. 
+ Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/macaque.py') diff --git a/mmpose/datasets/datasets/animal/zebra_dataset.py b/mmpose/datasets/datasets/animal/zebra_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b399a8479bcf18b8b33115b4cd703563e1a846d3 --- /dev/null +++ b/mmpose/datasets/datasets/animal/zebra_dataset.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class ZebraDataset(BaseCocoStyleDataset): + """ZebraDataset for animal pose estimation. + + "DeepPoseKit, a software toolkit for fast and robust animal + pose estimation using deep learning" Elife'2019. + More details can be found in the `paper + `__ . + + Zebra keypoints:: + + 0: "snout", + 1: "head", + 2: "neck", + 3: "forelegL1", + 4: "forelegR1", + 5: "hindlegL1", + 6: "hindlegR1", + 7: "tailbase", + 8: "tailtip" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. 
``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/zebra.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Zebra annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # get bbox in shape [1, 4], formatted as xywh + # use the entire image which is 160x160 + bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/base/__init__.py b/mmpose/datasets/datasets/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..810440530e4d091f55aea349b6b2a4f8d3ba593b --- /dev/null +++ b/mmpose/datasets/datasets/base/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_coco_style_dataset import BaseCocoStyleDataset +from .base_mocap_dataset import BaseMocapDataset + +__all__ = ['BaseCocoStyleDataset', 'BaseMocapDataset'] diff --git a/mmpose/datasets/datasets/base/base_coco_style_dataset.py b/mmpose/datasets/datasets/base/base_coco_style_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..61fad40e1d31458798e4ac04c2fa0d7e7eb4d31f --- /dev/null +++ b/mmpose/datasets/datasets/base/base_coco_style_dataset.py @@ -0,0 +1,518 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from copy import deepcopy +from itertools import chain, filterfalse, groupby +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init +from mmengine.fileio import exists, get_local_path, load +from mmengine.logging import MessageHub +from mmengine.utils import is_list_of +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from mmpose.structures.keypoint import find_min_padding_exact +from ..utils import parse_pose_metainfo + + +@DATASETS.register_module() +class BaseCocoStyleDataset(BaseDataset): + """Base class for COCO-style datasets. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. 
This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + sample_interval (int, optional): The sample interval of the dataset. + Default: 1. + """ + + METAINFO: dict = dict() + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000, + sample_interval: int = 1): + + if data_mode not in {'topdown', 'bottomup'}: + raise ValueError( + f'{self.__class__.__name__} got invalid data_mode: ' + f'{data_mode}. 
Should be "topdown" or "bottomup".') + self.data_mode = data_mode + + if bbox_file: + if self.data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {self.data_mode}: ' + 'mode, while "bbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "bbox_file" is only ' + 'supported when `test_mode==True`.') + self.bbox_file = bbox_file + self.sample_interval = sample_interval + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + if self.test_mode: + # save the ann_file into MessageHub for CocoMetric + message = MessageHub.get_current_instance() + dataset_name = self.metainfo['dataset_name'] + message.update_info_dict( + {f'{dataset_name}_ann_file': self.ann_file}) + + @classmethod + def _load_metainfo(cls, metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + metainfo = deepcopy(cls.METAINFO) + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + # parse pose metainfo if it has been assigned + if metainfo: + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + @force_full_init + def prepare_data(self, idx) -> Any: + """Get data processed by ``self.pipeline``. + + :class:`BaseCocoStyleDataset` overrides this method from + :class:`mmengine.dataset.BaseDataset` to add the metainfo into + the ``data_info`` before it is passed to the pipeline. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + data_info = self.get_data_info(idx) + + # Mixed image transformations require multiple source images for + # effective blending. Therefore, we assign the 'dataset' field in + # `data_info` to provide these auxiliary images. + # Note: The 'dataset' assignment should not occur within the + # `get_data_info` function, as doing so may cause the mixed image + # transformations to stall or hang. + data_info['dataset'] = self + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get data info by index. + + Args: + idx (int): Index of data info. + + Returns: + dict: Data info. 
+ """ + data_info = super().get_data_info(idx) + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'dataset_name', 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' + ] + + for key in metainfo_keys: + assert key not in data_info, ( + f'"{key}" is a reserved key for `metainfo`, but already ' + 'exists in the `data_info`.') + + data_info[key] = deepcopy(self._metainfo[key]) + + return data_info + + def load_data_list(self) -> List[dict]: + """Load data list from COCO annotation file or person detection result + file.""" + + if self.bbox_file: + data_list = self._load_detection_results() + else: + instance_list, image_list = self._load_annotations() + + if self.data_mode == 'topdown': + data_list = self._get_topdown_data_infos(instance_list) + else: + data_list = self._get_bottomup_data_infos( + instance_list, image_list) + + return data_list + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}`does not exist') + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. about this category + if 'categories' in self.coco.dataset: + self._metainfo['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + instance_list = [] + image_list = [] + + for img_id in self.coco.getImgIds(): + if img_id % self.sample_interval != 0: + continue + img = self.coco.loadImgs(img_id)[0] + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id) + for ann in self.coco.loadAnns(ann_ids): + + instance_info = self.parse_data_info( + dict(raw_ann_info=ann, raw_img_info=img)) + + # skip invalid instance annotation. + if not instance_info: + continue + + instance_list.append(instance_info) + return instance_list, image_list + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. 
It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict | None: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if 'bbox' not in ann or 'keypoints' not in ann: + return None + + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + # keypoints_annotated = (_keypoints[..., 2] > 0).astype(np.float32) + keypoints_visibility = (_keypoints[..., 2] == 2).astype(np.float32) + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + if 'num_keypoints' in ann: + num_keypoints = ann['num_keypoints'] + else: + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + if 'area' in ann: + area = np.array(ann['area'], dtype=np.float32) + else: + area = np.clip((x2 - x1) * (y2 - y1) * 0.53, a_min=1.0, a_max=None) + area = np.array(area, dtype=np.float32) + + id_similarity = np.array([ann.get('identity_similarity', 0.0)]) + identified = np.array([ann.get('identified', 0)]) + pad_to_contain = ann.get('pad_to_contain', None) + if pad_to_contain is None: + pad_to_contain = find_min_padding_exact(bbox, _keypoints.reshape(-1, 3)) + data_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'keypoints_visibility': keypoints_visibility, + 'pad_to_contain': pad_to_contain, + 'area': area, + 'iscrowd': ann.get('iscrowd', 0), + 'segmentation': ann.get('segmentation', None), + 'id': ann['id'], + 'id_similarity': id_similarity, + 'identified': identified, + 'category_id': np.array(ann['category_id']), + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + 'source_dataset': self.metainfo['dataset_name'], + } + + if 'crowdIndex' in img: + data_info['crowd_index'] = img['crowdIndex'] + + return data_info + + @staticmethod + def _is_valid_instance(data_info: Dict) -> bool: + """Check a data info is an instance with valid bbox and keypoint + annotations.""" + # crowd annotation + if 'iscrowd' in data_info and data_info['iscrowd']: + return False + # invalid keypoints + if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: + return False + # invalid bbox + if 'bbox' in data_info: + bbox = data_info['bbox'][0] + w, h = bbox[2:4] - bbox[:2] + if w <= 0 or h <= 0: + return False + # invalid keypoints + if 'keypoints' in data_info: + if np.max(data_info['keypoints']) <= 0: + return False + return True + + def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: + """Organize the data list in top-down mode.""" + # sanitize data samples + data_list_tp = list(filter(self._is_valid_instance, instance_list)) + + return data_list_tp + + def _get_bottomup_data_infos(self, instance_list: List[Dict], + image_list: List[Dict]) -> List[Dict]: + """Organize the 
data list in bottom-up mode.""" + + # bottom-up data list + data_list_bu = [] + + used_img_ids = set() + + # group instances by img_id + for img_id, data_infos in groupby(instance_list, + lambda x: x['img_id']): + used_img_ids.add(img_id) + data_infos = list(data_infos) + + # image data + img_path = data_infos[0]['img_path'] + data_info_bu = { + 'img_id': img_id, + 'img_path': img_path, + } + + for key in data_infos[0].keys(): + if key not in data_info_bu: + seq = [d[key] for d in data_infos] + if isinstance(seq[0], np.ndarray): + if seq[0].ndim > 0: + seq = np.concatenate(seq, axis=0) + else: + seq = np.stack(seq, axis=0) + elif isinstance(seq[0], (tuple, list)): + seq = list(chain.from_iterable(seq)) + + data_info_bu[key] = seq + + # The segmentation annotation of invalid objects will be used + # to generate valid region mask in the pipeline. + invalid_segs = [] + for data_info_invalid in filterfalse(self._is_valid_instance, + data_infos): + if 'segmentation' in data_info_invalid: + invalid_segs.append(data_info_invalid['segmentation']) + data_info_bu['invalid_segs'] = invalid_segs + + data_list_bu.append(data_info_bu) + + # add images without instance for evaluation + if self.test_mode: + for img_info in image_list: + if img_info['img_id'] not in used_img_ids: + data_info_bu = { + 'img_id': img_info['img_id'], + 'img_path': img_info['img_path'], + 'id': list(), + 'raw_ann_info': None, + } + data_list_bu.append(data_info_bu) + + return data_list_bu + + def _load_detection_results(self) -> List[dict]: + """Load data from detection results with dummy keypoint annotations.""" + + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}` does not exist') + assert exists( + self.bbox_file), (f'Bbox file `{self.bbox_file}` does not exist') + # load detection results + det_results = load(self.bbox_file) + assert is_list_of( + det_results, + dict), (f'BBox file `{self.bbox_file}` should be a list of dict, ' + f'but got {type(det_results)}') + + # load coco annotations to build image id-to-name index + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. about this category + self._metainfo['CLASSES'] = self.coco.loadCats(self.coco.getCatIds()) + + num_keypoints = self.metainfo['num_keypoints'] + data_list = [] + id_ = 0 + for det in det_results: + # remove non-human instances + if det['category_id'] != 1: + continue + + img = self.coco.loadImgs(det['image_id'])[0] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + bbox_xywh = np.array( + det['bbox'][:4], dtype=np.float32).reshape(1, 4) + bbox = bbox_xywh2xyxy(bbox_xywh) + bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) + + # use dummy keypoint location and visibility + keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) + keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) + + # If segmentation in the detection results, save it for later use + segmentation = det.get('segmentation', None) + + data_list.append({ + 'img_id': det['image_id'], + 'img_path': img_path, + 'img_shape': (img['height'], img['width']), + 'bbox': bbox, + 'bbox_score': bbox_score, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'id': id_, + 'segmentation': segmentation, + }) + + id_ += 1 + + return data_list + + def filter_data(self) -> List[dict]: + """Filter annotations according to filter_cfg. Defaults return full + ``data_list``. 
+ + If 'bbox_score_thr` in filter_cfg, the annotation with bbox_score below + the threshold `bbox_score_thr` will be filtered out. + """ + + data_list = self.data_list + + if self.filter_cfg is None: + return data_list + + # filter out annotations with a bbox_score below the threshold + if 'bbox_score_thr' in self.filter_cfg: + + if self.data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {self.data_mode} ' + 'mode, while "bbox_score_thr" is only supported in ' + 'topdown mode.') + + thr = self.filter_cfg['bbox_score_thr'] + data_list = list( + filterfalse(lambda ann: ann['bbox_score'] < thr, data_list)) + + return data_list diff --git a/mmpose/datasets/datasets/base/base_mocap_dataset.py b/mmpose/datasets/datasets/base/base_mocap_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..f9cea2987c647a1111bb60f94329e80961c8d0b2 --- /dev/null +++ b/mmpose/datasets/datasets/base/base_mocap_dataset.py @@ -0,0 +1,453 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import logging +import os.path as osp +from copy import deepcopy +from itertools import filterfalse, groupby +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import cv2 +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init +from mmengine.fileio import exists, get_local_path, load +from mmengine.logging import print_log +from mmengine.utils import is_abs + +from mmpose.registry import DATASETS +from ..utils import parse_pose_metainfo + + +@DATASETS.register_module() +class BaseMocapDataset(BaseDataset): + """Base class for 3d body datasets. + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + multiple_target (int): If larger than 0, merge every + ``multiple_target`` sequence together. Default: 0. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. 
In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict() + + def __init__(self, + ann_file: str = '', + seq_len: int = 1, + multiple_target: int = 0, + causal: bool = True, + subset_frac: float = 1.0, + camera_param_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + if data_mode not in {'topdown', 'bottomup'}: + raise ValueError( + f'{self.__class__.__name__} got invalid data_mode: ' + f'{data_mode}. Should be "topdown" or "bottomup".') + self.data_mode = data_mode + + _ann_file = ann_file + if not is_abs(_ann_file): + _ann_file = osp.join(data_root, _ann_file) + assert exists(_ann_file), ( + f'Annotation file `{_ann_file}` does not exist.') + + self._load_ann_file(_ann_file) + + self.camera_param_file = camera_param_file + if self.camera_param_file: + if not is_abs(self.camera_param_file): + self.camera_param_file = osp.join(data_root, + self.camera_param_file) + assert exists(self.camera_param_file), ( + f'Camera parameters file `{self.camera_param_file}` does not ' + 'exist.') + self.camera_param = load(self.camera_param_file) + + self.seq_len = seq_len + self.causal = causal + + self.multiple_target = multiple_target + if self.multiple_target: + assert (self.seq_len == 1), ( + 'Multi-target data sample only supports seq_len=1.') + + assert 0 < subset_frac <= 1, ( + f'Unsupported `subset_frac` {subset_frac}. Supported range ' + 'is (0, 1].') + self.subset_frac = subset_frac + + self.sequence_indices = self.get_sequence_indices() + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def _load_ann_file(self, ann_file: str) -> dict: + """Load annotation file to get image information. + + Args: + ann_file (str): Annotation file path. + + Returns: + dict: Annotation information. + """ + + with get_local_path(ann_file) as local_path: + self.ann_data = np.load(local_path) + + @classmethod + def _load_metainfo(cls, metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + metainfo = deepcopy(cls.METAINFO) + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + # parse pose metainfo if it has been assigned + if metainfo: + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + @force_full_init + def prepare_data(self, idx) -> Any: + """Get data processed by ``self.pipeline``. 
+ + :class:`BaseCocoStyleDataset` overrides this method from + :class:`mmengine.dataset.BaseDataset` to add the metainfo into + the ``data_info`` before it is passed to the pipeline. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + data_info = self.get_data_info(idx) + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get data info by index. + + Args: + idx (int): Index of data info. + + Returns: + dict: Data info. + """ + data_info = super().get_data_info(idx) + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' + ] + + for key in metainfo_keys: + assert key not in data_info, ( + f'"{key}" is a reserved key for `metainfo`, but already ' + 'exists in the `data_info`.') + + data_info[key] = deepcopy(self._metainfo[key]) + + return data_info + + def load_data_list(self) -> List[dict]: + """Load data list from COCO annotation file or person detection result + file.""" + + instance_list, image_list = self._load_annotations() + + if self.data_mode == 'topdown': + data_list = self._get_topdown_data_infos(instance_list) + else: + data_list = self._get_bottomup_data_infos(instance_list, + image_list) + + return data_list + + def get_img_info(self, img_idx, img_name): + try: + with get_local_path(osp.join(self.data_prefix['img'], + img_name)) as local_path: + im = cv2.imread(local_path) + h, w, _ = im.shape + except: # noqa: E722 + print_log( + f'Failed to read image {img_name}.', + logger='current', + level=logging.DEBUG) + return None + + img = { + 'file_name': img_name, + 'height': h, + 'width': w, + 'id': img_idx, + 'img_id': img_idx, + 'img_path': osp.join(self.data_prefix['img'], img_name), + } + return img + + def get_sequence_indices(self) -> List[List[int]]: + """Build sequence indices. + + The default method creates sample indices that each sample is a single + frame (i.e. seq_len=1). Override this method in the subclass to define + how frames are sampled to form data samples. + + Outputs: + sample_indices: the frame indices of each sample. + For a sample, all frames will be treated as an input sequence, + and the ground-truth pose of the last frame will be the target. 
+ """ + sequence_indices = [] + if self.seq_len == 1: + num_imgs = len(self.ann_data['imgname']) + sequence_indices = [[idx] for idx in range(num_imgs)] + else: + raise NotImplementedError('Multi-frame data sample unsupported!') + + if self.multiple_target > 0: + sequence_indices_merged = [] + for i in range(0, len(sequence_indices), self.multiple_target): + if i + self.multiple_target > len(sequence_indices): + break + sequence_indices_merged.append( + list( + itertools.chain.from_iterable( + sequence_indices[i:i + self.multiple_target]))) + sequence_indices = sequence_indices_merged + return sequence_indices + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + num_keypoints = self.metainfo['num_keypoints'] + + img_names = self.ann_data['imgname'] + num_imgs = len(img_names) + + if 'S' in self.ann_data.keys(): + kpts_3d = self.ann_data['S'] + else: + kpts_3d = np.zeros((num_imgs, num_keypoints, 4), dtype=np.float32) + + if 'part' in self.ann_data.keys(): + kpts_2d = self.ann_data['part'] + else: + kpts_2d = np.zeros((num_imgs, num_keypoints, 3), dtype=np.float32) + + if 'center' in self.ann_data.keys(): + centers = self.ann_data['center'] + else: + centers = np.zeros((num_imgs, 2), dtype=np.float32) + + if 'scale' in self.ann_data.keys(): + scales = self.ann_data['scale'].astype(np.float32) + else: + scales = np.zeros(num_imgs, dtype=np.float32) + + instance_list = [] + image_list = [] + + for idx, frame_ids in enumerate(self.sequence_indices): + expected_num_frames = self.seq_len + if self.multiple_target: + expected_num_frames = self.multiple_target + + assert len(frame_ids) == (expected_num_frames), ( + f'Expected `frame_ids` == {expected_num_frames}, but ' + f'got {len(frame_ids)} ') + + _img_names = img_names[frame_ids] + + _keypoints = kpts_2d[frame_ids].astype(np.float32) + keypoints = _keypoints[..., :2] + keypoints_visible = _keypoints[..., 2] + + _keypoints_3d = kpts_3d[frame_ids].astype(np.float32) + keypoints_3d = _keypoints_3d[..., :3] + keypoints_3d_visible = _keypoints_3d[..., 3] + + target_idx = [-1] if self.causal else [int(self.seq_len) // 2] + if self.multiple_target: + target_idx = list(range(self.multiple_target)) + + instance_info = { + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'keypoints_3d': keypoints_3d, + 'keypoints_3d_visible': keypoints_3d_visible, + 'scale': scales[idx], + 'center': centers[idx].astype(np.float32).reshape(1, -1), + 'id': idx, + 'category_id': 1, + 'iscrowd': 0, + 'img_paths': list(_img_names), + 'img_ids': frame_ids, + 'lifting_target': keypoints_3d[target_idx], + 'lifting_target_visible': keypoints_3d_visible[target_idx], + 'target_img_path': _img_names[target_idx], + } + + if self.camera_param_file: + _cam_param = self.get_camera_param(_img_names[0]) + instance_info['camera_param'] = _cam_param + + instance_list.append(instance_info) + + if self.data_mode == 'bottomup': + for idx, imgname in enumerate(img_names): + img_info = self.get_img_info(idx, imgname) + image_list.append(img_info) + + return instance_list, image_list + + def get_camera_param(self, imgname): + """Get camera parameters of a frame by its image name. + + Override this method to specify how to get camera parameters. 
+ """ + raise NotImplementedError + + @staticmethod + def _is_valid_instance(data_info: Dict) -> bool: + """Check a data info is an instance with valid bbox and keypoint + annotations.""" + # crowd annotation + if 'iscrowd' in data_info and data_info['iscrowd']: + return False + # invalid keypoints + if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: + return False + # invalid keypoints + if 'keypoints' in data_info: + if np.max(data_info['keypoints']) <= 0: + return False + return True + + def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: + """Organize the data list in top-down mode.""" + # sanitize data samples + data_list_tp = list(filter(self._is_valid_instance, instance_list)) + + return data_list_tp + + def _get_bottomup_data_infos(self, instance_list: List[Dict], + image_list: List[Dict]) -> List[Dict]: + """Organize the data list in bottom-up mode.""" + + # bottom-up data list + data_list_bu = [] + + used_img_ids = set() + + # group instances by img_id + for img_ids, data_infos in groupby(instance_list, + lambda x: x['img_ids']): + for img_id in img_ids: + used_img_ids.add(img_id) + data_infos = list(data_infos) + + # image data + img_paths = data_infos[0]['img_paths'] + data_info_bu = { + 'img_ids': img_ids, + 'img_paths': img_paths, + } + + for key in data_infos[0].keys(): + if key not in data_info_bu: + seq = [d[key] for d in data_infos] + if isinstance(seq[0], np.ndarray): + seq = np.concatenate(seq, axis=0) + data_info_bu[key] = seq + + # The segmentation annotation of invalid objects will be used + # to generate valid region mask in the pipeline. + invalid_segs = [] + for data_info_invalid in filterfalse(self._is_valid_instance, + data_infos): + if 'segmentation' in data_info_invalid: + invalid_segs.append(data_info_invalid['segmentation']) + data_info_bu['invalid_segs'] = invalid_segs + + data_list_bu.append(data_info_bu) + + # add images without instance for evaluation + if self.test_mode: + for img_info in image_list: + if img_info['img_id'] not in used_img_ids: + data_info_bu = { + 'img_ids': [img_info['img_id']], + 'img_path': [img_info['img_path']], + 'id': list(), + } + data_list_bu.append(data_info_bu) + + return data_list_bu diff --git a/mmpose/datasets/datasets/body/__init__.py b/mmpose/datasets/datasets/body/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f2d29b9cd457501f5a2f2101088e304bc0cb096a --- /dev/null +++ b/mmpose/datasets/datasets/body/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .aic_dataset import AicDataset +from .coco_dataset import CocoDataset +from .cococrop_dataset import CocoCropDataset +from .crowdpose_dataset import CrowdPoseDataset +from .exlpose_dataset import ExlposeDataset +from .humanart21_dataset import HumanArt21Dataset +from .humanart_dataset import HumanArtDataset +from .jhmdb_dataset import JhmdbDataset +from .mhp_dataset import MhpDataset +from .mpii_dataset import MpiiDataset +from .mpii_trb_dataset import MpiiTrbDataset +from .ochuman_dataset import OCHumanDataset +from .posetrack18_dataset import PoseTrack18Dataset +from .posetrack18_video_dataset import PoseTrack18VideoDataset + +__all__ = [ + 'CocoDataset', 'MpiiDataset', 'MpiiTrbDataset', 'AicDataset', + 'CrowdPoseDataset', 'OCHumanDataset', 'MhpDataset', 'PoseTrack18Dataset', + 'JhmdbDataset', 'PoseTrack18VideoDataset', 'HumanArtDataset', + 'HumanArt21Dataset', 'ExlposeDataset', 'CocoCropDataset' +] diff --git a/mmpose/datasets/datasets/body/aic_dataset.py b/mmpose/datasets/datasets/body/aic_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b9c7cccc76fb47b53cd73f3152878e051b442199 --- /dev/null +++ b/mmpose/datasets/datasets/body/aic_dataset.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AicDataset(BaseCocoStyleDataset): + """AIC dataset for pose estimation. + + "AI Challenger : A Large-scale Dataset for Going Deeper + in Image Understanding", arXiv'2017. + More details can be found in the `paper + `__ + + AIC keypoints:: + + 0: "right_shoulder", + 1: "right_elbow", + 2: "right_wrist", + 3: "left_shoulder", + 4: "left_elbow", + 5: "left_wrist", + 6: "right_hip", + 7: "right_knee", + 8: "right_ankle", + 9: "left_hip", + 10: "left_knee", + 11: "left_ankle", + 12: "head_top", + 13: "neck" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. 
In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/aic.py') diff --git a/mmpose/datasets/datasets/body/coco_dataset.py b/mmpose/datasets/datasets/body/coco_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7cc971f91f70ba28de1b9ae520d10a2f491eb32b --- /dev/null +++ b/mmpose/datasets/datasets/body/coco_dataset.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoDataset(BaseCocoStyleDataset): + """COCO dataset for pose estimation. + + "Microsoft COCO: Common Objects in Context", ECCV'2014. + More details can be found in the `paper + `__ . + + COCO keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
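+
+        Example (an illustrative sketch only; the annotation and image paths
+        below are placeholders rather than files shipped with the repo)::
+
+            >>> from mmpose.datasets.datasets.body import CocoDataset
+            >>> dataset = CocoDataset(
+            ...     ann_file='annotations/person_keypoints_val2017.json',
+            ...     data_root='data/coco/',
+            ...     data_prefix=dict(img='val2017/'),
+            ...     data_mode='topdown',
+            ...     pipeline=[],
+            ...     test_mode=True)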
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py') diff --git a/mmpose/datasets/datasets/body/cococrop_dataset.py b/mmpose/datasets/datasets/body/cococrop_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e9dfc36c95a1a7d402269cb27e4754ceb3b825 --- /dev/null +++ b/mmpose/datasets/datasets/body/cococrop_dataset.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoCropDataset(BaseCocoStyleDataset): + """COCO dataset for pose estimation. + + "Microsoft COCO: Common Objects in Context", ECCV'2014. + More details can be found in the `paper + `__ . + + COCO keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/coco_crop.py') diff --git a/mmpose/datasets/datasets/body/crowdpose_dataset.py b/mmpose/datasets/datasets/body/crowdpose_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4218708ff27b37dce7992d73695193442207b6d9 --- /dev/null +++ b/mmpose/datasets/datasets/body/crowdpose_dataset.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CrowdPoseDataset(BaseCocoStyleDataset): + """CrowdPose dataset for pose estimation. + + "CrowdPose: Efficient Crowded Scenes Pose Estimation and + A New Benchmark", CVPR'2019. + More details can be found in the `paper + `__. + + CrowdPose keypoints:: + + 0: 'left_shoulder', + 1: 'right_shoulder', + 2: 'left_elbow', + 3: 'right_elbow', + 4: 'left_wrist', + 5: 'right_wrist', + 6: 'left_hip', + 7: 'right_hip', + 8: 'left_knee', + 9: 'right_knee', + 10: 'left_ankle', + 11: 'right_ankle', + 12: 'top_head', + 13: 'neck' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/crowdpose.py') diff --git a/mmpose/datasets/datasets/body/exlpose_dataset.py b/mmpose/datasets/datasets/body/exlpose_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ad29f5d751ea9147d417188333a08ac793d5821e --- /dev/null +++ b/mmpose/datasets/datasets/body/exlpose_dataset.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class ExlposeDataset(BaseCocoStyleDataset): + """Exlpose dataset for pose estimation. + + "Human Pose Estimation in Extremely Low-Light Conditions", + CVPR'2023. + More details can be found in the `paper + `__. 
+ + ExLPose keypoints: + 0: "left_shoulder", + 1: "right_shoulder", + 2: "left_elbow", + 3: "right_elbow", + 4: "left_wrist", + 5: "right_wrist", + 6: "left_hip", + 7: "right_hip", + 8: "left_knee", + 9: "right_knee", + 10: "left_ankle", + 11: "right_ankle", + 12: "head", + 13: "neck" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/exlpose.py') diff --git a/mmpose/datasets/datasets/body/humanart21_dataset.py b/mmpose/datasets/datasets/body/humanart21_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b5695261289d3a9ea3e5006cd95c7dd8ec6172 --- /dev/null +++ b/mmpose/datasets/datasets/body/humanart21_dataset.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from .humanart_dataset import HumanArtDataset + + +@DATASETS.register_module() +class HumanArt21Dataset(HumanArtDataset): + """Human-Art dataset for pose estimation with 21 kpts. + + "Human-Art: A Versatile Human-Centric Dataset + Bridging Natural and Artificial Scenes", CVPR'2023. + More details can be found in the `paper + `__ . 
+ + Human-Art keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle', + 17: 'left_finger', + 18: 'right_finger', + 19: 'left_toe', + 20: 'right_toe', + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/humanart21.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. 
It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict | None: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if 'bbox' not in ann or 'keypoints' not in ann: + return None + + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints_21'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + if 'num_keypoints' in ann: + num_keypoints = ann['num_keypoints'] + else: + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann.get('iscrowd', 0), + 'segmentation': ann.get('segmentation', None), + 'id': ann['id'], + 'category_id': ann['category_id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + if 'crowdIndex' in img: + data_info['crowd_index'] = img['crowdIndex'] + + return data_info diff --git a/mmpose/datasets/datasets/body/humanart_dataset.py b/mmpose/datasets/datasets/body/humanart_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6f8aa2943d60ed668a2b93cc3d093f2ee929b6f1 --- /dev/null +++ b/mmpose/datasets/datasets/body/humanart_dataset.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class HumanArtDataset(BaseCocoStyleDataset): + """Human-Art dataset for pose estimation with 17 kpts. + + "Human-Art: A Versatile Human-Centric Dataset + Bridging Natural and Artificial Scenes", CVPR'2023. + More details can be found in the `paper + `__ . + + Human-Art keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. 
+ data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/humanart.py') diff --git a/mmpose/datasets/datasets/body/jhmdb_dataset.py b/mmpose/datasets/datasets/body/jhmdb_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..940a4cd4dc8f407cf483aeda2c4c02f48d32b92f --- /dev/null +++ b/mmpose/datasets/datasets/body/jhmdb_dataset.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class JhmdbDataset(BaseCocoStyleDataset): + """JhmdbDataset dataset for pose estimation. + + "Towards understanding action recognition", ICCV'2013. + More details can be found in the `paper + `__ + + sub-JHMDB keypoints:: + + 0: "neck", + 1: "belly", + 2: "head", + 3: "right_shoulder", + 4: "left_shoulder", + 5: "right_hip", + 6: "left_hip", + 7: "right_elbow", + 8: "left_elbow", + 9: "right_knee", + 10: "left_knee", + 11: "right_wrist", + 12: "left_wrist", + 13: "right_ankle", + 14: "left_ankle" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. 
+ indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/jhmdb.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + # JHMDB uses matlab format, index is 1-based, + # we should first convert to 0-based index + x -= 1 + y -= 1 + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + # JHMDB uses matlab format, index is 1-based, + # we should first convert to 0-based index + keypoints = _keypoints[..., :2] - 1 + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + area = np.clip((x2 - x1) * (y2 - y1) * 0.53, a_min=1.0, a_max=None) + category_id = ann.get('category_id', [1] * len(keypoints)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'area': np.array(area, dtype=np.float32), + 'iscrowd': ann.get('iscrowd', 0), + 'segmentation': ann.get('segmentation', None), + 'id': ann['id'], + 'category_id': category_id, + } + + return data_info diff --git a/mmpose/datasets/datasets/body/mhp_dataset.py b/mmpose/datasets/datasets/body/mhp_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..55d33602536383898c8b65ca48994d33c1616bea --- /dev/null +++ b/mmpose/datasets/datasets/body/mhp_dataset.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MhpDataset(BaseCocoStyleDataset): + """MHPv2.0 dataset for pose estimation. + + "Understanding Humans in Crowded Scenes: Deep Nested Adversarial + Learning and A New Benchmark for Multi-Human Parsing", ACM MM'2018. + More details can be found in the `paper + `__ + + MHP keypoints:: + + 0: "right ankle", + 1: "right knee", + 2: "right hip", + 3: "left hip", + 4: "left knee", + 5: "left ankle", + 6: "pelvis", + 7: "thorax", + 8: "upper neck", + 9: "head top", + 10: "right wrist", + 11: "right elbow", + 12: "right shoulder", + 13: "left shoulder", + 14: "left elbow", + 15: "left wrist", + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mhp.py') diff --git a/mmpose/datasets/datasets/body/mpii_dataset.py b/mmpose/datasets/datasets/body/mpii_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..904d94854b49ea7eda06155195b199d725906e9e --- /dev/null +++ b/mmpose/datasets/datasets/body/mpii_dataset.py @@ -0,0 +1,231 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import json +import os.path as osp +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from scipy.io import loadmat + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MpiiDataset(BaseCocoStyleDataset): + """MPII Dataset for pose estimation. + + "2D Human Pose Estimation: New Benchmark and State of the Art Analysis" + ,CVPR'2014. More details can be found in the `paper + `__ . + + MPII keypoints:: + + 0: 'right_ankle' + 1: 'right_knee', + 2: 'right_hip', + 3: 'left_hip', + 4: 'left_knee', + 5: 'left_ankle', + 6: 'pelvis', + 7: 'thorax', + 8: 'upper_neck', + 9: 'head_top', + 10: 'right_wrist', + 11: 'right_elbow', + 12: 'right_shoulder', + 13: 'left_shoulder', + 14: 'left_elbow', + 15: 'left_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + headbox_file (str, optional): The path of ``mpii_gt_val.mat`` which + provides the headboxes information used for ``PCKh``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
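+
+        Example (an illustrative sketch only; the paths below are
+        placeholders, and ``headbox_file`` is only needed for PCKh
+        evaluation, which requires ``test_mode=True`` and topdown mode)::
+
+            >>> from mmpose.datasets.datasets.body import MpiiDataset
+            >>> dataset = MpiiDataset(
+            ...     ann_file='annotations/mpii_val.json',
+            ...     headbox_file='annotations/mpii_gt_val.mat',
+            ...     data_root='data/mpii/',
+            ...     data_prefix=dict(img='images/'),
+            ...     data_mode='topdown',
+            ...     pipeline=[],
+            ...     test_mode=True)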
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + headbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + if headbox_file: + if data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {data_mode}: ' + 'mode, while "headbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "headbox_file" is only ' + 'supported when `test_mode==True`.') + + headbox_file_type = headbox_file[-3:] + allow_headbox_file_type = ['mat'] + if headbox_file_type not in allow_headbox_file_type: + raise KeyError( + f'The head boxes file type {headbox_file_type} is not ' + f'supported. Should be `mat` but got {headbox_file_type}.') + self.headbox_file = headbox_file + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in MPII format.""" + + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}` does not exist') + + with get_local_path(self.ann_file) as local_path: + with open(local_path) as anno_file: + self.anns = json.load(anno_file) + + if self.headbox_file: + assert exists(self.headbox_file), ( + f'Headbox file `{self.headbox_file}` does not exist') + + with get_local_path(self.headbox_file) as local_path: + self.headbox_dict = loadmat(local_path) + headboxes_src = np.transpose(self.headbox_dict['headboxes_src'], + [2, 0, 1]) + SC_BIAS = 0.6 + + instance_list = [] + image_list = [] + used_img_ids = set() + ann_id = 0 + + # mpii bbox scales are normalized with factor 200. + pixel_std = 200. + + for idx, ann in enumerate(self.anns): + center = np.array(ann['center'], dtype=np.float32) + scale = np.array([ann['scale'], ann['scale']], + dtype=np.float32) * pixel_std + + # Adjust center/scale slightly to avoid cropping limbs + if center[0] != -1: + center[1] = center[1] + 15. 
/ pixel_std * scale[1] + + # MPII uses matlab format, index is 1-based, + # we should first convert to 0-based index + center = center - 1 + + # unify shape with coco datasets + center = center.reshape(1, -1) + scale = scale.reshape(1, -1) + bbox = bbox_cs2xyxy(center, scale) + + # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + keypoints = np.array( + ann['joints'], dtype=np.float32).reshape(1, -1, 2) + keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) + + x1, y1, x2, y2 = np.split(bbox, axis=1, indices_or_sections=4) + area = np.clip((x2 - x1) * (y2 - y1) * 0.53, a_min=1.0, a_max=None) + area = area[..., 0].astype(np.float32) + + category_id = ann.get('category_id', [1] * len(bbox)) + + segmentation = ann.get('segmentation', None) + + instance_info = { + 'id': ann_id, + 'img_id': int(ann['image'].split('.')[0]), + 'img_path': osp.join(self.data_prefix['img'], ann['image']), + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'keypoints_visibility': keypoints_visible, + 'area': area, + 'category_id': category_id, + } + + if segmentation is not None: + instance_info['segmentation'] = segmentation + + if self.headbox_file: + # calculate the diagonal length of head box as norm_factor + headbox = headboxes_src[idx] + head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) + head_size *= SC_BIAS + instance_info['head_size'] = head_size.reshape(1, -1) + + if instance_info['img_id'] not in used_img_ids: + used_img_ids.add(instance_info['img_id']) + image_list.append({ + 'img_id': instance_info['img_id'], + 'img_path': instance_info['img_path'], + }) + + instance_list.append(instance_info) + ann_id = ann_id + 1 + + return instance_list, image_list diff --git a/mmpose/datasets/datasets/body/mpii_trb_dataset.py b/mmpose/datasets/datasets/body/mpii_trb_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..36f76166a91ea35f512972cb26f0a62e9cf78b9d --- /dev/null +++ b/mmpose/datasets/datasets/body/mpii_trb_dataset.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from typing import List, Tuple + +import numpy as np +from mmengine.fileio import exists, get_local_path + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MpiiTrbDataset(BaseCocoStyleDataset): + """MPII-TRB Dataset dataset for pose estimation. + + "TRB: A Novel Triplet Representation for Understanding 2D Human Body", + ICCV'2019. More details can be found in the `paper + `__ . 
+ + MPII-TRB keypoints:: + + 0: 'left_shoulder' + 1: 'right_shoulder' + 2: 'left_elbow' + 3: 'right_elbow' + 4: 'left_wrist' + 5: 'right_wrist' + 6: 'left_hip' + 7: 'right_hip' + 8: 'left_knee' + 9: 'right_knee' + 10: 'left_ankle' + 11: 'right_ankle' + 12: 'head' + 13: 'neck' + + 14: 'right_neck' + 15: 'left_neck' + 16: 'medial_right_shoulder' + 17: 'lateral_right_shoulder' + 18: 'medial_right_bow' + 19: 'lateral_right_bow' + 20: 'medial_right_wrist' + 21: 'lateral_right_wrist' + 22: 'medial_left_shoulder' + 23: 'lateral_left_shoulder' + 24: 'medial_left_bow' + 25: 'lateral_left_bow' + 26: 'medial_left_wrist' + 27: 'lateral_left_wrist' + 28: 'medial_right_hip' + 29: 'lateral_right_hip' + 30: 'medial_right_knee' + 31: 'lateral_right_knee' + 32: 'medial_right_ankle' + 33: 'lateral_right_ankle' + 34: 'medial_left_hip' + 35: 'lateral_left_hip' + 36: 'medial_left_knee' + 37: 'lateral_left_knee' + 38: 'medial_left_ankle' + 39: 'lateral_left_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
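+
+        Example (an illustrative config-style snippet, since datasets are
+        normally built through the ``DATASETS`` registry in a config file;
+        the paths are placeholders)::
+
+            >>> dataset_cfg = dict(
+            ...     type='MpiiTrbDataset',
+            ...     ann_file='annotations/mpii_trb_train.json',
+            ...     data_root='data/mpii/',
+            ...     data_prefix=dict(img='images/'),
+            ...     data_mode='topdown',
+            ...     pipeline=[])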
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii_trb.py') + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in MPII-TRB format.""" + + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}` does not exist') + + with get_local_path(self.ann_file) as local_path: + with open(local_path) as anno_file: + self.data = json.load(anno_file) + + imgid2info = {img['id']: img for img in self.data['images']} + + instance_list = [] + image_list = [] + used_img_ids = set() + + # mpii-trb bbox scales are normalized with factor 200. + pixel_std = 200. + + for ann in self.data['annotations']: + img_id = ann['image_id'] + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + img_path = osp.join(self.data_prefix['img'], + imgid2info[img_id]['file_name']) + + instance_info = { + 'id': ann['id'], + 'img_id': img_id, + 'img_path': img_path, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_joints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + } + + # val set + if 'headbox' in ann: + instance_info['headbox'] = np.array( + ann['headbox'], dtype=np.float32) + + instance_list.append(instance_info) + if instance_info['img_id'] not in used_img_ids: + used_img_ids.add(instance_info['img_id']) + image_list.append({ + 'img_id': instance_info['img_id'], + 'img_path': instance_info['img_path'], + }) + + instance_list = sorted(instance_list, key=lambda x: x['id']) + return instance_list, image_list diff --git a/mmpose/datasets/datasets/body/ochuman_dataset.py b/mmpose/datasets/datasets/body/ochuman_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..695d090ea998dd530e0f65f902916107e77c4f6d --- /dev/null +++ b/mmpose/datasets/datasets/body/ochuman_dataset.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class OCHumanDataset(BaseCocoStyleDataset): + """OChuman dataset for pose estimation. + + "Pose2Seg: Detection Free Human Instance Segmentation", CVPR'2019. + More details can be found in the `paper + `__ . + + "Occluded Human (OCHuman)" dataset contains 8110 heavily occluded + human instances within 4731 images. OCHuman dataset is designed for + validation and testing. To evaluate on OCHuman, the model should be + trained on COCO training set, and then test the robustness of the + model to occlusion using OCHuman. + + OCHuman keypoints (same as COCO):: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. 
If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ochuman.py') diff --git a/mmpose/datasets/datasets/body/posetrack18_dataset.py b/mmpose/datasets/datasets/body/posetrack18_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b8110c107f6869085ed795c8f1f0338d2c6ed21d --- /dev/null +++ b/mmpose/datasets/datasets/body/posetrack18_dataset.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PoseTrack18Dataset(BaseCocoStyleDataset): + """PoseTrack18 dataset for pose estimation. + + "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. + More details can be found in the `paper + `__ . + + PoseTrack2018 keypoints:: + + 0: 'nose', + 1: 'head_bottom', + 2: 'head_top', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. 
In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') diff --git a/mmpose/datasets/datasets/body/posetrack18_video_dataset.py b/mmpose/datasets/datasets/body/posetrack18_video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..f862d9bc5aa039123633663fc2277b9a61c87fc8 --- /dev/null +++ b/mmpose/datasets/datasets/body/posetrack18_video_dataset.py @@ -0,0 +1,393 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Callable, List, Optional, Sequence, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path, load +from mmengine.utils import is_list_of +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PoseTrack18VideoDataset(BaseCocoStyleDataset): + """PoseTrack18 dataset for video pose estimation. + + "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. + More details can be found in the `paper + `__ . + + PoseTrack2018 keypoints:: + + 0: 'nose', + 1: 'head_bottom', + 2: 'head_top', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. 
In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + frame_weights (List[Union[int, float]] ): The weight of each frame + for aggregation. The first weight is for the center frame, then on + ascending order of frame indices. Note that the length of + ``frame_weights`` should be consistent with the number of sampled + frames. Default: [0.0, 1.0] + frame_sampler_mode (str): Specifies the mode of frame sampler: + ``'fixed'`` or ``'random'``. In ``'fixed'`` mode, each frame + index relative to the center frame is fixed, specified by + ``frame_indices``, while in ``'random'`` mode, each frame index + relative to the center frame is sampled from ``frame_range`` + with certain randomness. Default: ``'random'``. + frame_range (int | List[int], optional): The sampling range of + supporting frames in the same video for center frame. + Only valid when ``frame_sampler_mode`` is ``'random'``. + Default: ``None``. + num_sampled_frame(int, optional): The number of sampled frames, except + the center frame. Only valid when ``frame_sampler_mode`` is + ``'random'``. Default: 1. + frame_indices (Sequence[int], optional): The sampled frame indices, + including the center frame indicated by 0. Only valid when + ``frame_sampler_mode`` is ``'fixed'``. Default: ``None``. + ph_fill_len (int): The length of the placeholder to fill in the + image filenames. Default: 6 + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
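+
+    Example (a minimal sketch; the annotation path below is a placeholder)::
+
+        # fixed sampling: use the center frame and the frame right after it
+        dataset = PoseTrack18VideoDataset(
+            ann_file='annotations/posetrack18_train.json',
+            data_root='data/posetrack18/',
+            frame_sampler_mode='fixed',
+            frame_indices=[0, 1],
+            frame_weights=[0.0, 1.0])
+
+        # random sampling: draw one supporting frame from the range [-2, 2]
+        dataset = PoseTrack18VideoDataset(
+            ann_file='annotations/posetrack18_train.json',
+            data_root='data/posetrack18/',
+            frame_sampler_mode='random',
+            frame_range=2,
+            num_sampled_frame=1,
+            frame_weights=[0.0, 1.0])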
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + frame_weights: List[Union[int, float]] = [0.0, 1.0], + frame_sampler_mode: str = 'random', + frame_range: Optional[Union[int, List[int]]] = None, + num_sampled_frame: Optional[int] = None, + frame_indices: Optional[Sequence[int]] = None, + ph_fill_len: int = 6, + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + assert sum(frame_weights) == 1, 'Invalid `frame_weights`: should sum'\ + f' to 1.0, but got {frame_weights}.' + for weight in frame_weights: + assert weight >= 0, 'frame_weight can not be a negative value.' + self.frame_weights = np.array(frame_weights) + + if frame_sampler_mode not in {'fixed', 'random'}: + raise ValueError( + f'{self.__class__.__name__} got invalid frame_sampler_mode: ' + f'{frame_sampler_mode}. Should be `"fixed"` or `"random"`.') + self.frame_sampler_mode = frame_sampler_mode + + if frame_sampler_mode == 'random': + assert frame_range is not None, \ + '`frame_sampler_mode` is set as `random`, ' \ + 'please specify the `frame_range`.' + + if isinstance(frame_range, int): + assert frame_range >= 0, \ + 'frame_range can not be a negative value.' + self.frame_range = [-frame_range, frame_range] + + elif isinstance(frame_range, Sequence): + assert len(frame_range) == 2, 'The length must be 2.' + assert frame_range[0] <= 0 and frame_range[ + 1] >= 0 and frame_range[1] > frame_range[ + 0], 'Invalid `frame_range`' + for i in frame_range: + assert isinstance(i, int), 'Each element must be int.' + self.frame_range = frame_range + else: + raise TypeError( + f'The type of `frame_range` must be int or Sequence, ' + f'but got {type(frame_range)}.') + + assert num_sampled_frame is not None, \ + '`frame_sampler_mode` is set as `random`, please specify ' \ + '`num_sampled_frame`, e.g. the number of sampled frames.' + + assert len(frame_weights) == num_sampled_frame + 1, \ + f'the length of frame_weights({len(frame_weights)}) '\ + f'does not match the number of sampled adjacent '\ + f'frames({num_sampled_frame})' + self.frame_indices = None + self.num_sampled_frame = num_sampled_frame + + if frame_sampler_mode == 'fixed': + assert frame_indices is not None, \ + '`frame_sampler_mode` is set as `fixed`, ' \ + 'please specify the `frame_indices`.' + assert len(frame_weights) == len(frame_indices), \ + f'the length of frame_weights({len(frame_weights)}) does not '\ + f'match the length of frame_indices({len(frame_indices)}).' + frame_indices.sort() + self.frame_indices = frame_indices + self.frame_range = None + self.num_sampled_frame = None + + self.ph_fill_len = ph_fill_len + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw annotation of an instance. 
+ + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if 'bbox' not in ann or 'keypoints' not in ann or max( + ann['keypoints']) == 0: + return None + + img_w, img_h = img['width'], img['height'] + # get the bbox of the center frame + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # get the keypoints of the center frame + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + # deal with multiple image paths + img_paths: list = [] + # get the image path of the center frame + center_img_path = osp.join(self.data_prefix['img'], img['file_name']) + # append the center image path first + img_paths.append(center_img_path) + + # select the frame indices + if self.frame_sampler_mode == 'fixed': + indices = self.frame_indices + else: # self.frame_sampler_mode == 'random': + low, high = self.frame_range + indices = np.random.randint(low, high + 1, self.num_sampled_frame) + + nframes = int(img['nframes']) + file_name = img['file_name'] + ref_idx = int(osp.splitext(osp.basename(file_name))[0]) + + for idx in indices: + if self.test_mode and idx == 0: + continue + # the supporting frame index + support_idx = ref_idx + idx + # clip the frame index to make sure that it does not exceed + # the boundings of frame indices + support_idx = np.clip(support_idx, 0, nframes - 1) + sup_img_path = osp.join( + osp.dirname(center_img_path), + str(support_idx).zfill(self.ph_fill_len) + '.jpg') + + img_paths.append(sup_img_path) + + data_info = { + 'img_id': int(img['frame_id']), + 'img_path': img_paths, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_keypoints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'frame_weights': self.frame_weights, + 'id': ann['id'], + } + + return data_info + + def _load_detection_results(self) -> List[dict]: + """Load data from detection results with dummy keypoint annotations.""" + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}` does not exist') + assert exists( + self.bbox_file), (f'Bbox file `{self.bbox_file}` does not exist') + + # load detection results + det_results = load(self.bbox_file) + assert is_list_of(det_results, dict), ( + f'annotation file `{self.bbox_file}` should be a list of dicts, ' + f'but got type {type(det_results)}') + + # load coco annotations to build image id-to-name index + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + + # mapping image name to id + name2id = {} + # mapping image id to name + id2name = {} + for img_id, image in self.coco.imgs.items(): + file_name = image['file_name'] + id2name[img_id] = file_name + name2id[file_name] = img_id + + num_keypoints = self.metainfo['num_keypoints'] + data_list = [] + id_ = 0 + for det in det_results: + # remove non-human 
instances + if det['category_id'] != 1: + continue + + # get the predicted bbox and bbox_score + bbox_xywh = np.array( + det['bbox'][:4], dtype=np.float32).reshape(1, 4) + bbox = bbox_xywh2xyxy(bbox_xywh) + bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) + + # use dummy keypoint location and visibility + keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) + keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) + + # deal with different bbox file formats + if 'nframes' in det: + nframes = int(det['nframes']) + else: + if 'image_name' in det: + img_id = name2id[det['image_name']] + else: + img_id = det['image_id'] + img_ann = self.coco.loadImgs(img_id)[0] + nframes = int(img_ann['nframes']) + + # deal with multiple image paths + img_paths: list = [] + if 'image_name' in det: + image_name = det['image_name'] + else: + image_name = id2name[det['image_id']] + # get the image path of the center frame + center_img_path = osp.join(self.data_prefix['img'], image_name) + # append the center image path first + img_paths.append(center_img_path) + + # "images/val/012834_mpii_test/000000.jpg" -->> "000000.jpg" + center_image_name = image_name.split('/')[-1] + ref_idx = int(center_image_name.replace('.jpg', '')) + + # select the frame indices + if self.frame_sampler_mode == 'fixed': + indices = self.frame_indices + else: # self.frame_sampler_mode == 'random': + low, high = self.frame_range + indices = np.random.randint(low, high + 1, + self.num_sampled_frame) + + for idx in indices: + if self.test_mode and idx == 0: + continue + # the supporting frame index + support_idx = ref_idx + idx + # clip the frame index to make sure that it does not exceed + # the boundings of frame indices + support_idx = np.clip(support_idx, 0, nframes - 1) + sup_img_path = center_img_path.replace( + center_image_name, + str(support_idx).zfill(self.ph_fill_len) + '.jpg') + + img_paths.append(sup_img_path) + + data_list.append({ + 'img_id': det['image_id'], + 'img_path': img_paths, + 'frame_weights': self.frame_weights, + 'bbox': bbox, + 'bbox_score': bbox_score, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'id': id_, + }) + + id_ += 1 + + return data_list diff --git a/mmpose/datasets/datasets/body3d/__init__.py b/mmpose/datasets/datasets/body3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d5afeca578a7c937cfcfe89302e62d03dcaab05d --- /dev/null +++ b/mmpose/datasets/datasets/body3d/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .h36m_dataset import Human36mDataset + +__all__ = ['Human36mDataset'] diff --git a/mmpose/datasets/datasets/body3d/h36m_dataset.py b/mmpose/datasets/datasets/body3d/h36m_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..397738c2769731cdbde612a214522e32b2721e3c --- /dev/null +++ b/mmpose/datasets/datasets/body3d/h36m_dataset.py @@ -0,0 +1,315 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from collections import defaultdict +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from mmengine.utils import is_abs + +from mmpose.datasets.datasets import BaseMocapDataset +from mmpose.registry import DATASETS + + +@DATASETS.register_module() +class Human36mDataset(BaseMocapDataset): + """Human3.6M dataset for 3D human pose estimation. 
+ + "Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human + Sensing in Natural Environments", TPAMI`2014. + More details can be found in the `paper + `__. + + Human3.6M keypoint indexes:: + + 0: 'root (pelvis)', + 1: 'right_hip', + 2: 'right_knee', + 3: 'right_foot', + 4: 'left_hip', + 5: 'left_knee', + 6: 'left_foot', + 7: 'spine', + 8: 'thorax', + 9: 'neck_base', + 10: 'head', + 11: 'left_shoulder', + 12: 'left_elbow', + 13: 'left_wrist', + 14: 'right_shoulder', + 15: 'right_elbow', + 16: 'right_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + seq_step (int): The interval for extracting frames from the video. + Default: 1. + multiple_target (int): If larger than 0, merge every + ``multiple_target`` sequence together. Default: 0. + multiple_target_step (int): The interval for merging sequence. Only + valid when ``multiple_target`` is larger than 0. Default: 0. + pad_video_seq (bool): Whether to pad the video so that poses will be + predicted for every frame in the video. Default: ``False``. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + keypoint_2d_src (str): Specifies 2D keypoint information options, which + should be one of the following options: + + - ``'gt'``: load from the annotation file + - ``'detection'``: load from a detection + result file of 2D keypoint + - 'pipeline': the information will be generated by the pipeline + + Default: ``'gt'``. + keypoint_2d_det_file (str, optional): The 2D keypoint detection file. + If set, 2d keypoint loaded from this file will be used instead of + ground-truth keypoints. This setting is only when + ``keypoint_2d_src`` is ``'detection'``. Default: ``None``. + factor_file (str, optional): The projection factors' file. If set, + factor loaded from this file will be used instead of calculated + factors. Default: ``None``. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. 
In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/h36m.py') + SUPPORTED_keypoint_2d_src = {'gt', 'detection', 'pipeline'} + + def __init__(self, + ann_file: str = '', + seq_len: int = 1, + seq_step: int = 1, + multiple_target: int = 0, + multiple_target_step: int = 0, + pad_video_seq: bool = False, + causal: bool = True, + subset_frac: float = 1.0, + keypoint_2d_src: str = 'gt', + keypoint_2d_det_file: Optional[str] = None, + factor_file: Optional[str] = None, + camera_param_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + # check keypoint_2d_src + self.keypoint_2d_src = keypoint_2d_src + if self.keypoint_2d_src not in self.SUPPORTED_keypoint_2d_src: + raise ValueError( + f'Unsupported `keypoint_2d_src` "{self.keypoint_2d_src}". ' + f'Supported options are {self.SUPPORTED_keypoint_2d_src}') + + if keypoint_2d_det_file: + if not is_abs(keypoint_2d_det_file): + self.keypoint_2d_det_file = osp.join(data_root, + keypoint_2d_det_file) + else: + self.keypoint_2d_det_file = keypoint_2d_det_file + + self.seq_step = seq_step + self.pad_video_seq = pad_video_seq + + if factor_file: + if not is_abs(factor_file): + factor_file = osp.join(data_root, factor_file) + assert exists(factor_file), (f'`factor_file`: {factor_file}' + 'does not exist.') + self.factor_file = factor_file + + if multiple_target > 0 and multiple_target_step == 0: + multiple_target_step = multiple_target + self.multiple_target_step = multiple_target_step + + super().__init__( + ann_file=ann_file, + seq_len=seq_len, + multiple_target=multiple_target, + causal=causal, + subset_frac=subset_frac, + camera_param_file=camera_param_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def get_sequence_indices(self) -> List[List[int]]: + """Split original videos into sequences and build frame indices. + + This method overrides the default one in the base class. 
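+
+        For example, with ``seq_len=3`` and ``seq_step=2`` (and with
+        ``pad_video_seq=False`` and ``multiple_target=0``), every sequence
+        spans ``(3 - 1) * 2 + 1 = 5`` consecutive frames and keeps every
+        second one, yielding index windows like ``[0, 2, 4]``, ``[1, 3, 5]``
+        within a video.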
+ """ + imgnames = self.ann_data['imgname'] + video_frames = defaultdict(list) + for idx, imgname in enumerate(imgnames): + subj, action, camera = self._parse_h36m_imgname(imgname) + video_frames[(subj, action, camera)].append(idx) + + # build sample indices + sequence_indices = [] + _len = (self.seq_len - 1) * self.seq_step + 1 + _step = self.seq_step + + if self.multiple_target: + for _, _indices in sorted(video_frames.items()): + n_frame = len(_indices) + seqs_from_video = [ + _indices[i:(i + self.multiple_target):_step] + for i in range(0, n_frame, self.multiple_target_step) + ][:(n_frame + self.multiple_target_step - + self.multiple_target) // self.multiple_target_step] + sequence_indices.extend(seqs_from_video) + + else: + for _, _indices in sorted(video_frames.items()): + n_frame = len(_indices) + + if self.pad_video_seq: + # Pad the sequence so that every frame in the sequence will + # be predicted. + if self.causal: + frames_left = self.seq_len - 1 + frames_right = 0 + else: + frames_left = (self.seq_len - 1) // 2 + frames_right = frames_left + for i in range(n_frame): + pad_left = max(0, frames_left - i // _step) + pad_right = max( + 0, frames_right - (n_frame - 1 - i) // _step) + start = max(i % _step, i - frames_left * _step) + end = min(n_frame - (n_frame - 1 - i) % _step, + i + frames_right * _step + 1) + sequence_indices.append([_indices[0]] * pad_left + + _indices[start:end:_step] + + [_indices[-1]] * pad_right) + else: + seqs_from_video = [ + _indices[i:(i + _len):_step] + for i in range(0, n_frame - _len + 1) + ] + sequence_indices.extend(seqs_from_video) + + # reduce dataset size if needed + subset_size = int(len(sequence_indices) * self.subset_frac) + start = np.random.randint(0, len(sequence_indices) - subset_size + 1) + end = start + subset_size + + sequence_indices = sequence_indices[start:end] + + return sequence_indices + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + instance_list, image_list = super()._load_annotations() + + h36m_data = self.ann_data + kpts_3d = h36m_data['S'] + + if self.keypoint_2d_src == 'detection': + assert exists(self.keypoint_2d_det_file), ( + f'`keypoint_2d_det_file`: `{self.keypoint_2d_det_file}`' + 'does not exist.') + kpts_2d = self._load_keypoint_2d_detection( + self.keypoint_2d_det_file) + assert kpts_2d.shape[0] == kpts_3d.shape[0], ( + f'Number of `kpts_2d` ({kpts_2d.shape[0]}) does not match ' + f'number of `kpts_3d` ({kpts_3d.shape[0]}).') + + assert kpts_2d.shape[2] == 3, ( + f'Expect `kpts_2d.shape[2]` == 3, but got ' + f'{kpts_2d.shape[2]}. 
Please check the format of ' + f'{self.keypoint_2d_det_file}') + + for idx, frame_ids in enumerate(self.sequence_indices): + kpt_2d = kpts_2d[frame_ids].astype(np.float32) + keypoints = kpt_2d[..., :2] + keypoints_visible = kpt_2d[..., 2] + instance_list[idx].update({ + 'keypoints': + keypoints, + 'keypoints_visible': + keypoints_visible + }) + if self.factor_file: + with get_local_path(self.factor_file) as local_path: + factors = np.load(local_path).astype(np.float32) + else: + factors = np.zeros((kpts_3d.shape[0], ), dtype=np.float32) + assert factors.shape[0] == kpts_3d.shape[0], ( + f'Number of `factors` ({factors.shape[0]}) does not match ' + f'number of `kpts_3d` ({kpts_3d.shape[0]}).') + + for idx, frame_ids in enumerate(self.sequence_indices): + factor = factors[frame_ids].astype(np.float32) + instance_list[idx].update({'factor': factor}) + + return instance_list, image_list + + @staticmethod + def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]: + """Parse imgname to get information of subject, action and camera. + + A typical h36m image filename is like: + S1_Directions_1.54138969_000001.jpg + """ + subj, rest = osp.basename(imgname).split('_', 1) + action, rest = rest.split('.', 1) + camera, rest = rest.split('_', 1) + return subj, action, camera + + def get_camera_param(self, imgname) -> dict: + """Get camera parameters of a frame by its image name.""" + assert hasattr(self, 'camera_param') + subj, _, camera = self._parse_h36m_imgname(imgname) + return self.camera_param[(subj, camera)] + + def _load_keypoint_2d_detection(self, det_file): + """"Load 2D joint detection results from file.""" + with get_local_path(det_file) as local_path: + kpts_2d = np.load(local_path).astype(np.float32) + + return kpts_2d diff --git a/mmpose/datasets/datasets/face/__init__.py b/mmpose/datasets/datasets/face/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b78d87502f660342d7a9822070f6cd4b47eb3be --- /dev/null +++ b/mmpose/datasets/datasets/face/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .aflw_dataset import AFLWDataset +from .coco_wholebody_face_dataset import CocoWholeBodyFaceDataset +from .cofw_dataset import COFWDataset +from .face_300w_dataset import Face300WDataset +from .face_300wlp_dataset import Face300WLPDataset +from .lapa_dataset import LapaDataset +from .wflw_dataset import WFLWDataset + +__all__ = [ + 'Face300WDataset', 'WFLWDataset', 'AFLWDataset', 'COFWDataset', + 'CocoWholeBodyFaceDataset', 'LapaDataset', 'Face300WLPDataset' +] diff --git a/mmpose/datasets/datasets/face/aflw_dataset.py b/mmpose/datasets/datasets/face/aflw_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..deda0974bb58ba52371f727e788342b5502987a5 --- /dev/null +++ b/mmpose/datasets/datasets/face/aflw_dataset.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AFLWDataset(BaseCocoStyleDataset): + """AFLW dataset for face keypoint localization. + + "Annotated Facial Landmarks in the Wild: A Large-scale, + Real-world Database for Facial Landmark Localization". + In Proc. First IEEE International Workshop on Benchmarking + Facial Image Analysis Technologies, 2011. + + The landmark annotations follow the 19 points mark-up. 
The definition + can be found in `https://www.tugraz.at/institute/icg/research` + `/team-bischof/lrs/downloads/aflw/` + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/aflw.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face AFLW annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # aflw bbox scales are normalized with factor 200. + pixel_std = 200. 
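+        # e.g. an annotation with scale=0.75 corresponds to a square box of
+        # side 0.75 * 200 = 150 pixels centred at ann['center'] (illustrative
+        # numbers only).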
+ + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + if self.test_mode: + # 'box_size' is used as normalization factor + assert 'box_size' in ann, '"box_size" is missing in annotation, '\ + 'which is required for evaluation.' + data_info['box_size'] = ann['box_size'] + + return data_info diff --git a/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py b/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..bc2c5be386012a341879a3910dcf72e5672e5d6f --- /dev/null +++ b/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyFaceDataset(BaseCocoStyleDataset): + """CocoWholeBodyDataset for face keypoint localization. + + `Whole-Body Human Pose Estimation in the Wild', ECCV'2020. + More details can be found in the `paper + `__ . + + The face landmark annotations follow the 68 points mark-up. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. 
+ lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody_face.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw CocoWholeBody Face annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if not ann['face_valid'] or max(ann['face_kpts']) <= 0: + return None + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['face_box'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['face_kpts'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/face/cofw_dataset.py b/mmpose/datasets/datasets/face/cofw_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5ec2a37efd8b7fc125ebd87df88bc9c99cd86250 --- /dev/null +++ b/mmpose/datasets/datasets/face/cofw_dataset.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class COFWDataset(BaseCocoStyleDataset): + """COFW dataset for face keypoint localization. + + "Robust face landmark estimation under occlusion", ICCV'2013. + + The landmark annotations follow the 29 points mark-up. The definition + can be found in `http://www.vision.caltech.edu/xpburgos/ICCV13/`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. 
Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/cofw.py') diff --git a/mmpose/datasets/datasets/face/face_300w_dataset.py b/mmpose/datasets/datasets/face/face_300w_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..c70e892b4f707dc5990566b760e0a2566eb4a53f --- /dev/null +++ b/mmpose/datasets/datasets/face/face_300w_dataset.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Face300WDataset(BaseCocoStyleDataset): + """300W dataset for face keypoint localization. + + "300 faces In-the-wild challenge: Database and results", + Image and Vision Computing (IMAVIS) 2019. + + The landmark annotations follow the 68 points mark-up. The definition + can be found in `https://ibug.doc.ic.ac.uk/resources/300-W/`. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. 
+ indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/300w.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face300W annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # 300w bbox scales are normalized with factor 200. + pixel_std = 200. + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/face/face_300wlp_dataset.py b/mmpose/datasets/datasets/face/face_300wlp_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..215df09a532146740eb60c822e77f438e04d100e --- /dev/null +++ b/mmpose/datasets/datasets/face/face_300wlp_dataset.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Face300WLPDataset(BaseCocoStyleDataset): + """300W dataset for face keypoint localization. + + "300 faces In-the-wild challenge: Database and results", + Image and Vision Computing (IMAVIS) 2019. + + The landmark annotations follow the 68 points mark-up. The definition + can be found in `https://ibug.doc.ic.ac.uk/resources/300-W/`. + + Args: + ann_file (str): Annotation file path. Default: ''. 
+ bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/300wlp.py') diff --git a/mmpose/datasets/datasets/face/lapa_dataset.py b/mmpose/datasets/datasets/face/lapa_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..1a5bdc4ec08cebe690ae1f5f2a659e9c087634ec --- /dev/null +++ b/mmpose/datasets/datasets/face/lapa_dataset.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class LapaDataset(BaseCocoStyleDataset): + """LaPa dataset for face keypoint localization. + + "A New Dataset and Boundary-Attention Semantic Segmentation + for Face Parsing", AAAI'2020. + + The landmark annotations follow the 106 points mark-up. The definition + can be found in `https://github.com/JDAI-CV/lapa-dataset/`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. 
+ data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/lapa.py') diff --git a/mmpose/datasets/datasets/face/wflw_dataset.py b/mmpose/datasets/datasets/face/wflw_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1c23053ce87fc92a234334e637e7a8e0402a9e --- /dev/null +++ b/mmpose/datasets/datasets/face/wflw_dataset.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class WFLWDataset(BaseCocoStyleDataset): + """WFLW dataset for face keypoint localization. + + "Look at Boundary: A Boundary-Aware Face Alignment Algorithm", + CVPR'2018. + + The landmark annotations follow the 98 points mark-up. The definition + can be found in `https://wywu.github.io/projects/LAB/WFLW.html`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/wflw.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face WFLW annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # wflw bbox scales are normalized with factor 200. + pixel_std = 200. + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/fashion/__init__.py b/mmpose/datasets/datasets/fashion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8be25dede3d16dfb7754c794d86d7f236e8f647b --- /dev/null +++ b/mmpose/datasets/datasets/fashion/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .deepfashion2_dataset import DeepFashion2Dataset +from .deepfashion_dataset import DeepFashionDataset + +__all__ = ['DeepFashionDataset', 'DeepFashion2Dataset'] diff --git a/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py b/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..c3cde9bf97be254927aa6a06f46bdcc225f14283 --- /dev/null +++ b/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
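+# A minimal usage sketch (paths are placeholders): the dataset is typically
+# built from a config dict through the DATASETS registry, e.g.
+#
+#     dict(
+#         type='DeepFashion2Dataset',
+#         ann_file='annotations/train.json',
+#         data_root='data/deepfashion2/',
+#         data_prefix=dict(img='train/image/'),
+#         pipeline=[])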
+from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module(name='DeepFashion2Dataset') +class DeepFashion2Dataset(BaseCocoStyleDataset): + """DeepFashion2 dataset for fashion landmark detection.""" + + METAINFO: dict = dict(from_file='configs/_base_/datasets/deepfashion2.py') diff --git a/mmpose/datasets/datasets/fashion/deepfashion_dataset.py b/mmpose/datasets/datasets/fashion/deepfashion_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a0aa4937323e41333d48a82a11862e68ffc697f0 --- /dev/null +++ b/mmpose/datasets/datasets/fashion/deepfashion_dataset.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Optional, Sequence, Union + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class DeepFashionDataset(BaseCocoStyleDataset): + """DeepFashion dataset (full-body clothes) for fashion landmark detection. + + "DeepFashion: Powering Robust Clothes Recognition + and Retrieval with Rich Annotations", CVPR'2016. + "Fashion Landmark Detection in the Wild", ECCV'2016. + + The dataset contains 3 categories for full-body, upper-body and lower-body. + + Fashion landmark indexes for upper-body clothes:: + + 0: 'left collar', + 1: 'right collar', + 2: 'left sleeve', + 3: 'right sleeve', + 4: 'left hem', + 5: 'right hem' + + Fashion landmark indexes for lower-body clothes:: + + 0: 'left waistline', + 1: 'right waistline', + 2: 'left hem', + 3: 'right hem' + + Fashion landmark indexes for full-body clothes:: + + 0: 'left collar', + 1: 'right collar', + 2: 'left sleeve', + 3: 'right sleeve', + 4: 'left waistline', + 5: 'right waistline', + 6: 'left hem', + 7: 'right hem' + + Args: + ann_file (str): Annotation file path. Default: ''. + subset (str): Specifies the subset of body: ``'full'``, ``'upper'`` or + ``'lower'``. Default: '', which means ``'full'``. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. 
In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + def __init__(self, + ann_file: str = '', + subset: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + self._check_subset_and_metainfo(subset) + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + @classmethod + def _check_subset_and_metainfo(cls, subset: str = '') -> None: + """Check the subset of body and set the corresponding metainfo. + + Args: + subset(str): the subset of body: could be ``'full'``, ``'upper'`` + or ``'lower'``. Default: '', which means ``'full'``. + """ + if subset == '' or subset == 'full': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_full.py') + elif subset == 'upper': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_upper.py') + elif subset == 'lower': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_lower.py') + else: + raise ValueError( + f'{cls.__class__.__name__} got invalid subset: ' + f'{subset}. Should be "full", "lower" or "upper".') diff --git a/mmpose/datasets/datasets/hand/__init__.py b/mmpose/datasets/datasets/hand/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72f9bc14f19a4499b9b098c4d5313acabb9e45ee --- /dev/null +++ b/mmpose/datasets/datasets/hand/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_wholebody_hand_dataset import CocoWholeBodyHandDataset +from .freihand_dataset import FreiHandDataset +from .interhand2d_double_dataset import InterHand2DDoubleDataset +from .onehand10k_dataset import OneHand10KDataset +from .panoptic_hand2d_dataset import PanopticHand2DDataset +from .rhd2d_dataset import Rhd2DDataset + +__all__ = [ + 'OneHand10KDataset', 'FreiHandDataset', 'PanopticHand2DDataset', + 'Rhd2DDataset', 'CocoWholeBodyHandDataset', 'InterHand2DDoubleDataset' +] diff --git a/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py b/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..15ac669d40b012a0d19cbb5d2931b40709199a50 --- /dev/null +++ b/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from typing import List, Tuple + +import numpy as np +from mmengine.fileio import exists, get_local_path +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyHandDataset(BaseCocoStyleDataset): + """CocoWholeBodyDataset for hand pose estimation. + + "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. + More details can be found in the `paper + `__ . + + COCO-WholeBody Hand keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py') + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), ( + f'Annotation file `{self.ann_file}` does not exist') + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + instance_list = [] + image_list = [] + id = 0 + + for img_id in self.coco.getImgIds(): + img = self.coco.loadImgs(img_id)[0] + + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False) + anns = self.coco.loadAnns(ann_ids) + for ann in anns: + for type in ['left', 'right']: + # filter invalid hand annotations, there might be two + # valid instances (left and right hand) in one image + if ann[f'{type}hand_valid'] and max( + ann[f'{type}hand_kpts']) > 0: + + bbox_xywh = np.array( + ann[f'{type}hand_box'], + dtype=np.float32).reshape(1, 4) + + bbox = bbox_xywh2xyxy(bbox_xywh) + + _keypoints = np.array( + ann[f'{type}hand_kpts'], + dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + instance_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'id': id, + } + instance_list.append(instance_info) + id = id + 1 + + instance_list = sorted(instance_list, key=lambda x: x['id']) + return instance_list, image_list diff --git a/mmpose/datasets/datasets/hand/freihand_dataset.py b/mmpose/datasets/datasets/hand/freihand_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0e23cdd577d12e6d20656fde59f7da58a45150 --- /dev/null +++ b/mmpose/datasets/datasets/hand/freihand_dataset.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class FreiHandDataset(BaseCocoStyleDataset): + """FreiHand dataset for hand pose estimation. + + "FreiHAND: A Dataset for Markerless Capture of Hand Pose + and Shape from Single RGB Images", ICCV'2019. + More details can be found in the `paper + `__ . + + FreiHand keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. 
In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/freihand2d.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # use the entire image which is 224x224 + bbox = np.array([0, 0, 224, 224], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/hand/interhand2d_double_dataset.py b/mmpose/datasets/datasets/hand/interhand2d_double_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e8841e6f54e93c90758e3e9133db8cc24994d134 --- /dev/null +++ b/mmpose/datasets/datasets/hand/interhand2d_double_dataset.py @@ -0,0 +1,342 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
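CocoWholeBodyHandDataset and FreiHandDataset above share one parsing idiom: the flat COCO keypoint list is reshaped to (1, K, 3), visibility is clamped to {0, 1}, and labeled joints are counted from the coordinate maxima. A self-contained numpy sketch with made-up values shows the intermediate shapes:

import numpy as np

# Toy COCO-style keypoints: [x1, y1, v1, x2, y2, v2, ...] for K = 3 joints.
flat_kpts = [10.0, 20.0, 2, 0.0, 0.0, 0, 35.5, 12.0, 1]

_keypoints = np.array(flat_kpts, dtype=np.float32).reshape(1, -1, 3)
keypoints = _keypoints[..., :2]                        # shape (1, K, 2)
keypoints_visible = np.minimum(1, _keypoints[..., 2])  # shape (1, K), values in {0, 1}

# Mirrors `np.count_nonzero(keypoints.max(axis=2))` in parse_data_info():
# joints whose coordinates are all zero are treated as unlabeled.
num_keypoints = np.count_nonzero(keypoints.max(axis=2))

# FreiHand crops are 224x224, so the whole image doubles as the instance box.
bbox = np.array([0, 0, 224, 224], dtype=np.float32).reshape(1, 4)

print(keypoints.shape, keypoints_visible, num_keypoints)  # (1, 3, 2) [[1. 0. 1.]] 2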
+import copy +import json +import os.path as osp +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from mmengine.utils import is_abs +from xtcocotools.coco import COCO + +from mmpose.codecs.utils import camera_to_pixel +from mmpose.datasets.datasets import BaseCocoStyleDataset +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy + + +@DATASETS.register_module() +class InterHand2DDoubleDataset(BaseCocoStyleDataset): + """InterHand2.6M dataset for 2d double hands. + + "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose + Estimation from a Single RGB Image", ECCV'2020. + More details can be found in the `paper + `__ . + + The dataset loads raw features and apply specified transforms + to return a dict containing the image tensors and other information. + + InterHand2.6M keypoint indexes:: + + 0: 'r_thumb4', + 1: 'r_thumb3', + 2: 'r_thumb2', + 3: 'r_thumb1', + 4: 'r_index4', + 5: 'r_index3', + 6: 'r_index2', + 7: 'r_index1', + 8: 'r_middle4', + 9: 'r_middle3', + 10: 'r_middle2', + 11: 'r_middle1', + 12: 'r_ring4', + 13: 'r_ring3', + 14: 'r_ring2', + 15: 'r_ring1', + 16: 'r_pinky4', + 17: 'r_pinky3', + 18: 'r_pinky2', + 19: 'r_pinky1', + 20: 'r_wrist', + 21: 'l_thumb4', + 22: 'l_thumb3', + 23: 'l_thumb2', + 24: 'l_thumb1', + 25: 'l_index4', + 26: 'l_index3', + 27: 'l_index2', + 28: 'l_index1', + 29: 'l_middle4', + 30: 'l_middle3', + 31: 'l_middle2', + 32: 'l_middle1', + 33: 'l_ring4', + 34: 'l_ring3', + 35: 'l_ring2', + 36: 'l_ring1', + 37: 'l_pinky4', + 38: 'l_pinky3', + 39: 'l_pinky2', + 40: 'l_pinky1', + 41: 'l_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + camera_param_file (str): Cameras' parameters file. Default: ''. + joint_file (str): Path to the joint file. Default: ''. + use_gt_root_depth (bool): Using the ground truth depth of the wrist + or given depth from rootnet_result_file. Default: ``True``. + rootnet_result_file (str): Path to the wrist depth file. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. 
``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + sample_interval (int, optional): The sample interval of the dataset. + Default: 1. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/interhand3d.py') + + def __init__(self, + ann_file: str = '', + camera_param_file: str = '', + joint_file: str = '', + use_gt_root_depth: bool = True, + rootnet_result_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000, + sample_interval: int = 1): + _ann_file = ann_file + if data_root is not None and not is_abs(_ann_file): + _ann_file = osp.join(data_root, _ann_file) + assert exists(_ann_file), 'Annotation file does not exist.' + self.ann_file = _ann_file + + _camera_param_file = camera_param_file + if data_root is not None and not is_abs(_camera_param_file): + _camera_param_file = osp.join(data_root, _camera_param_file) + assert exists(_camera_param_file), 'Camera file does not exist.' + self.camera_param_file = _camera_param_file + + _joint_file = joint_file + if data_root is not None and not is_abs(_joint_file): + _joint_file = osp.join(data_root, _joint_file) + assert exists(_joint_file), 'Joint file does not exist.' + self.joint_file = _joint_file + + self.use_gt_root_depth = use_gt_root_depth + if not self.use_gt_root_depth: + assert rootnet_result_file is not None + _rootnet_result_file = rootnet_result_file + if data_root is not None and not is_abs(_rootnet_result_file): + _rootnet_result_file = osp.join(data_root, + _rootnet_result_file) + assert exists( + _rootnet_result_file), 'Rootnet result file does not exist.' + self.rootnet_result_file = _rootnet_result_file + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_mode=data_mode, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch, + sample_interval=sample_interval) + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. 
about this category + if 'categories' in self.coco.dataset: + self._metainfo['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + with get_local_path(self.camera_param_file) as local_path: + with open(local_path, 'r') as f: + self.cameras = json.load(f) + with get_local_path(self.joint_file) as local_path: + with open(local_path, 'r') as f: + self.joints = json.load(f) + + instance_list = [] + image_list = [] + + for idx, img_id in enumerate(self.coco.getImgIds()): + if idx % self.sample_interval != 0: + continue + img = self.coco.loadImgs(img_id)[0] + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id) + ann = self.coco.loadAnns(ann_ids)[0] + + instance_info = self.parse_data_info( + dict(raw_ann_info=ann, raw_img_info=img)) + + # skip invalid instance annotation. + if not instance_info: + continue + + instance_list.append(instance_info) + return instance_list, image_list + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict | None: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + if not self.use_gt_root_depth: + rootnet_result = {} + with get_local_path(self.rootnet_result_file) as local_path: + rootnet_annot = json.load(local_path) + for i in range(len(rootnet_annot)): + rootnet_result[str( + rootnet_annot[i]['annot_id'])] = rootnet_annot[i] + + num_keypoints = self.metainfo['num_keypoints'] + + capture_id = str(img['capture']) + camera_name = img['camera'] + frame_idx = str(img['frame_idx']) + camera_pos = np.array( + self.cameras[capture_id]['campos'][camera_name], dtype=np.float32) + camera_rot = np.array( + self.cameras[capture_id]['camrot'][camera_name], dtype=np.float32) + focal = np.array( + self.cameras[capture_id]['focal'][camera_name], dtype=np.float32) + principal_pt = np.array( + self.cameras[capture_id]['princpt'][camera_name], dtype=np.float32) + joint_world = np.array( + self.joints[capture_id][frame_idx]['world_coord'], + dtype=np.float32) + joint_valid = np.array(ann['joint_valid'], dtype=np.float32).flatten() + + keypoints_cam = np.dot( + camera_rot, + joint_world.transpose(1, 0) - + camera_pos.reshape(3, 1)).transpose(1, 0) + + if self.use_gt_root_depth: + bbox_xywh = np.array(ann['bbox'], dtype=np.float32).reshape(1, 4) + + else: + rootnet_ann_data = rootnet_result[str(ann['id'])] + bbox_xywh = np.array( + rootnet_ann_data['bbox'], dtype=np.float32).reshape(1, 4) + + bbox = bbox_xywh2xyxy(bbox_xywh) + + # 41: 'l_wrist', left hand root + # 20: 'r_wrist', right hand root + + # if root is not valid -> root-relative 3D pose is also not valid. 
+ # Therefore, mark all joints as invalid + joint_valid[:20] *= joint_valid[20] + joint_valid[21:] *= joint_valid[41] + + joints_3d_visible = np.minimum(1, + joint_valid.reshape(-1, + 1)).reshape(1, -1) + keypoints_img = camera_to_pixel( + keypoints_cam, + focal[0], + focal[1], + principal_pt[0], + principal_pt[1], + shift=True)[..., :2] + joints_3d = np.zeros((keypoints_cam.shape[-2], 3), + dtype=np.float32).reshape(1, -1, 3) + joints_3d[..., :2] = keypoints_img + joints_3d[..., :21, + 2] = keypoints_cam[..., :21, 2] - keypoints_cam[..., 20, 2] + joints_3d[..., 21:, + 2] = keypoints_cam[..., 21:, 2] - keypoints_cam[..., 41, 2] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'keypoints': joints_3d[:, :, :2], + 'keypoints_visible': joints_3d_visible, + 'hand_type': self.encode_handtype(ann['hand_type']), + 'hand_type_valid': np.array([ann['hand_type_valid']]), + 'dataset': self.metainfo['dataset_name'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'iscrowd': ann.get('iscrowd', False), + 'id': ann['id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + return data_info + + @staticmethod + def encode_handtype(hand_type): + if hand_type == 'right': + return np.array([[1, 0]], dtype=np.float32) + elif hand_type == 'left': + return np.array([[0, 1]], dtype=np.float32) + elif hand_type == 'interacting': + return np.array([[1, 1]], dtype=np.float32) + else: + assert 0, f'Not support hand type: {hand_type}' diff --git a/mmpose/datasets/datasets/hand/onehand10k_dataset.py b/mmpose/datasets/datasets/hand/onehand10k_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3519ace560ef70ce680955bfa82d52c1a11b6b3e --- /dev/null +++ b/mmpose/datasets/datasets/hand/onehand10k_dataset.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class OneHand10KDataset(BaseCocoStyleDataset): + """OneHand10K dataset for hand pose estimation. + + "Mask-pose Cascaded CNN for 2D Hand Pose Estimation from + Single Color Images", TCSVT'2019. + More details can be found in the `paper + `__ . + + OneHand10K keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. 
+ data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/onehand10k.py') diff --git a/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py b/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..26d364840ebe5756687a72a4de52b0213ffdcea2 --- /dev/null +++ b/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PanopticHand2DDataset(BaseCocoStyleDataset): + """Panoptic 2D dataset for hand pose estimation. + + "Hand Keypoint Detection in Single Images using Multiview + Bootstrapping", CVPR'2017. + More details can be found in the `paper + `__ . + + Panoptic keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. 
Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/panoptic_hand2d.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'head_size': ann['head_size'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/hand/rhd2d_dataset.py b/mmpose/datasets/datasets/hand/rhd2d_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc4301590a5f8c8d474b0ef37de4d03309ad0b9 --- /dev/null +++ b/mmpose/datasets/datasets/hand/rhd2d_dataset.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Rhd2DDataset(BaseCocoStyleDataset): + """Rendered Handpose Dataset for hand pose estimation. + + "Learning to Estimate 3D Hand Pose from Single RGB Images", + ICCV'2017. + More details can be found in the `paper + `__ . 
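PanopticHand2DDataset.parse_data_info above clamps the xywh ground-truth box to the image bounds while converting it to corner format. A self-contained numpy sketch with a made-up box that overhangs the right border; it mirrors the clipping in the code above rather than calling mmpose's bbox_xywh2xyxy helper:

import numpy as np

img_w, img_h = 640, 480
x, y, w, h = 600.0, 200.0, 120.0, 90.0  # toy xywh box hanging over the right edge

# Clamp both corners into the image, as parse_data_info() does above.
x1 = np.clip(x, 0, img_w - 1)
y1 = np.clip(y, 0, img_h - 1)
x2 = np.clip(x + w, 0, img_w - 1)
y2 = np.clip(y + h, 0, img_h - 1)

bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)
print(bbox)  # [[600. 200. 639. 290.]]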
+ + Rhd keypoints:: + + 0: 'wrist', + 1: 'thumb4', + 2: 'thumb3', + 3: 'thumb2', + 4: 'thumb1', + 5: 'forefinger4', + 6: 'forefinger3', + 7: 'forefinger2', + 8: 'forefinger1', + 9: 'middle_finger4', + 10: 'middle_finger3', + 11: 'middle_finger2', + 12: 'middle_finger1', + 13: 'ring_finger4', + 14: 'ring_finger3', + 15: 'ring_finger2', + 16: 'ring_finger1', + 17: 'pinky_finger4', + 18: 'pinky_finger3', + 19: 'pinky_finger2', + 20: 'pinky_finger1' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/rhd2d.py') diff --git a/mmpose/datasets/datasets/hand3d/__init__.py b/mmpose/datasets/datasets/hand3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20d4049ef81cd91f3594e221e9e685774b6a2032 --- /dev/null +++ b/mmpose/datasets/datasets/hand3d/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .interhand_3d_dataset import InterHand3DDataset + +__all__ = ['InterHand3DDataset'] diff --git a/mmpose/datasets/datasets/hand3d/interhand_3d_dataset.py b/mmpose/datasets/datasets/hand3d/interhand_3d_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..13d0bd26b3801742ec442e5b0146fec42b774e26 --- /dev/null +++ b/mmpose/datasets/datasets/hand3d/interhand_3d_dataset.py @@ -0,0 +1,347 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
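The InterHand parsers first rotate and translate the world-space joints into the camera frame and then project them with the camera intrinsics via camera_to_pixel. The following self-contained numpy sketch reproduces that geometry with made-up camera parameters; pinhole_project is an illustrative approximation, not mmpose's exact helper (which also takes a shift flag).

import numpy as np

def world_to_camera(joints_world, cam_rot, cam_pos):
    """Rotate/translate (N, 3) world-space joints into the camera frame."""
    return (cam_rot @ (joints_world.T - cam_pos.reshape(3, 1))).T

def pinhole_project(joints_cam, fx, fy, cx, cy):
    """Plain pinhole projection of (N, 3) camera-space joints to pixels."""
    x = joints_cam[:, 0] / joints_cam[:, 2] * fx + cx
    y = joints_cam[:, 1] / joints_cam[:, 2] * fy + cy
    return np.stack([x, y], axis=-1)

# Hypothetical camera: identity rotation, placed 1 m behind the origin (millimetres).
cam_rot = np.eye(3, dtype=np.float32)
cam_pos = np.array([0.0, 0.0, -1000.0], dtype=np.float32)
fx = fy = 1500.0
cx, cy = 640.0, 360.0

joints_world = np.array([[0.0, 0.0, 0.0],       # a joint at the world origin
                         [100.0, -50.0, 0.0]],  # 10 cm along +x, 5 cm along -y
                        dtype=np.float32)

joints_cam = world_to_camera(joints_world, cam_rot, cam_pos)
joints_px = pinhole_project(joints_cam, fx, fy, cx, cy)
print(joints_cam[:, 2])  # depths: [1000. 1000.]
print(joints_px)         # [[640. 360.] [790. 285.]]

In the datasets above the depths are then re-expressed relative to the wrists (index 20 for the right hand, 41 for the left), which is also why wrist validity is propagated to all finger joints.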
+import copy +import json +import os.path as osp +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from mmengine.utils import is_abs +from xtcocotools.coco import COCO + +from mmpose.codecs.utils import camera_to_pixel +from mmpose.datasets.datasets import BaseCocoStyleDataset +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy + + +@DATASETS.register_module() +class InterHand3DDataset(BaseCocoStyleDataset): + """InterHand2.6M dataset for 3d hand. + + "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose + Estimation from a Single RGB Image", ECCV'2020. + More details can be found in the `paper + `__ . + + The dataset loads raw features and apply specified transforms + to return a dict containing the image tensors and other information. + + InterHand2.6M keypoint indexes:: + + 0: 'r_thumb4', + 1: 'r_thumb3', + 2: 'r_thumb2', + 3: 'r_thumb1', + 4: 'r_index4', + 5: 'r_index3', + 6: 'r_index2', + 7: 'r_index1', + 8: 'r_middle4', + 9: 'r_middle3', + 10: 'r_middle2', + 11: 'r_middle1', + 12: 'r_ring4', + 13: 'r_ring3', + 14: 'r_ring2', + 15: 'r_ring1', + 16: 'r_pinky4', + 17: 'r_pinky3', + 18: 'r_pinky2', + 19: 'r_pinky1', + 20: 'r_wrist', + 21: 'l_thumb4', + 22: 'l_thumb3', + 23: 'l_thumb2', + 24: 'l_thumb1', + 25: 'l_index4', + 26: 'l_index3', + 27: 'l_index2', + 28: 'l_index1', + 29: 'l_middle4', + 30: 'l_middle3', + 31: 'l_middle2', + 32: 'l_middle1', + 33: 'l_ring4', + 34: 'l_ring3', + 35: 'l_ring2', + 36: 'l_ring1', + 37: 'l_pinky4', + 38: 'l_pinky3', + 39: 'l_pinky2', + 40: 'l_pinky1', + 41: 'l_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + camera_param_file (str): Cameras' parameters file. Default: ''. + joint_file (str): Path to the joint file. Default: ''. + use_gt_root_depth (bool): Using the ground truth depth of the wrist + or given depth from rootnet_result_file. Default: ``True``. + rootnet_result_file (str): Path to the wrist depth file. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. 
``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/interhand3d.py') + + def __init__(self, + ann_file: str = '', + camera_param_file: str = '', + joint_file: str = '', + use_gt_root_depth: bool = True, + rootnet_result_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + _ann_file = ann_file + if not is_abs(_ann_file): + _ann_file = osp.join(data_root, _ann_file) + assert exists(_ann_file), 'Annotation file does not exist.' + self.ann_file = _ann_file + + _camera_param_file = camera_param_file + if not is_abs(_camera_param_file): + _camera_param_file = osp.join(data_root, _camera_param_file) + assert exists(_camera_param_file), 'Camera file does not exist.' + self.camera_param_file = _camera_param_file + + _joint_file = joint_file + if not is_abs(_joint_file): + _joint_file = osp.join(data_root, _joint_file) + assert exists(_joint_file), 'Joint file does not exist.' + self.joint_file = _joint_file + + self.use_gt_root_depth = use_gt_root_depth + if not self.use_gt_root_depth: + assert rootnet_result_file is not None + _rootnet_result_file = rootnet_result_file + if not is_abs(_rootnet_result_file): + _rootnet_result_file = osp.join(data_root, + _rootnet_result_file) + assert exists( + _rootnet_result_file), 'Rootnet result file does not exist.' + self.rootnet_result_file = _rootnet_result_file + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_mode=data_mode, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. about this category + if 'categories' in self.coco.dataset: + self._metainfo['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + with get_local_path(self.camera_param_file) as local_path: + with open(local_path, 'r') as f: + self.cameras = json.load(f) + with get_local_path(self.joint_file) as local_path: + with open(local_path, 'r') as f: + self.joints = json.load(f) + + instance_list = [] + image_list = [] + + for idx, img_id in enumerate(self.coco.getImgIds()): + img = self.coco.loadImgs(img_id)[0] + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id) + ann = self.coco.loadAnns(ann_ids)[0] + + instance_info = self.parse_data_info( + dict(raw_ann_info=ann, raw_img_info=img)) + + # skip invalid instance annotation. 
+ if not instance_info: + continue + + instance_list.append(instance_info) + return instance_list, image_list + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict | None: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + if not self.use_gt_root_depth: + rootnet_result = {} + with get_local_path(self.rootnet_result_file) as local_path: + rootnet_annot = json.load(local_path) + for i in range(len(rootnet_annot)): + rootnet_result[str( + rootnet_annot[i]['annot_id'])] = rootnet_annot[i] + + num_keypoints = self.metainfo['num_keypoints'] + + capture_id = str(img['capture']) + camera_name = img['camera'] + frame_idx = str(img['frame_idx']) + camera_pos = np.array( + self.cameras[capture_id]['campos'][camera_name], dtype=np.float32) + camera_rot = np.array( + self.cameras[capture_id]['camrot'][camera_name], dtype=np.float32) + focal = np.array( + self.cameras[capture_id]['focal'][camera_name], dtype=np.float32) + principal_pt = np.array( + self.cameras[capture_id]['princpt'][camera_name], dtype=np.float32) + joint_world = np.array( + self.joints[capture_id][frame_idx]['world_coord'], + dtype=np.float32) + joint_valid = np.array(ann['joint_valid'], dtype=np.float32).flatten() + + keypoints_cam = np.dot( + camera_rot, + joint_world.transpose(1, 0) - + camera_pos.reshape(3, 1)).transpose(1, 0) + + if self.use_gt_root_depth: + bbox_xywh = np.array(ann['bbox'], dtype=np.float32).reshape(1, 4) + abs_depth = [keypoints_cam[20, 2], keypoints_cam[41, 2]] + else: + rootnet_ann_data = rootnet_result[str(ann['id'])] + bbox_xywh = np.array( + rootnet_ann_data['bbox'], dtype=np.float32).reshape(1, 4) + abs_depth = rootnet_ann_data['abs_depth'] + bbox = bbox_xywh2xyxy(bbox_xywh) + + # 41: 'l_wrist', left hand root + # 20: 'r_wrist', right hand root + rel_root_depth = keypoints_cam[41, 2] - keypoints_cam[20, 2] + # if root is not valid, root-relative 3D depth is also invalid. + rel_root_valid = joint_valid[20] * joint_valid[41] + + # if root is not valid -> root-relative 3D pose is also not valid. 
+ # Therefore, mark all joints as invalid + joint_valid[:20] *= joint_valid[20] + joint_valid[21:] *= joint_valid[41] + + joints_3d_visible = np.minimum(1, + joint_valid.reshape(-1, + 1)).reshape(1, -1) + keypoints_img = camera_to_pixel( + keypoints_cam, + focal[0], + focal[1], + principal_pt[0], + principal_pt[1], + shift=True)[..., :2] + joints_3d = np.zeros((keypoints_cam.shape[-2], 3), + dtype=np.float32).reshape(1, -1, 3) + joints_3d[..., :2] = keypoints_img + joints_3d[..., :21, + 2] = keypoints_cam[..., :21, 2] - keypoints_cam[..., 20, 2] + joints_3d[..., 21:, + 2] = keypoints_cam[..., 21:, 2] - keypoints_cam[..., 41, 2] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'rotation': 0, + 'keypoints': joints_3d, + 'keypoints_cam': keypoints_cam.reshape(1, -1, 3), + 'keypoints_visible': joints_3d_visible, + 'hand_type': self.encode_handtype(ann['hand_type']), + 'hand_type_valid': np.array([ann['hand_type_valid']]), + 'rel_root_depth': rel_root_depth, + 'rel_root_valid': rel_root_valid, + 'abs_depth': abs_depth, + 'focal': focal, + 'principal_pt': principal_pt, + 'dataset': self.metainfo['dataset_name'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'iscrowd': ann.get('iscrowd', False), + 'id': ann['id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + return data_info + + @staticmethod + def encode_handtype(hand_type): + if hand_type == 'right': + return np.array([[1, 0]], dtype=np.float32) + elif hand_type == 'left': + return np.array([[0, 1]], dtype=np.float32) + elif hand_type == 'interacting': + return np.array([[1, 1]], dtype=np.float32) + else: + assert 0, f'Not support hand type: {hand_type}' diff --git a/mmpose/datasets/datasets/utils.py b/mmpose/datasets/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7433a168b9ef8d9c267095301abfcbf8422886f5 --- /dev/null +++ b/mmpose/datasets/datasets/utils.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +import numpy as np +from mmengine import Config + + +def parse_pose_metainfo(metainfo: dict): + """Load meta information of pose dataset and check its integrity. + + Args: + metainfo (dict): Raw data of pose meta information, which should + contain following contents: + + - "dataset_name" (str): The name of the dataset + - "keypoint_info" (dict): The keypoint-related meta information, + e.g., name, upper/lower body, and symmetry + - "skeleton_info" (dict): The skeleton-related meta information, + e.g., start/end keypoint of limbs + - "joint_weights" (list[float]): The loss weights of keypoints + - "sigmas" (list[float]): The keypoint distribution parameters + to calculate OKS score. See `COCO keypoint evaluation + `__. + + An example of metainfo is shown as follows. + + .. code-block:: none + { + "dataset_name": "coco", + "keypoint_info": + { + 0: + { + "name": "nose", + "type": "upper", + "swap": "", + "color": [51, 153, 255], + }, + 1: + { + "name": "right_eye", + "type": "upper", + "swap": "left_eye", + "color": [51, 153, 255], + }, + ... + }, + "skeleton_info": + { + 0: + { + "link": ("left_ankle", "left_knee"), + "color": [0, 255, 0], + }, + ... + }, + "joint_weights": [1., 1., ...], + "sigmas": [0.026, 0.025, ...], + } + + + A special case is that `metainfo` can have the key "from_file", + which should be the path of a config file. 
In this case, the + actual metainfo will be loaded by: + + .. code-block:: python + metainfo = mmengine.Config.fromfile(metainfo['from_file']) + + Returns: + Dict: pose meta information that contains following contents: + + - "dataset_name" (str): Same as ``"dataset_name"`` in the input + - "num_keypoints" (int): Number of keypoints + - "keypoint_id2name" (dict): Mapping from keypoint id to name + - "keypoint_name2id" (dict): Mapping from keypoint name to id + - "upper_body_ids" (list): Ids of upper-body keypoint + - "lower_body_ids" (list): Ids of lower-body keypoint + - "flip_indices" (list): The Id of each keypoint's symmetric keypoint + - "flip_pairs" (list): The Ids of symmetric keypoint pairs + - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of + shape [K, 3], where each row is the color of one keypint in bgr + - "num_skeleton_links" (int): The number of links + - "skeleton_links" (list): The links represented by Id pairs of start + and end points + - "skeleton_link_colors" (numpy.ndarray): The link color matrix + - "dataset_keypoint_weights" (numpy.ndarray): Same as the + ``"joint_weights"`` in the input + - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input + """ + + if 'from_file' in metainfo: + cfg_file = metainfo['from_file'] + if not osp.isfile(cfg_file): + # Search configs in 'mmpose/.mim/configs/' in case that mmpose + # is installed in non-editable mode. + import mmpose + mmpose_path = osp.dirname(mmpose.__file__) + _cfg_file = osp.join(mmpose_path, '.mim', 'configs', '_base_', + 'datasets', osp.basename(cfg_file)) + if osp.isfile(_cfg_file): + warnings.warn( + f'The metainfo config file "{cfg_file}" does not exist. ' + f'A matched config file "{_cfg_file}" will be used ' + 'instead.') + cfg_file = _cfg_file + else: + raise FileNotFoundError( + f'The metainfo config file "{cfg_file}" does not exist.') + + # TODO: remove the nested structure of dataset_info + # metainfo = Config.fromfile(metainfo['from_file']) + metainfo = Config.fromfile(cfg_file).dataset_info + + # check data integrity + assert 'dataset_name' in metainfo + assert 'keypoint_info' in metainfo + assert 'skeleton_info' in metainfo + assert 'joint_weights' in metainfo + assert 'sigmas' in metainfo + + # parse metainfo + parsed = dict( + dataset_name=None, + num_keypoints=None, + keypoint_id2name={}, + keypoint_name2id={}, + upper_body_ids=[], + lower_body_ids=[], + flip_indices=[], + flip_pairs=[], + keypoint_colors=[], + num_skeleton_links=None, + skeleton_links=[], + skeleton_link_colors=[], + dataset_keypoint_weights=None, + sigmas=None, + ) + + parsed['dataset_name'] = metainfo['dataset_name'] + + # parse keypoint information + parsed['num_keypoints'] = len(metainfo['keypoint_info']) + + for kpt_id, kpt in metainfo['keypoint_info'].items(): + kpt_name = kpt['name'] + parsed['keypoint_id2name'][kpt_id] = kpt_name + parsed['keypoint_name2id'][kpt_name] = kpt_id + parsed['keypoint_colors'].append(kpt.get('color', [255, 128, 0])) + + kpt_type = kpt.get('type', '') + if kpt_type == 'upper': + parsed['upper_body_ids'].append(kpt_id) + elif kpt_type == 'lower': + parsed['lower_body_ids'].append(kpt_id) + + swap_kpt = kpt.get('swap', '') + if swap_kpt == kpt_name or swap_kpt == '': + parsed['flip_indices'].append(kpt_name) + else: + parsed['flip_indices'].append(swap_kpt) + pair = (swap_kpt, kpt_name) + if pair not in parsed['flip_pairs']: + parsed['flip_pairs'].append(pair) + + # parse skeleton information + parsed['num_skeleton_links'] = len(metainfo['skeleton_info']) + for _, sk in 
metainfo['skeleton_info'].items(): + parsed['skeleton_links'].append(sk['link']) + parsed['skeleton_link_colors'].append(sk.get('color', [96, 96, 255])) + + # parse extra information + parsed['dataset_keypoint_weights'] = np.array( + metainfo['joint_weights'], dtype=np.float32) + parsed['sigmas'] = np.array(metainfo['sigmas'], dtype=np.float32) + + if 'stats_info' in metainfo: + parsed['stats_info'] = {} + for name, val in metainfo['stats_info'].items(): + parsed['stats_info'][name] = np.array(val, dtype=np.float32) + + # formatting + def _map(src, mapping: dict): + if isinstance(src, (list, tuple)): + cls = type(src) + return cls(_map(s, mapping) for s in src) + else: + return mapping[src] + + parsed['flip_pairs'] = _map( + parsed['flip_pairs'], mapping=parsed['keypoint_name2id']) + parsed['flip_indices'] = _map( + parsed['flip_indices'], mapping=parsed['keypoint_name2id']) + parsed['skeleton_links'] = _map( + parsed['skeleton_links'], mapping=parsed['keypoint_name2id']) + + parsed['keypoint_colors'] = np.array( + parsed['keypoint_colors'], dtype=np.uint8) + parsed['skeleton_link_colors'] = np.array( + parsed['skeleton_link_colors'], dtype=np.uint8) + + return parsed diff --git a/mmpose/datasets/datasets/wholebody/__init__.py b/mmpose/datasets/datasets/wholebody/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3934fc225e301251e356f9c2d8880d982ec6dc9 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_wholebody_dataset import CocoWholeBodyDataset +from .halpe_dataset import HalpeDataset +from .ubody2d_dataset import UBody2dDataset + +__all__ = ['CocoWholeBodyDataset', 'HalpeDataset', 'UBody2dDataset'] diff --git a/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py b/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9c8b88c20fb7471a3cbb0e904ac023a0b300fcc1 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyDataset(BaseCocoStyleDataset): + """CocoWholeBody dataset for pose estimation. + + "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. + More details can be found in the `paper + `__ . + + COCO-WholeBody keypoints:: + + 0-16: 17 body keypoints, + 17-22: 6 foot keypoints, + 23-90: 68 face keypoints, + 91-132: 42 hand keypoints + + In total, we have 133 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. 
+ data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + # COCO-Wholebody: consisting of body, foot, face and hand keypoints + _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + + ann['face_kpts'] + ann['lefthand_kpts'] + + ann['righthand_kpts']).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2] > 0) + + if 'area' in ann: + area = np.array(ann['area'], dtype=np.float32) + else: + area = np.clip((x2 - x1) * (y2 - y1) * 0.53, a_min=1.0, a_max=None) + area = np.array(area, dtype=np.float32) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'area': area, + 'id': ann['id'], + 'category_id': ann['category_id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + return data_info diff --git a/mmpose/datasets/datasets/wholebody/halpe_dataset.py 
b/mmpose/datasets/datasets/wholebody/halpe_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0699f3b7023b200ee42e3cfe7f475a51123ef190 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody/halpe_dataset.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class HalpeDataset(BaseCocoStyleDataset): + """Halpe dataset for pose estimation. + + 'https://github.com/Fang-Haoshu/Halpe-FullBody' + + Halpe keypoints:: + + 0-19: 20 body keypoints, + 20-25: 6 foot keypoints, + 26-93: 68 face keypoints, + 94-135: 42 hand keypoints + + In total, we have 136 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/halpe.py') diff --git a/mmpose/datasets/datasets/wholebody/ubody2d_dataset.py b/mmpose/datasets/datasets/wholebody/ubody2d_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9a0cb1711a18f9abcf534367db9b12f585b82281 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody/ubody2d_dataset.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from .coco_wholebody_dataset import CocoWholeBodyDataset + + +@DATASETS.register_module() +class UBody2dDataset(CocoWholeBodyDataset): + """Ubody2d dataset for pose estimation. + + "One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer", + CVPR'2023. 
More details can be found in the `paper + `__ . + + Ubody2D keypoints:: + + 0-16: 17 body keypoints, + 17-22: 6 foot keypoints, + 23-90: 68 face keypoints, + 91-132: 42 hand keypoints + + In total, we have 133 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + sample_interval (int, optional): The sample interval of the dataset. + Default: 1. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ubody2d.py') diff --git a/mmpose/datasets/datasets/wholebody3d/__init__.py b/mmpose/datasets/datasets/wholebody3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..19e1fe2f6cd449cf691e56fec776d604804badda --- /dev/null +++ b/mmpose/datasets/datasets/wholebody3d/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .h3wb_dataset import H36MWholeBodyDataset +from .ubody3d_dataset import UBody3dDataset + +__all__ = ['UBody3dDataset', 'H36MWholeBodyDataset'] diff --git a/mmpose/datasets/datasets/wholebody3d/h3wb_dataset.py b/mmpose/datasets/datasets/wholebody3d/h3wb_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..95e40db4b406742386bdbe02dd40ea5c3edda282 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody3d/h3wb_dataset.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
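The `parse_data_info` method of `CocoWholeBodyDataset` above concatenates the body, foot, face and hand fields of a COCO-WholeBody annotation into a single 133-keypoint array and derives a binary visibility mask from the third (v) component. A minimal standalone sketch of that assembly, using a hypothetical toy `ann` dict rather than a real annotation file:

    # Illustrative sketch, not part of the patch; `ann` values are hypothetical.
    import numpy as np

    ann = {
        'keypoints':      np.random.rand(17 * 3).tolist(),   # 17 body keypoints as (x, y, v)
        'foot_kpts':      np.random.rand(6 * 3).tolist(),    # 6 foot keypoints
        'face_kpts':      np.random.rand(68 * 3).tolist(),   # 68 face keypoints
        'lefthand_kpts':  np.random.rand(21 * 3).tolist(),   # 21 left-hand keypoints
        'righthand_kpts': np.random.rand(21 * 3).tolist(),   # 21 right-hand keypoints
    }

    _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + ann['face_kpts'] +
                          ann['lefthand_kpts'] + ann['righthand_kpts']).reshape(1, -1, 3)
    keypoints = _keypoints[..., :2]                            # (1, 133, 2) coordinates
    keypoints_visible = np.minimum(1, _keypoints[..., 2] > 0)  # (1, 133) binary visibility

    assert keypoints.shape == (1, 133, 2)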
+from typing import List, Tuple + +import numpy as np +from mmengine.fileio import get_local_path + +from mmpose.registry import DATASETS +from ..body3d import Human36mDataset + + +@DATASETS.register_module() +class H36MWholeBodyDataset(Human36mDataset): + METAINFO: dict = dict(from_file='configs/_base_/datasets/h3wb.py') + """Human3.6M 3D WholeBody Dataset. + + "H3WB: Human3.6M 3D WholeBody Dataset and Benchmark", ICCV'2023. + More details can be found in the `paper + `__. + + H36M-WholeBody keypoints:: + + 0-16: 17 body keypoints, + 17-22: 6 foot keypoints, + 23-90: 68 face keypoints, + 91-132: 42 hand keypoints + + In total, we have 133 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + seq_step (int): The interval for extracting frames from the video. + Default: 1. + multiple_target (int): If larger than 0, merge every + ``multiple_target`` sequence together. Default: 0. + multiple_target_step (int): The interval for merging sequence. Only + valid when ``multiple_target`` is larger than 0. Default: 0. + pad_video_seq (bool): Whether to pad the video so that poses will be + predicted for every frame in the video. Default: ``False``. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + keypoint_2d_src (str): Specifies 2D keypoint information options, which + should be one of the following options: + + - ``'gt'``: load from the annotation file + - ``'detection'``: load from a detection + result file of 2D keypoint + - 'pipeline': the information will be generated by the pipeline + + Default: ``'gt'``. + keypoint_2d_det_file (str, optional): The 2D keypoint detection file. + If set, 2d keypoint loaded from this file will be used instead of + ground-truth keypoints. This setting is only when + ``keypoint_2d_src`` is ``'detection'``. Default: ``None``. + factor_file (str, optional): The projection factors' file. If set, + factor loaded from this file will be used instead of calculated + factors. Default: ``None``. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. 
+ test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + def __init__(self, test_mode: bool = False, **kwargs): + + self.camera_order_id = ['54138969', '55011271', '58860488', '60457274'] + if not test_mode: + self.subjects = ['S1', 'S5', 'S6'] + else: + self.subjects = ['S7'] + + super().__init__(test_mode=test_mode, **kwargs) + + def _load_ann_file(self, ann_file: str) -> dict: + with get_local_path(ann_file) as local_path: + data = np.load(local_path, allow_pickle=True) + + self.ann_data = data['train_data'].item() + self.camera_data = data['metadata'].item() + + def get_sequence_indices(self) -> List[List[int]]: + return [] + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + + instance_list = [] + image_list = [] + + instance_id = 0 + for subject in self.subjects: + actions = self.ann_data[subject].keys() + for act in actions: + for cam in self.camera_order_id: + if cam not in self.ann_data[subject][act]: + continue + keypoints_2d = self.ann_data[subject][act][cam]['pose_2d'] + keypoints_3d = self.ann_data[subject][act][cam][ + 'camera_3d'] + num_keypoints = keypoints_2d.shape[1] + + camera_param = self.camera_data[subject][cam] + camera_param = { + 'K': camera_param['K'][0, :2, ...], + 'R': camera_param['R'][0], + 'T': camera_param['T'].reshape(3, 1), + 'Distortion': camera_param['Distortion'][0] + } + + seq_step = 1 + _len = (self.seq_len - 1) * seq_step + 1 + _indices = list( + range(len(self.ann_data[subject][act]['frame_id']))) + seq_indices = [ + _indices[i:(i + _len):seq_step] + for i in list(range(0, + len(_indices) - _len + 1)) + ] + + for idx, frame_ids in enumerate(seq_indices): + expected_num_frames = self.seq_len + if self.multiple_target: + expected_num_frames = self.multiple_target + + assert len(frame_ids) == (expected_num_frames), ( + f'Expected `frame_ids` == {expected_num_frames}, but ' # noqa + f'got {len(frame_ids)} ') + + _kpts_2d = keypoints_2d[frame_ids] + _kpts_3d = keypoints_3d[frame_ids] + + target_idx = [-1] if self.causal else [ + int(self.seq_len) // 2 + ] + if self.multiple_target > 0: + target_idx = list(range(self.multiple_target)) + + instance_info = { + 'num_keypoints': + num_keypoints, + 'keypoints': + _kpts_2d, + 'keypoints_3d': + _kpts_3d / 1000, + 'keypoints_visible': + np.ones_like(_kpts_2d[..., 0], dtype=np.float32), + 'keypoints_3d_visible': + np.ones_like(_kpts_2d[..., 0], dtype=np.float32), + 'scale': + np.zeros((1, 1), dtype=np.float32), + 'center': + np.zeros((1, 2), dtype=np.float32), + 'factor': + np.zeros((1, 1), dtype=np.float32), + 'id': + instance_id, + 'category_id': + 1, + 'iscrowd': + 0, + 'camera_param': + camera_param, + 'img_paths': [ + f'{subject}/{act}/{cam}/{i:06d}.jpg' + for i in frame_ids + ], + 'img_ids': + frame_ids, + 'lifting_target': + _kpts_3d[target_idx] / 1000, + 'lifting_target_visible': + np.ones_like(_kpts_2d[..., 0], + dtype=np.float32)[target_idx], + } + instance_list.append(instance_info) + + if self.data_mode == 'bottomup': + for idx, img_name in enumerate( + 
instance_info['img_paths']): + img_info = self.get_img_info(idx, img_name) + image_list.append(img_info) + + instance_id += 1 + + return instance_list, image_list diff --git a/mmpose/datasets/datasets/wholebody3d/ubody3d_dataset.py b/mmpose/datasets/datasets/wholebody3d/ubody3d_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..85b8d893e7bd131d6a9ccf179771964f843e89e5 --- /dev/null +++ b/mmpose/datasets/datasets/wholebody3d/ubody3d_dataset.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from collections import defaultdict +from typing import List, Tuple + +import numpy as np +from mmengine.fileio import get_local_path +from xtcocotools.coco import COCO + +from mmpose.datasets.datasets import BaseMocapDataset +from mmpose.registry import DATASETS + + +@DATASETS.register_module() +class UBody3dDataset(BaseMocapDataset): + """Ubody3d dataset for 3D human pose estimation. + + "One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer", + CVPR'2023. More details can be found in the `paper + `__ . + + Ubody3D keypoints:: + + 0-24: 25 body keypoints, + 25-64: 40 hand keypoints, + 65-136: 72 face keypoints, + + In total, we have 137 keypoints for wholebody 3D pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + multiple_target (int): If larger than 0, merge every + ``multiple_target`` sequence together. Default: 0. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. 
Default: 1000. + """ + + def __init__(self, + multiple_target: int = 0, + multiple_target_step: int = 0, + seq_step: int = 1, + pad_video_seq: bool = False, + **kwargs): + self.seq_step = seq_step + self.pad_video_seq = pad_video_seq + + if multiple_target > 0 and multiple_target_step == 0: + multiple_target_step = multiple_target + self.multiple_target_step = multiple_target_step + + super().__init__(multiple_target=multiple_target, **kwargs) + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ubody3d.py') + + def _load_ann_file(self, ann_file: str) -> dict: + """Load annotation file.""" + with get_local_path(ann_file) as local_path: + self.ann_data = COCO(local_path) + + def get_sequence_indices(self) -> List[List[int]]: + video_frames = defaultdict(list) + img_ids = self.ann_data.getImgIds() + for img_id in img_ids: + img_info = self.ann_data.loadImgs(img_id)[0] + subj, _, _ = self._parse_image_name(img_info['file_name']) + video_frames[subj].append(img_id) + + sequence_indices = [] + _len = (self.seq_len - 1) * self.seq_step + 1 + _step = self.seq_step + + if self.multiple_target: + for _, _img_ids in sorted(video_frames.items()): + n_frame = len(_img_ids) + _ann_ids = self.ann_data.getAnnIds(imgIds=_img_ids) + seqs_from_video = [ + _ann_ids[i:(i + self.multiple_target):_step] + for i in range(0, n_frame, self.multiple_target_step) + ][:(n_frame + self.multiple_target_step - + self.multiple_target) // self.multiple_target_step] + sequence_indices.extend(seqs_from_video) + else: + for _, _img_ids in sorted(video_frames.items()): + n_frame = len(_img_ids) + _ann_ids = self.ann_data.getAnnIds(imgIds=_img_ids) + if self.pad_video_seq: + # Pad the sequence so that every frame in the sequence will + # be predicted. + if self.causal: + frames_left = self.seq_len - 1 + frames_right = 0 + else: + frames_left = (self.seq_len - 1) // 2 + frames_right = frames_left + for i in range(n_frame): + pad_left = max(0, frames_left - i // _step) + pad_right = max( + 0, frames_right - (n_frame - 1 - i) // _step) + start = max(i % _step, i - frames_left * _step) + end = min(n_frame - (n_frame - 1 - i) % _step, + i + frames_right * _step + 1) + sequence_indices.append([_ann_ids[0]] * pad_left + + _ann_ids[start:end:_step] + + [_ann_ids[-1]] * pad_right) + else: + seqs_from_video = [ + _ann_ids[i:(i + _len):_step] + for i in range(0, n_frame - _len + 1, _step) + ] + sequence_indices.extend(seqs_from_video) + + # reduce dataset size if needed + subset_size = int(len(sequence_indices) * self.subset_frac) + start = np.random.randint(0, len(sequence_indices) - subset_size + 1) + end = start + subset_size + + sequence_indices = sequence_indices[start:end] + + return sequence_indices + + def _parse_image_name(self, image_path: str) -> Tuple[str, int]: + """Parse image name to get video name and frame index. + + Args: + image_name (str): Image name. + + Returns: + tuple[str, int]: Video name and frame index. 
+ """ + trim, file_name = image_path.split('/')[-2:] + frame_id, suffix = file_name.split('.') + return trim, frame_id, suffix + + def _load_annotations(self): + """Load data from annotations in COCO format.""" + num_keypoints = self.metainfo['num_keypoints'] + self._metainfo['CLASSES'] = self.ann_data.loadCats( + self.ann_data.getCatIds()) + + instance_list = [] + image_list = [] + + for i, _ann_ids in enumerate(self.sequence_indices): + expected_num_frames = self.seq_len + if self.multiple_target: + expected_num_frames = self.multiple_target + + assert len(_ann_ids) == (expected_num_frames), ( + f'Expected `frame_ids` == {expected_num_frames}, but ' + f'got {len(_ann_ids)} ') + + anns = self.ann_data.loadAnns(_ann_ids) + img_ids = [] + kpts = np.zeros((len(anns), num_keypoints, 2), dtype=np.float32) + kpts_3d = np.zeros((len(anns), num_keypoints, 3), dtype=np.float32) + keypoints_visible = np.zeros((len(anns), num_keypoints, 1), + dtype=np.float32) + for j, ann in enumerate(anns): + img_ids.append(ann['image_id']) + kpts[j] = np.array(ann['keypoints'], dtype=np.float32) + kpts_3d[j] = np.array(ann['keypoints_3d'], dtype=np.float32) + keypoints_visible[j] = np.array( + ann['keypoints_valid'], dtype=np.float32) + imgs = self.ann_data.loadImgs(img_ids) + keypoints_visible = keypoints_visible.squeeze(-1) + + scales = np.zeros(len(imgs), dtype=np.float32) + centers = np.zeros((len(imgs), 2), dtype=np.float32) + img_paths = np.array([img['file_name'] for img in imgs]) + factors = np.zeros((kpts_3d.shape[0], ), dtype=np.float32) + + target_idx = [-1] if self.causal else [int(self.seq_len // 2)] + if self.multiple_target: + target_idx = list(range(self.multiple_target)) + + cam_param = anns[-1]['camera_param'] + if 'w' not in cam_param or 'h' not in cam_param: + cam_param['w'] = 1000 + cam_param['h'] = 1000 + + instance_info = { + 'num_keypoints': num_keypoints, + 'keypoints': kpts, + 'keypoints_3d': kpts_3d, + 'keypoints_visible': keypoints_visible, + 'scale': scales, + 'center': centers, + 'id': i, + 'category_id': 1, + 'iscrowd': 0, + 'img_paths': list(img_paths), + 'img_ids': [img['id'] for img in imgs], + 'lifting_target': kpts_3d[target_idx], + 'lifting_target_visible': keypoints_visible[target_idx], + 'target_img_paths': img_paths[target_idx], + 'camera_param': cam_param, + 'factor': factors, + 'target_idx': target_idx, + } + + instance_list.append(instance_info) + + for img_id in self.ann_data.getImgIds(): + img = self.ann_data.loadImgs(img_id)[0] + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + return instance_list, image_list diff --git a/mmpose/datasets/samplers.py b/mmpose/datasets/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bb34287a8c6b43552601eeb9b2e7c9a4fa90df --- /dev/null +++ b/mmpose/datasets/samplers.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import math +from typing import Iterator, List, Optional, Sized, Union + +import torch +from mmengine.dist import get_dist_info, sync_random_seed +from torch.utils.data import Sampler + +from mmpose.datasets import CombinedDataset +from mmpose.registry import DATA_SAMPLERS + + +@DATA_SAMPLERS.register_module() +class MultiSourceSampler(Sampler): + """Multi-Source Sampler. According to the sampling ratio, sample data from + different datasets to form batches. 
+ + Args: + dataset (Sized): The dataset + batch_size (int): Size of mini-batch + source_ratio (list[int | float]): The sampling ratio of different + source datasets in a mini-batch + shuffle (bool): Whether shuffle the dataset or not. Defaults to + ``True`` + round_up (bool): Whether to add extra samples to make the number of + samples evenly divisible by the world size. Defaults to True. + seed (int, optional): Random seed. If ``None``, set a random seed. + Defaults to ``None`` + """ + + def __init__(self, + dataset: Sized, + batch_size: int, + source_ratio: List[Union[int, float]], + shuffle: bool = True, + round_up: bool = True, + seed: Optional[int] = None) -> None: + + assert isinstance(dataset, CombinedDataset),\ + f'The dataset must be CombinedDataset, but get {dataset}' + assert isinstance(batch_size, int) and batch_size > 0, \ + 'batch_size must be a positive integer value, ' \ + f'but got batch_size={batch_size}' + assert isinstance(source_ratio, list), \ + f'source_ratio must be a list, but got source_ratio={source_ratio}' + assert len(source_ratio) == len(dataset._lens), \ + 'The length of source_ratio must be equal to ' \ + f'the number of datasets, but got source_ratio={source_ratio}' + + rank, world_size = get_dist_info() + self.rank = rank + self.world_size = world_size + + self.dataset = dataset + self.cumulative_sizes = [0] + list(itertools.accumulate(dataset._lens)) + self.batch_size = batch_size + self.source_ratio = source_ratio + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size)) + self.num_per_source = [ + int(batch_size * sr / sum(source_ratio)) for sr in source_ratio + ] + self.num_per_source[0] = batch_size - sum(self.num_per_source[1:]) + + assert sum(self.num_per_source) == batch_size, \ + 'The sum of num_per_source must be equal to ' \ + f'batch_size, but get {self.num_per_source}' + + self.seed = sync_random_seed() if seed is None else seed + self.shuffle = shuffle + self.round_up = round_up + self.source2inds = { + source: self._indices_of_rank(len(ds)) + for source, ds in enumerate(dataset.datasets) + } + + def _infinite_indices(self, sample_size: int) -> Iterator[int]: + """Infinitely yield a sequence of indices.""" + g = torch.Generator() + g.manual_seed(self.seed) + while True: + if self.shuffle: + yield from torch.randperm(sample_size, generator=g).tolist() + else: + yield from torch.arange(sample_size).tolist() + + def _indices_of_rank(self, sample_size: int) -> Iterator[int]: + """Slice the infinite indices by rank.""" + yield from itertools.islice( + self._infinite_indices(sample_size), self.rank, None, + self.world_size) + + def __iter__(self) -> Iterator[int]: + batch_buffer = [] + num_iters = self.num_samples // self.batch_size + if self.round_up and self.num_samples > num_iters * self.batch_size: + num_iters += 1 + for i in range(num_iters): + for source, num in enumerate(self.num_per_source): + batch_buffer_per_source = [] + for idx in self.source2inds[source]: + idx += self.cumulative_sizes[source] + batch_buffer_per_source.append(idx) + if len(batch_buffer_per_source) == num: + batch_buffer += batch_buffer_per_source + break + return iter(batch_buffer) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + """Compatible in `epoch-based runner.""" + pass diff --git a/mmpose/datasets/transforms/__init__.py b/mmpose/datasets/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56780d4e6a69ce9883e9295922d40b9767949732 --- /dev/null +++ 
b/mmpose/datasets/transforms/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bottomup_transforms import (BottomupGetHeatmapMask, BottomupRandomAffine, + BottomupRandomChoiceResize, + BottomupRandomCrop, BottomupResize) +from .common_transforms import (Albumentation, FilterAnnotations, + GenerateTarget, GetBBoxCenterScale, + PhotometricDistortion, RandomBBoxTransform, + RandomFlip, RandomHalfBody, YOLOXHSVRandomAug, + RandomPatchesBlackout) +from .converting import KeypointConverter, SingleHandConverter +from .formatting import PackPoseInputs +from .hand_transforms import HandRandomFlip +from .loading import LoadImage +from .mix_img_transforms import Mosaic, YOLOXMixUp +from .pose3d_transforms import RandomFlipAroundRoot +from .topdown_transforms import TopdownAffine + +__all__ = [ + 'GetBBoxCenterScale', 'RandomBBoxTransform', 'RandomFlip', + 'RandomHalfBody', 'TopdownAffine', 'Albumentation', + 'PhotometricDistortion', 'PackPoseInputs', 'LoadImage', + 'BottomupGetHeatmapMask', 'BottomupRandomAffine', 'BottomupResize', + 'GenerateTarget', 'KeypointConverter', 'RandomFlipAroundRoot', + 'FilterAnnotations', 'YOLOXHSVRandomAug', 'YOLOXMixUp', 'Mosaic', + 'BottomupRandomCrop', 'BottomupRandomChoiceResize', 'HandRandomFlip', + 'SingleHandConverter', 'RandomPatchesBlackout' +] diff --git a/mmpose/datasets/transforms/bottomup_transforms.py b/mmpose/datasets/transforms/bottomup_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c27afd042a502569603baddb6d07e6f4265ab678 --- /dev/null +++ b/mmpose/datasets/transforms/bottomup_transforms.py @@ -0,0 +1,1024 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import cv2 +import numpy as np +import xtcocotools.mask as cocomask +from mmcv.image import imflip_, imresize +from mmcv.image.geometric import imrescale +from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import cache_randomness +from scipy.stats import truncnorm + +from mmpose.registry import TRANSFORMS +from mmpose.structures.bbox import (bbox_clip_border, bbox_corner2xyxy, + bbox_xyxy2corner, get_pers_warp_matrix, + get_udp_warp_matrix, get_warp_matrix) +from mmpose.structures.keypoint import keypoint_clip_border + + +@TRANSFORMS.register_module() +class BottomupGetHeatmapMask(BaseTransform): + """Generate the mask of valid regions from the segmentation annotation. + + Required Keys: + + - img_shape + - invalid_segs (optional) + - warp_mat (optional) + - flip (optional) + - flip_direction (optional) + - heatmaps (optional) + + Added Keys: + + - heatmap_mask + """ + + def __init__(self, get_invalid: bool = False): + super().__init__() + self.get_invalid = get_invalid + + def _segs_to_mask(self, segs: list, img_shape: Tuple[int, + int]) -> np.ndarray: + """Calculate mask from object segmentations. + + Args: + segs (List): The object segmentation annotations in COCO format + img_shape (Tuple): The image shape in (h, w) + + Returns: + np.ndarray: The binary object mask in size (h, w), where the + object pixels are 1 and background pixels are 0 + """ + + # RLE is a simple yet efficient format for storing binary masks. + # details can be found at `COCO tools `__ + rles = [] + for seg in segs: + if isinstance(seg, (tuple, list)): + rle = cocomask.frPyObjects(seg, img_shape[0], img_shape[1]) + if isinstance(rle, list): + # For non-crowded objects (e.g. 
human with no visible + # keypoints), the results is a list of rles + rles.extend(rle) + else: + # For crowded objects, the result is a single rle + rles.append(rle) + + if rles: + mask = cocomask.decode(cocomask.merge(rles)) + else: + mask = np.zeros(img_shape, dtype=np.uint8) + + return mask + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupGetHeatmapMask` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + invalid_segs = results.get('invalid_segs', []) + img_shape = results['img_shape'] # (img_h, img_w) + input_size = results['input_size'] + mask = self._segs_to_mask(invalid_segs, img_shape) + + if not self.get_invalid: + # Calculate the mask of the valid region by negating the + # segmentation mask of invalid objects + mask = np.logical_not(mask) + + # Apply an affine transform to the mask if the image has been + # transformed + if 'warp_mat' in results: + warp_mat = results['warp_mat'] + + mask = mask.astype(np.float32) + mask = cv2.warpAffine( + mask, warp_mat, input_size, flags=cv2.INTER_LINEAR) + + # Flip the mask if the image has been flipped + if results.get('flip', False): + flip_dir = results['flip_direction'] + if flip_dir is not None: + mask = imflip_(mask, flip_dir) + + # Resize the mask to the same size of heatmaps + if 'heatmaps' in results: + heatmaps = results['heatmaps'] + if isinstance(heatmaps, list): + # Multi-level heatmaps + heatmap_mask = [] + for hm in results['heatmaps']: + h, w = hm.shape[1:3] + _mask = imresize( + mask, size=(w, h), interpolation='bilinear') + heatmap_mask.append(_mask) + else: + h, w = heatmaps.shape[1:3] + heatmap_mask = imresize( + mask, size=(w, h), interpolation='bilinear') + else: + heatmap_mask = mask + + # Binarize the mask(s) + if isinstance(heatmap_mask, list): + results['heatmap_mask'] = [hm > 0.5 for hm in heatmap_mask] + else: + results['heatmap_mask'] = heatmap_mask > 0.5 + + return results + + +@TRANSFORMS.register_module() +class BottomupRandomAffine(BaseTransform): + r"""Randomly shift, resize and rotate the image. + + Required Keys: + + - img + - img_shape + - keypoints (optional) + + Modified Keys: + + - img + - keypoints (optional) + + Added Keys: + + - input_size + - warp_mat + + Args: + input_size (Tuple[int, int]): The input image size of the model in + [w, h] + shift_factor (float): Randomly shift the image in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = img_w(h) \cdot shift_factor` in pixels. + Defaults to 0.2 + shift_prob (float): Probability of applying random shift. Defaults to + 1.0 + scale_factor (Tuple[float, float]): Randomly resize the image in range + :math:`[scale_factor[0], scale_factor[1]]`. Defaults to + (0.75, 1.5) + scale_prob (float): Probability of applying random resizing. Defaults + to 1.0 + scale_type (str): wrt ``long`` or ``short`` length of the image. + Defaults to ``short`` + rotate_factor (float): Randomly rotate the bbox in + :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults + to 40.0 + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + + .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Optional[Tuple[int, int]] = None, + shift_factor: float = 0.2, + shift_prob: float = 1., + scale_factor: Tuple[float, float] = (0.75, 1.5), + scale_prob: float = 1., + scale_type: str = 'short', + rotate_factor: float = 30., + rotate_prob: float = 1, + shear_factor: float = 2.0, + shear_prob: float = 1.0, + use_udp: bool = False, + pad_val: Union[float, Tuple[float]] = 0, + border: Tuple[int, int] = (0, 0), + distribution='trunc_norm', + transform_mode='affine', + bbox_keep_corner: bool = True, + clip_border: bool = False) -> None: + super().__init__() + + assert transform_mode in ('affine', 'affine_udp', 'perspective'), \ + f'the argument transform_mode should be either \'affine\', ' \ + f'\'affine_udp\' or \'perspective\', but got \'{transform_mode}\'' + + self.input_size = input_size + self.shift_factor = shift_factor + self.shift_prob = shift_prob + self.scale_factor = scale_factor + self.scale_prob = scale_prob + self.scale_type = scale_type + self.rotate_factor = rotate_factor + self.rotate_prob = rotate_prob + self.shear_factor = shear_factor + self.shear_prob = shear_prob + + self.use_udp = use_udp + self.distribution = distribution + self.clip_border = clip_border + self.bbox_keep_corner = bbox_keep_corner + + self.transform_mode = transform_mode + + if isinstance(pad_val, (int, float)): + pad_val = (pad_val, pad_val, pad_val) + + if 'affine' in transform_mode: + self._transform = partial( + cv2.warpAffine, flags=cv2.INTER_LINEAR, borderValue=pad_val) + else: + self._transform = partial(cv2.warpPerspective, borderValue=pad_val) + + def _random(self, + low: float = -1., + high: float = 1., + size: tuple = ()) -> np.ndarray: + if self.distribution == 'trunc_norm': + """Sample from a truncated normal distribution.""" + return truncnorm.rvs(low, high, size=size).astype(np.float32) + elif self.distribution == 'uniform': + x = np.random.rand(*size) + return x * (high - low) + low + else: + raise ValueError(f'the argument `distribution` should be either' + f'\'trunc_norn\' or \'uniform\', but got ' + f'{self.distribution}.') + + def _fix_aspect_ratio(self, scale: np.ndarray, aspect_ratio: float): + """Extend the scale to match the given aspect ratio. + + Args: + scale (np.ndarray): The image scale (w, h) in shape (2, ) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.ndarray: The reshaped image scale in (2, ) + """ + w, h = scale + if w > h * aspect_ratio: + if self.scale_type == 'long': + _w, _h = w, w / aspect_ratio + elif self.scale_type == 'short': + _w, _h = h * aspect_ratio, h + else: + raise ValueError(f'Unknown scale type: {self.scale_type}') + else: + if self.scale_type == 'short': + _w, _h = w, w / aspect_ratio + elif self.scale_type == 'long': + _w, _h = h * aspect_ratio, h + else: + raise ValueError(f'Unknown scale type: {self.scale_type}') + return np.array([_w, _h], dtype=scale.dtype) + + @cache_randomness + def _get_transform_params(self) -> Tuple: + """Get random transform parameters. 
+ + Returns: + tuple: + - offset (np.ndarray): Image offset rate in shape (2, ) + - scale (np.ndarray): Image scaling rate factor in shape (1, ) + - rotate (np.ndarray): Image rotation degree in shape (1, ) + """ + # get offset + if np.random.rand() < self.shift_prob: + offset = self._random(size=(2, )) * self.shift_factor + else: + offset = np.zeros((2, ), dtype=np.float32) + + # get scale + if np.random.rand() < self.scale_prob: + scale_min, scale_max = self.scale_factor + scale = scale_min + (scale_max - scale_min) * ( + self._random(size=(1, )) + 1) / 2 + else: + scale = np.ones(1, dtype=np.float32) + + # get rotation + if np.random.rand() < self.rotate_prob: + rotate = self._random() * self.rotate_factor + else: + rotate = 0 + + # get shear + if 'perspective' in self.transform_mode and np.random.rand( + ) < self.shear_prob: + shear = self._random(size=(2, )) * self.shear_factor + else: + shear = np.zeros((2, ), dtype=np.float32) + + return offset, scale, rotate, shear + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupRandomAffine` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + img_h, img_w = results['img_shape'][:2] + w, h = self.input_size + + offset_rate, scale_rate, rotate, shear = self._get_transform_params() + + if 'affine' in self.transform_mode: + offset = offset_rate * [img_w, img_h] + scale = scale_rate * [img_w, img_h] + # adjust the scale to match the target aspect ratio + scale = self._fix_aspect_ratio(scale, aspect_ratio=w / h) + + if self.transform_mode == 'affine_udp': + center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], + dtype=np.float32) + warp_mat = get_udp_warp_matrix( + center=center + offset, + scale=scale, + rot=rotate, + output_size=(w, h)) + else: + center = np.array([img_w / 2, img_h / 2], dtype=np.float32) + warp_mat = get_warp_matrix( + center=center + offset, + scale=scale, + rot=rotate, + output_size=(w, h)) + + else: + offset = offset_rate * [w, h] + center = np.array([w / 2, h / 2], dtype=np.float32) + warp_mat = get_pers_warp_matrix( + center=center, + translate=offset, + scale=scale_rate[0], + rot=rotate, + shear=shear) + + # warp image and keypoints + results['img'] = self._transform(results['img'], warp_mat, + (int(w), int(h))) + + if 'keypoints' in results: + # Only transform (x, y) coordinates + kpts = cv2.transform(results['keypoints'], warp_mat) + if kpts.shape[-1] == 3: + kpts = kpts[..., :2] / kpts[..., 2:3] + results['keypoints'] = kpts + + if self.clip_border: + results['keypoints'], results[ + 'keypoints_visible'] = keypoint_clip_border( + results['keypoints'], results['keypoints_visible'], + (w, h)) + + if 'bbox' in results: + bbox = bbox_xyxy2corner(results['bbox']) + bbox = cv2.transform(bbox, warp_mat) + if bbox.shape[-1] == 3: + bbox = bbox[..., :2] / bbox[..., 2:3] + if not self.bbox_keep_corner: + bbox = bbox_corner2xyxy(bbox) + if self.clip_border: + bbox = bbox_clip_border(bbox, (w, h)) + results['bbox'] = bbox + + if 'area' in results: + warp_mat_for_area = warp_mat + if warp_mat.shape[0] == 2: + aux_row = np.array([[0.0, 0.0, 1.0]], dtype=warp_mat.dtype) + warp_mat_for_area = np.concatenate((warp_mat, aux_row)) + results['area'] *= np.linalg.det(warp_mat_for_area) + + results['input_size'] = self.input_size + results['warp_mat'] = warp_mat + + return results + + 
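As a side note, the keypoint warping and area rescaling performed at the end of ``BottomupRandomAffine.transform`` above can be reproduced in isolation. The sketch below is illustrative only and substitutes OpenCV's ``cv2.getRotationMatrix2D`` for mmpose's ``get_warp_matrix`` helpers:

    # Illustrative sketch, not part of the patch; inputs are hypothetical.
    import cv2
    import numpy as np

    keypoints = (np.random.rand(1, 17, 2) * 256).astype(np.float32)  # (N, K, 2) keypoints
    area = np.array([1000.0], dtype=np.float32)                      # instance area in pixels

    # 2x3 affine matrix: rotate 30 degrees around (128, 128) and scale by 0.5
    warp_mat = cv2.getRotationMatrix2D((128, 128), 30, 0.5)

    warped_kpts = cv2.transform(keypoints, warp_mat)                 # same (N, K, 2) shape

    # Augment to 3x3 so the determinant gives the area scaling of the affine map;
    # a uniform scale of 0.5 shrinks areas by a factor of 0.25.
    warp_mat_3x3 = np.concatenate([warp_mat, [[0.0, 0.0, 1.0]]], axis=0)
    warped_area = area * np.linalg.det(warp_mat_3x3)                 # -> array([250.])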
+@TRANSFORMS.register_module() +class BottomupResize(BaseTransform): + """Resize the image to the input size of the model. Optionally, the image + can be resized to multiple sizes to build a image pyramid for multi-scale + inference. + + Required Keys: + + - img + - ori_shape + + Modified Keys: + + - img + - img_shape + + Added Keys: + + - input_size + - warp_mat + - aug_scale + + Args: + input_size (Tuple[int, int]): The input size of the model in [w, h]. + Note that the actually size of the resized image will be affected + by ``resize_mode`` and ``size_factor``, thus may not exactly equals + to the ``input_size`` + aug_scales (List[float], optional): The extra input scales for + multi-scale testing. If given, the input image will be resized + to different scales to build a image pyramid. And heatmaps from + all scales will be aggregated to make final prediction. Defaults + to ``None`` + size_factor (int): The actual input size will be ceiled to + a multiple of the `size_factor` value at both sides. + Defaults to 16 + resize_mode (str): The method to resize the image to the input size. + Options are: + + - ``'fit'``: The image will be resized according to the + relatively longer side with the aspect ratio kept. The + resized image will entirely fits into the range of the + input size + - ``'expand'``: The image will be resized according to the + relatively shorter side with the aspect ratio kept. The + resized image will exceed the given input size at the + longer side + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + + .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + aug_scales: Optional[List[float]] = None, + size_factor: int = 32, + resize_mode: str = 'fit', + pad_val: tuple = (0, 0, 0), + use_udp: bool = False): + super().__init__() + + self.input_size = input_size + self.aug_scales = aug_scales + self.resize_mode = resize_mode + self.size_factor = size_factor + self.use_udp = use_udp + self.pad_val = pad_val + + @staticmethod + def _ceil_to_multiple(size: Tuple[int, int], base: int): + """Ceil the given size (tuple of [w, h]) to a multiple of the base.""" + return tuple(int(np.ceil(s / base) * base) for s in size) + + def _get_input_size(self, img_size: Tuple[int, int], + input_size: Tuple[int, int]) -> Tuple: + """Calculate the actual input size (which the original image will be + resized to) and the padded input size (which the resized image will be + padded to, or which is the size of the model input). + + Args: + img_size (Tuple[int, int]): The original image size in [w, h] + input_size (Tuple[int, int]): The expected input size in [w, h] + + Returns: + tuple: + - actual_input_size (Tuple[int, int]): The target size to resize + the image + - padded_input_size (Tuple[int, int]): The target size to generate + the model input which will contain the resized image + """ + img_w, img_h = img_size + ratio = img_w / img_h + + if self.resize_mode == 'fit': + padded_input_size = self._ceil_to_multiple(input_size, + self.size_factor) + if padded_input_size != input_size: + raise ValueError( + 'When ``resize_mode==\'fit\', the input size (height and' + ' width) should be mulitples of the size_factor(' + f'{self.size_factor}) at all scales. 
Got invalid input ' + f'size {input_size}.') + + pad_w, pad_h = padded_input_size + rsz_w = min(pad_w, pad_h * ratio) + rsz_h = min(pad_h, pad_w / ratio) + actual_input_size = (rsz_w, rsz_h) + + elif self.resize_mode == 'expand': + _padded_input_size = self._ceil_to_multiple( + input_size, self.size_factor) + pad_w, pad_h = _padded_input_size + rsz_w = max(pad_w, pad_h * ratio) + rsz_h = max(pad_h, pad_w / ratio) + + actual_input_size = (rsz_w, rsz_h) + padded_input_size = self._ceil_to_multiple(actual_input_size, + self.size_factor) + + else: + raise ValueError(f'Invalid resize mode {self.resize_mode}') + + return actual_input_size, padded_input_size + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupResize` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + img = results['img'] + img_h, img_w = results['ori_shape'] + w, h = self.input_size + + input_sizes = [(w, h)] + if self.aug_scales: + input_sizes += [(int(w * s), int(h * s)) for s in self.aug_scales] + + imgs = [] + for i, (_w, _h) in enumerate(input_sizes): + + actual_input_size, padded_input_size = self._get_input_size( + img_size=(img_w, img_h), input_size=(_w, _h)) + + if self.use_udp: + center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], + dtype=np.float32) + scale = np.array([img_w, img_h], dtype=np.float32) + warp_mat = get_udp_warp_matrix( + center=center, + scale=scale, + rot=0, + output_size=actual_input_size) + else: + center = np.array([img_w / 2, img_h / 2], dtype=np.float32) + scale = np.array([ + img_w * padded_input_size[0] / actual_input_size[0], + img_h * padded_input_size[1] / actual_input_size[1] + ], + dtype=np.float32) + warp_mat = get_warp_matrix( + center=center, + scale=scale, + rot=0, + output_size=padded_input_size) + + _img = cv2.warpAffine( + img, + warp_mat, + padded_input_size, + flags=cv2.INTER_LINEAR, + borderValue=self.pad_val) + + imgs.append(_img) + + # Store the transform information w.r.t. the main input size + if i == 0: + results['img_shape'] = padded_input_size[::-1] + results['input_center'] = center + results['input_scale'] = scale + results['input_size'] = padded_input_size + + if self.aug_scales: + results['img'] = imgs + results['aug_scales'] = self.aug_scales + else: + results['img'] = imgs[0] + results['aug_scale'] = None + + return results + + +@TRANSFORMS.register_module() +class BottomupRandomCrop(BaseTransform): + """Random crop the image & bboxes & masks. + + The absolute ``crop_size`` is sampled based on ``crop_type`` and + ``image_size``, then the cropped results are generated. + + Required Keys: + + - img + - keypoints + - bbox (optional) + - masks (BitmapMasks | PolygonMasks) (optional) + + Modified Keys: + + - img + - img_shape + - keypoints + - keypoints_visible + - num_keypoints + - bbox (optional) + - bbox_score (optional) + - id (optional) + - category_id (optional) + - raw_ann_info (optional) + - iscrowd (optional) + - segmentation (optional) + - masks (optional) + + Added Keys: + + - warp_mat + + Args: + crop_size (tuple): The relative ratio or absolute pixels of + (width, height). + crop_type (str, optional): One of "relative_range", "relative", + "absolute", "absolute_range". "relative" randomly crops + (h * crop_size[0], w * crop_size[1]) part from an input of size + (h, w). 
"relative_range" uniformly samples relative crop size from + range [crop_size[0], 1] and [crop_size[1], 1] for height and width + respectively. "absolute" crops from an input with absolute size + (crop_size[0], crop_size[1]). "absolute_range" uniformly samples + crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w + in range [crop_size[0], min(w, crop_size[1])]. + Defaults to "absolute". + allow_negative_crop (bool, optional): Whether to allow a crop that does + not contain any bbox area. Defaults to False. + recompute_bbox (bool, optional): Whether to re-compute the boxes based + on cropped instance masks. Defaults to False. + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. + + Note: + - If the image is smaller than the absolute crop size, return the + original image. + - If the crop does not contain any gt-bbox region and + ``allow_negative_crop`` is set to False, skip this image. + """ + + def __init__(self, + crop_size: tuple, + crop_type: str = 'absolute', + allow_negative_crop: bool = False, + recompute_bbox: bool = False, + bbox_clip_border: bool = True) -> None: + if crop_type not in [ + 'relative_range', 'relative', 'absolute', 'absolute_range' + ]: + raise ValueError(f'Invalid crop_type {crop_type}.') + if crop_type in ['absolute', 'absolute_range']: + assert crop_size[0] > 0 and crop_size[1] > 0 + assert isinstance(crop_size[0], int) and isinstance( + crop_size[1], int) + if crop_type == 'absolute_range': + assert crop_size[0] <= crop_size[1] + else: + assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 + self.crop_size = crop_size + self.crop_type = crop_type + self.allow_negative_crop = allow_negative_crop + self.bbox_clip_border = bbox_clip_border + self.recompute_bbox = recompute_bbox + + def _crop_data(self, results: dict, crop_size: Tuple[int, int], + allow_negative_crop: bool) -> Union[dict, None]: + """Function to randomly crop images, bounding boxes, masks, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + crop_size (Tuple[int, int]): Expected absolute size after + cropping, (h, w). + allow_negative_crop (bool): Whether to allow a crop that does not + contain any bbox area. + + Returns: + results (Union[dict, None]): Randomly cropped results, 'img_shape' + key in result dict is updated according to crop size. None will + be returned when there is no valid bbox after cropping. + """ + assert crop_size[0] > 0 and crop_size[1] > 0 + img = results['img'] + margin_h = max(img.shape[0] - crop_size[0], 0) + margin_w = max(img.shape[1] - crop_size[1], 0) + offset_h, offset_w = self._rand_offset((margin_h, margin_w)) + crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] + + # Record the warp matrix for the RandomCrop + warp_mat = np.array([[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]], + dtype=np.float32) + if results.get('warp_mat', None) is None: + results['warp_mat'] = warp_mat + else: + results['warp_mat'] = warp_mat @ results['warp_mat'] + + # crop the image + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] 
+ img_shape = img.shape + results['img'] = img + results['img_shape'] = img_shape[:2] + + # crop bboxes accordingly and clip to the image boundary + if results.get('bbox', None) is not None: + distances = (-offset_w, -offset_h) + bboxes = results['bbox'] + bboxes = bboxes + np.tile(np.asarray(distances), 2) + + if self.bbox_clip_border: + bboxes[..., 0::2] = bboxes[..., 0::2].clip(0, img_shape[1]) + bboxes[..., 1::2] = bboxes[..., 1::2].clip(0, img_shape[0]) + + valid_inds = (bboxes[..., 0] < img_shape[1]) & \ + (bboxes[..., 1] < img_shape[0]) & \ + (bboxes[..., 2] > 0) & \ + (bboxes[..., 3] > 0) + + # If the crop does not contain any gt-bbox area and + # allow_negative_crop is False, skip this image. + if (not valid_inds.any() and not allow_negative_crop): + return None + + results['bbox'] = bboxes[valid_inds] + meta_keys = [ + 'bbox_score', 'id', 'category_id', 'raw_ann_info', 'iscrowd' + ] + for key in meta_keys: + if results.get(key): + if isinstance(results[key], list): + results[key] = np.asarray( + results[key])[valid_inds].tolist() + else: + results[key] = results[key][valid_inds] + + if results.get('keypoints', None) is not None: + keypoints = results['keypoints'] + distances = np.asarray(distances).reshape(1, 1, 2) + keypoints = keypoints + distances + if self.bbox_clip_border: + keypoints_outside_x = keypoints[:, :, 0] < 0 + keypoints_outside_y = keypoints[:, :, 1] < 0 + keypoints_outside_width = keypoints[:, :, 0] > img_shape[1] + keypoints_outside_height = keypoints[:, :, + 1] > img_shape[0] + + kpt_outside = np.logical_or.reduce( + (keypoints_outside_x, keypoints_outside_y, + keypoints_outside_width, keypoints_outside_height)) + + results['keypoints_visible'][kpt_outside] *= 0 + keypoints[:, :, 0] = keypoints[:, :, 0].clip(0, img_shape[1]) + keypoints[:, :, 1] = keypoints[:, :, 1].clip(0, img_shape[0]) + results['keypoints'] = keypoints[valid_inds] + results['keypoints_visible'] = results['keypoints_visible'][ + valid_inds] + + if results.get('segmentation', None) is not None: + results['segmentation'] = results['segmentation'][ + crop_y1:crop_y2, crop_x1:crop_x2] + + if results.get('masks', None) is not None: + results['masks'] = results['masks'][valid_inds.nonzero( + )[0]].crop(np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) + if self.recompute_bbox: + results['bbox'] = results['masks'].get_bboxes( + type(results['bbox'])) + + return results + + @cache_randomness + def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]: + """Randomly generate crop offset. + + Args: + margin (Tuple[int, int]): The upper bound for the offset generated + randomly. + + Returns: + Tuple[int, int]: The random offset for the crop. + """ + margin_h, margin_w = margin + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + + return offset_h, offset_w + + @cache_randomness + def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]: + """Randomly generates the absolute crop size based on `crop_type` and + `image_size`. + + Args: + image_size (Tuple[int, int]): (h, w). + + Returns: + crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels. 
+ """ + h, w = image_size + if self.crop_type == 'absolute': + return min(self.crop_size[1], h), min(self.crop_size[0], w) + elif self.crop_type == 'absolute_range': + crop_h = np.random.randint( + min(h, self.crop_size[0]), + min(h, self.crop_size[1]) + 1) + crop_w = np.random.randint( + min(w, self.crop_size[0]), + min(w, self.crop_size[1]) + 1) + return crop_h, crop_w + elif self.crop_type == 'relative': + crop_w, crop_h = self.crop_size + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + else: + # 'relative_range' + crop_size = np.asarray(self.crop_size, dtype=np.float32) + crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + + def transform(self, results: dict) -> Union[dict, None]: + """Transform function to randomly crop images, bounding boxes, masks, + semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + results (Union[dict, None]): Randomly cropped results, 'img_shape' + key in result dict is updated according to crop size. None will + be returned when there is no valid bbox after cropping. + """ + image_size = results['img'].shape[:2] + crop_size = self._get_crop_size(image_size) + results = self._crop_data(results, crop_size, self.allow_negative_crop) + return results + + +@TRANSFORMS.register_module() +class BottomupRandomChoiceResize(BaseTransform): + """Resize images & bbox & mask from a list of multiple scales. + + This transform resizes the input image to some scale. Bboxes and masks are + then resized with the same scale factor. Resize scale will be randomly + selected from ``scales``. + + How to choose the target scale to resize the image will follow the rules + below: + + - if `scale` is a list of tuple, the target scale is sampled from the list + uniformally. + - if `scale` is a tuple, the target scale will be set to the tuple. + + Required Keys: + + - img + - bbox + - keypoints + + Modified Keys: + + - img + - img_shape + - bbox + - keypoints + + Added Keys: + + - scale + - scale_factor + - scale_idx + + Args: + scales (Union[list, Tuple]): Images scales for resizing. + + **resize_kwargs: Other keyword arguments for the ``resize_type``. + """ + + def __init__( + self, + scales: Sequence[Union[int, Tuple]], + keep_ratio: bool = False, + clip_object_border: bool = True, + backend: str = 'cv2', + **resize_kwargs, + ) -> None: + super().__init__() + if isinstance(scales, list): + self.scales = scales + else: + self.scales = [scales] + + self.keep_ratio = keep_ratio + self.clip_object_border = clip_object_border + self.backend = backend + + @cache_randomness + def _random_select(self) -> Tuple[int, int]: + """Randomly select an scale from given candidates. + + Returns: + (tuple, int): Returns a tuple ``(scale, scale_dix)``, + where ``scale`` is the selected image scale and + ``scale_idx`` is the selected index in the given candidates. 
+ """ + + scale_idx = np.random.randint(len(self.scales)) + scale = self.scales[scale_idx] + return scale, scale_idx + + def _resize_img(self, results: dict) -> None: + """Resize images with ``self.scale``.""" + + if self.keep_ratio: + + img, scale_factor = imrescale( + results['img'], + self.scale, + interpolation='bilinear', + return_scale=True, + backend=self.backend) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results['img'].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = imresize( + results['img'], + self.scale, + interpolation='bilinear', + return_scale=True, + backend=self.backend) + + results['img'] = img + results['img_shape'] = img.shape[:2] + results['scale_factor'] = (w_scale, h_scale) + results['input_size'] = img.shape[:2] + w, h = results['ori_shape'] + center = np.array([w / 2, h / 2], dtype=np.float32) + scale = np.array([w, h], dtype=np.float32) + results['input_center'] = center + results['input_scale'] = scale + + def _resize_bboxes(self, results: dict) -> None: + """Resize bounding boxes with ``self.scale``.""" + if results.get('bbox', None) is not None: + bboxes = results['bbox'] * np.tile( + np.array(results['scale_factor']), 2) + if self.clip_object_border: + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, + results['img_shape'][1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, + results['img_shape'][0]) + results['bbox'] = bboxes + + def _resize_keypoints(self, results: dict) -> None: + """Resize keypoints with ``self.scale``.""" + if results.get('keypoints', None) is not None: + keypoints = results['keypoints'] + + keypoints[:, :, :2] = keypoints[:, :, :2] * np.array( + results['scale_factor']) + if self.clip_object_border: + keypoints[:, :, 0] = np.clip(keypoints[:, :, 0], 0, + results['img_shape'][1]) + keypoints[:, :, 1] = np.clip(keypoints[:, :, 1], 0, + results['img_shape'][0]) + results['keypoints'] = keypoints + + def transform(self, results: dict) -> dict: + """Apply resize transforms on results from a list of scales. + + Args: + results (dict): Result dict contains the data to transform. + + Returns: + dict: Resized results, 'img', 'bbox', + 'keypoints', 'scale', 'scale_factor', 'img_shape', + and 'keep_ratio' keys are updated in result dict. + """ + + target_scale, scale_idx = self._random_select() + + self.scale = target_scale + self._resize_img(results) + self._resize_bboxes(results) + self._resize_keypoints(results) + + results['scale_idx'] = scale_idx + return results diff --git a/mmpose/datasets/transforms/common_transforms.py b/mmpose/datasets/transforms/common_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c469c0ea4bea634f27bc837528eeddbf1e6f02fd --- /dev/null +++ b/mmpose/datasets/transforms/common_transforms.py @@ -0,0 +1,1960 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings +from copy import deepcopy +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import cv2 +import mmcv +import mmengine +import numpy as np +from mmcv.image import imflip +from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness +from mmengine import is_list_of +from mmengine.dist import get_dist_info +from scipy.stats import truncnorm +from scipy.ndimage import distance_transform_edt + +from mmpose.codecs import * # noqa: F401, F403 +from mmpose.registry import KEYPOINT_CODECS, TRANSFORMS +from mmpose.structures.bbox import bbox_xyxy2cs, flip_bbox, bbox_cs2xyxy +from mmpose.structures.keypoint import flip_keypoints +from mmpose.utils.typing import MultiConfig + +from pycocotools import mask as Mask + +try: + import albumentations +except ImportError: + albumentations = None + +Number = Union[int, float] + + +@TRANSFORMS.register_module() +class GetBBoxCenterScale(BaseTransform): + """Convert bboxes from [x, y, w, h] to center and scale. + + The center is the coordinates of the bbox center, and the scale is the + bbox width and height normalized by a scale factor. + + Required Keys: + + - bbox + + Added Keys: + + - bbox_center + - bbox_scale + + Args: + padding (float): The bbox padding scale that will be multilied to + `bbox_scale`. Defaults to 1.25 + """ + + def __init__(self, padding: float = 1.25) -> None: + super().__init__() + + self.padding = padding + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`GetBBoxCenterScale`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + # Save the original bbox wrt. input + results['bbox_xyxy_wrt_input'] = results['bbox'] + + if 'bbox_center' in results and 'bbox_scale' in results: + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('Use the existing "bbox_center" and "bbox_scale"' + '. The padding will still be applied.') + results['bbox_scale'] = results['bbox_scale'] * self.padding + + else: + bbox = results['bbox'] + center, scale = bbox_xyxy2cs(bbox, padding=self.padding) + + results['bbox_center'] = center + results['bbox_scale'] = scale + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(padding={self.padding})' + return repr_str + + +@TRANSFORMS.register_module() +class MaskBackground(BaseTransform): + """Convert bboxes from [x, y, w, h] to center and scale. + + The center is the coordinates of the bbox center, and the scale is the + bbox width and height normalized by a scale factor. + + Required Keys: + + - bbox + + Added Keys: + + - bbox_center + - bbox_scale + + Args: + padding (float): The bbox padding scale that will be multilied to + `bbox_scale`. 
Defaults to 1.25 + """ + + def __init__(self, + continue_on_failure: bool = True, + prob: float = 1.0, + alpha: float = 1.0, + erode_prob: float = 0.0, + erode_amount: float = 0.5, + dilate_prob: float = 0.0, + dilate_amount: float = 0.5, + ) -> None: + + super().__init__() + + assert 0 <= alpha <= 1, 'alpha should be in [0, 1]' + assert 0 <= prob <= 1, 'prob should be in [0, 1]' + + self.continue_on_failure = continue_on_failure + self.alpha = alpha + self.prob = prob + + assert 0 <= erode_prob <= 1, 'erode_prob should be in [0, 1]' + assert 0 <= dilate_prob <= 1, 'dilate_prob should be in [0, 1]' + assert 0 < erode_amount < 1, 'erode_amount should be in [0, 1]' + assert 0 < dilate_amount < 1, 'dilate_amount should be in [0, 1]' + assert erode_prob + dilate_prob <= 1, 'erode_prob + dilate_prob should be less than or equal to 1' + self.noise_prob = erode_prob + dilate_prob + if self.noise_prob > 0: + self.erode_prob = erode_prob / (self.noise_prob) + self.dilate_prob = dilate_prob / (self.noise_prob) + else: + self.erode_prob = 0 + self.dilate_prob = 0 + + self.erode_amount = erode_amount + self.dilate_amount = dilate_amount + + def _perturb_by_dilation(self, mask: np.ndarray) -> np.ndarray: + """Perturb the mask to simulate real-world detector.""" + mask_shape = mask.shape + + mask_area = (mask>0).sum() + + # Close the mask to erase small holes + k = max(mask_area // 1000, 5) + kernel = np.ones((k, k), np.uint8) + mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) + + # Dilate the mask to increase it a bit + k = max(mask_area // 3000, 5) + kernel = np.ones((k, k), np.uint8) + mask = cv2.dilate(mask, kernel, iterations=1) + + return mask.reshape(mask_shape) + + def _perturb_by_erosion(self, mask: np.ndarray) -> np.ndarray: + """Perturb the mask to simulate real-world detector.""" + mask_shape = mask.shape + + mask_area = (mask>0).sum() + + # Close the mask to erase small holes + k = max(mask_area // 1000, 5) + kernel = np.ones((k, k), np.uint8) + mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel) + + # Erode the mask to decrease it a bit and cut-off limbs + k = max(mask_area // 3000, 5) + kernel = np.ones((k, k), np.uint8) + mask = cv2.erode(mask, kernel, iterations=1) + + return mask.reshape(mask_shape) + + @cache_randomness + def _perturb_by_patches(self, mask: np.ndarray, amount: float, num_patches: int = 10) -> np.ndarray: + mask_shape = mask.shape + + # Generate 10 random seeds uniformly distributed in the mask + mask_idx = np.where(mask.flatten() > 0)[0] + seeds = np.random.choice(mask_idx, num_patches, replace=False) + sx, sy = np.unravel_index(seeds, mask.shape) + + # For each pixel, label it by it nearest seed + labels = np.ones_like(mask) + seed_labels = np.zeros_like(mask) + seed_labels[sx, sy] = np.arange(num_patches) + 1 + + _, indices = distance_transform_edt(seed_labels == 0, return_indices=True) + labels = seed_labels[indices[0], indices[1]] + labels = labels * mask + + # Select labels for removal + random_remove_amount = np.random.uniform(0.0, amount) + random_remove_ratio = int(num_patches * random_remove_amount) + remove_labels = np.random.choice(np.unique(labels), random_remove_ratio, replace=False) + binary_labels = np.isin(labels, remove_labels, invert=True) + + mask = (binary_labels > 0).astype(np.uint8) * mask + + return mask.reshape(mask_shape) + + @cache_randomness + def _coin_flip(self) -> bool: + return np.random.rand() < 0.5 + + @cache_randomness + def _perturb_mask(self, mask: np.ndarray) -> np.ndarray: + """Perturb the mask to simulate real-world 
detector.""" + + mask_shape = mask.shape + + if not np.random.rand() < self.noise_prob: + return mask + + # Erode and dilate the mask to increase smoothness + kernel = np.ones((5, 5), np.uint8) + mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel) + mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) + + increase_mask = np.random.choice([False, True], p=[self.erode_prob, self.dilate_prob]) + + if increase_mask: + if self._coin_flip(): + try: + mask = self._perturb_by_patches( + mask=1-mask, + amount=self.dilate_amount, + num_patches=50, + ) + mask = 1-mask + except ValueError: + pass + else: + mask = self._perturb_by_dilation(mask) + + else: + if self._coin_flip(): + try: + mask = self._perturb_by_patches( + mask=mask, + amount=self.erode_amount, + num_patches=10, + ) + except ValueError: + pass + + else: + mask = self._perturb_by_erosion(mask) + + mask = (mask>0).astype(np.uint8) + return mask.reshape(mask_shape) + + @cache_randomness + def _do_masking(self): + return np.random.rand() < self.prob + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`GetBBoxCenterScale`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + # Try to load the mask from the results + mask = results.get('segmentation', None) + # print("\nMaskBackground: ", mask is not None) + + if mask is None and not self.continue_on_failure: + raise ValueError('No mask found in the results and self.continue_on_failure is set to False.') + + if mask is not None and self._do_masking(): + # Convert mask from polygons to binary mask + try: + mask_rle = Mask.frPyObjects(mask, results['img_shape'][0], results['img_shape'][1]) + except IndexError: + # breakpoint() + # print("Mask shape:", mask.shape) + # print("Mask max:", mask.max()) + # print("Mask min:", mask.min()) + # print("Image shape:", results['img_shape']) + + return results + + + mask_rle = Mask.merge(mask_rle) + img = results['img'].copy() + masked_image = results['img'].copy() + mask = Mask.decode(mask_rle).reshape((img.shape[0], img.shape[1], 1)) + binary_mask = (mask > 0).astype(np.uint8) + + # Perturb the mask to simulate real-world detector + # print("Here I would perturb the mask") + old_mask = mask.copy() + binary_mask = self._perturb_mask(binary_mask) + + masked_image = masked_image * binary_mask + results['img'] = cv2.addWeighted(img, 1 - self.alpha, masked_image, self.alpha, 0) + + # hash_id = abs(hash(555)) + # cv2.imwrite("tmp_visualization/_perturbed_mask_{:d}.jpg".format(hash_id), mask * 255) + # cv2.imwrite("tmp_visualization/_old_mask_{:d}.jpg".format(hash_id), old_mask * 255) + # cv2.imwrite("tmp_visualization/_weighted_masked_image_{:d}.jpg".format(hash_id), results['img']) + # breakpoint() + # Save the mask as a binary mask + + # Save the image + img = results['img'] + + # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + # cv2.imwrite("tmp_visualization/masked_image_{:d}.jpg".format(abs(hash(555))), img) + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(continue_on_failure={self.continue_on_failure})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomFlip(BaseTransform): + """Randomly flip the image, bbox and keypoints. 
+ + Required Keys: + + - img + - img_shape + - flip_indices + - input_size (optional) + - bbox (optional) + - bbox_center (optional) + - keypoints (optional) + - keypoints_visible (optional) + - keypoints_visibility (optional) + - img_mask (optional) + + Modified Keys: + + - img + - bbox (optional) + - bbox_center (optional) + - keypoints (optional) + - keypoints_visible (optional) + - keypoints_visibility (optional) + - img_mask (optional) + + Added Keys: + + - flip + - flip_direction + + Args: + prob (float | list[float]): The flipping probability. If a list is + given, the argument `direction` should be a list with the same + length. And each element in `prob` indicates the flipping + probability of the corresponding one in ``direction``. Defaults + to 0.5 + direction (str | list[str]): The flipping direction. Options are + ``'horizontal'``, ``'vertical'`` and ``'diagonal'``. If a list is + is given, each data sample's flipping direction will be sampled + from a distribution determined by the argument ``prob``. Defaults + to ``'horizontal'``. + """ + + def __init__(self, + prob: Union[float, List[float]] = 0.5, + direction: Union[str, List[str]] = 'horizontal') -> None: + if isinstance(prob, list): + assert is_list_of(prob, float) + assert 0 <= sum(prob) <= 1 + elif isinstance(prob, float): + assert 0 <= prob <= 1 + else: + raise ValueError(f'probs must be float or list of float, but \ + got `{type(prob)}`.') + self.prob = prob + + valid_directions = ['horizontal', 'vertical', 'diagonal'] + if isinstance(direction, str): + assert direction in valid_directions + elif isinstance(direction, list): + assert is_list_of(direction, str) + assert set(direction).issubset(set(valid_directions)) + else: + raise ValueError(f'direction must be either str or list of str, \ + but got `{type(direction)}`.') + self.direction = direction + + if isinstance(prob, list): + assert len(prob) == len(self.direction) + + @cache_randomness + def _choose_direction(self) -> str: + """Choose the flip direction according to `prob` and `direction`""" + if isinstance(self.direction, + List) and not isinstance(self.direction, str): + # None means non-flip + direction_list: list = list(self.direction) + [None] + elif isinstance(self.direction, str): + # None means non-flip + direction_list = [self.direction, None] + + if isinstance(self.prob, list): + non_prob: float = 1 - sum(self.prob) + prob_list = self.prob + [non_prob] + elif isinstance(self.prob, float): + non_prob = 1. - self.prob + # exclude non-flip + single_ratio = self.prob / (len(direction_list) - 1) + prob_list = [single_ratio] * (len(direction_list) - 1) + [non_prob] + + cur_dir = np.random.choice(direction_list, p=prob_list) + + return cur_dir + + def transform(self, results: dict) -> dict: + """The transform function of :class:`RandomFlip`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
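+
+        Example (a hypothetical pipeline entry; only arguments defined by
+        this class are used)::
+
+            flip_cfg = dict(type='RandomFlip', prob=0.5,
+                            direction='horizontal')
+            # after the transform, results['flip'] and
+            # results['flip_direction'] record what was applied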
+ """ + + flip_dir = self._choose_direction() + + if flip_dir is None: + results['flip'] = False + results['flip_direction'] = None + else: + results['flip'] = True + results['flip_direction'] = flip_dir + + h, w = results.get('input_size', results['img_shape']) + # flip image and mask + if isinstance(results['img'], list): + results['img'] = [ + imflip(img, direction=flip_dir) for img in results['img'] + ] + else: + results['img'] = imflip(results['img'], direction=flip_dir) + + if 'img_mask' in results: + results['img_mask'] = imflip( + results['img_mask'], direction=flip_dir) + + # flip bboxes + if results.get('bbox', None) is not None: + results['bbox'] = flip_bbox( + results['bbox'], + image_size=(w, h), + bbox_format='xyxy', + direction=flip_dir) + + # flip bboxes + if results.get('bbox_xyxy_wrt_input', None) is not None: + results['bbox_xyxy_wrt_input'] = flip_bbox( + results['bbox_xyxy_wrt_input'], + image_size=(w, h), + bbox_format='xyxy', + direction=flip_dir) + + if results.get('bbox_center', None) is not None: + results['bbox_center'] = flip_bbox( + results['bbox_center'], + image_size=(w, h), + bbox_format='center', + direction=flip_dir) + + # flip keypoints + if results.get('keypoints', None) is not None: + keypoints, keypoints_visible = flip_keypoints( + results['keypoints'], + results.get('keypoints_visible', None), + image_size=(w, h), + flip_indices=results['flip_indices'], + direction=flip_dir) + _, keypoints_visibility = flip_keypoints( + results['keypoints'], + results.get('keypoints_visibility', None), + image_size=(w, h), + flip_indices=results['flip_indices'], + direction=flip_dir) + + results['keypoints'] = keypoints + results['keypoints_visible'] = keypoints_visible + results['keypoints_visibility'] = keypoints_visibility + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'direction={self.direction})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomHalfBody(BaseTransform): + """Data augmentation with half-body transform that keeps only the upper or + lower body at random. + + Required Keys: + + - keypoints + - keypoints_visible + - upper_body_ids + - lower_body_ids + + Modified Keys: + + - bbox + - bbox_center + - bbox_scale + + Args: + min_total_keypoints (int): The minimum required number of total valid + keypoints of a person to apply half-body transform. Defaults to 8 + min_half_keypoints (int): The minimum required number of valid + half-body keypoints of a person to apply half-body transform. + Defaults to 2 + padding (float): The bbox padding scale that will be multilied to + `bbox_scale`. Defaults to 1.5 + prob (float): The probability to apply half-body transform when the + keypoint number meets the requirement. 
Defaults to 0.3 + """ + + def __init__(self, + min_total_keypoints: int = 9, + min_upper_keypoints: int = 2, + min_lower_keypoints: int = 3, + padding: float = 1.5, + prob: float = 0.3, + upper_prioritized_prob: float = 0.7) -> None: + super().__init__() + self.min_total_keypoints = min_total_keypoints + self.min_upper_keypoints = min_upper_keypoints + self.min_lower_keypoints = min_lower_keypoints + self.padding = padding + self.prob = prob + self.upper_prioritized_prob = upper_prioritized_prob + + def _get_half_body_bbox(self, keypoints: np.ndarray, + half_body_ids: List[int] + ) -> Tuple[np.ndarray, np.ndarray]: + """Get half-body bbox center and scale of a single instance. + + Args: + keypoints (np.ndarray): Keypoints in shape (K, D) + upper_body_ids (list): The list of half-body keypont indices + + Returns: + tuple: A tuple containing half-body bbox center and scale + - center: Center (x, y) of the bbox + - scale: Scale (w, h) of the bbox + """ + + selected_keypoints = keypoints[half_body_ids] + center = selected_keypoints.mean(axis=0)[:2] + + x1, y1 = selected_keypoints.min(axis=0) + x2, y2 = selected_keypoints.max(axis=0) + w = x2 - x1 + h = y2 - y1 + scale = np.array([w, h], dtype=center.dtype) * self.padding + + return center, scale + + def _get_half_body_exact_bbox(self, keypoints: np.ndarray, + half_body_ids: List[int], + bbox: np.ndarray, + ) -> np.ndarray: + """Get half-body bbox center and scale of a single instance. + + Args: + keypoints (np.ndarray): Keypoints in shape (K, D) + upper_body_ids (list): The list of half-body keypont indices + + Returns: + tuple: A tuple containing half-body bbox center and scale + - center: Center (x, y) of the bbox + - scale: Scale (w, h) of the bbox + """ + + selected_keypoints = keypoints[half_body_ids] + center = selected_keypoints.mean(axis=0)[:2] + + x1, y1 = selected_keypoints.min(axis=0) + x2, y2 = selected_keypoints.max(axis=0) + w = x2 - x1 + h = y2 - y1 + scale = np.array([w, h], dtype=center.dtype) * self.padding + + x1, y1 = center - scale / 2 + x2, y2 = center + scale / 2 + + # Do not exceed the original bbox + x1 = np.maximum(x1, bbox[0]) + y1 = np.maximum(y1, bbox[1]) + x2 = np.minimum(x2, bbox[2]) + y2 = np.minimum(y2, bbox[3]) + + return np.array([x1, y1, x2, y2]) + + @cache_randomness + def _random_select_half_body(self, keypoints_visible: np.ndarray, + upper_body_ids: List[int], + lower_body_ids: List[int] + ) -> List[Optional[List[int]]]: + """Randomly determine whether applying half-body transform and get the + half-body keyponit indices of each instances. + + Args: + keypoints_visible (np.ndarray, optional): The visibility of + keypoints in shape (N, K, 1) or (N, K, 2). + upper_body_ids (list): The list of upper body keypoint indices + lower_body_ids (list): The list of lower body keypoint indices + + Returns: + list[list[int] | None]: The selected half-body keypoint indices + of each instance. ``None`` means not applying half-body transform. 
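+
+        Example (a worked illustration with the default thresholds)::
+
+            # an instance with 12 visible keypoints (>= min_total_keypoints=9)
+            # passes the first check; the half-body crop is then applied with
+            # probability `prob`. If 7 upper and 5 lower keypoints are
+            # visible (>= min_upper_keypoints=2 and >= min_lower_keypoints=3),
+            # the upper half is preferred with probability
+            # upper_prioritized_prob.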
+ """ + + if keypoints_visible.ndim == 3: + keypoints_visible = keypoints_visible[..., 0] + + half_body_ids = [] + + for visible in keypoints_visible: + if visible.sum() < self.min_total_keypoints: + indices = None + elif np.random.rand() > self.prob: + indices = None + else: + upper_valid_ids = [i for i in upper_body_ids if visible[i] > 0] + lower_valid_ids = [i for i in lower_body_ids if visible[i] > 0] + + num_upper = len(upper_valid_ids) + num_lower = len(lower_valid_ids) + + prefer_upper = np.random.rand() < self.upper_prioritized_prob + if (num_upper < self.min_upper_keypoints + and num_lower < self.min_lower_keypoints): + indices = None + elif num_lower < self.min_lower_keypoints: + indices = upper_valid_ids + elif num_upper < self.min_upper_keypoints: + indices = lower_valid_ids + else: + indices = ( + upper_valid_ids if prefer_upper else lower_valid_ids) + + half_body_ids.append(indices) + + return half_body_ids + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`HalfBodyTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + half_body_ids = self._random_select_half_body( + keypoints_visible=results['keypoints_visible'], + upper_body_ids=results['upper_body_ids'], + lower_body_ids=results['lower_body_ids']) + + bbox_center = [] + bbox_scale = [] + + bbox_xyxy_wrt_input = [] + + for i, indices in enumerate(half_body_ids): + if indices is None: + bbox_center.append(results['bbox_center'][i]) + bbox_scale.append(results['bbox_scale'][i]) + bbox_xyxy_wrt_input.append(results['bbox_xyxy_wrt_input'][i]) + else: + _center, _scale = self._get_half_body_bbox( + results['keypoints'][i], indices) + bbox_center.append(_center) + bbox_scale.append(_scale) + exact_bbox = self._get_half_body_exact_bbox( + results['keypoints'][i], indices, results['bbox_xyxy_wrt_input'][i]) + bbox_xyxy_wrt_input.append(exact_bbox) + + results['bbox_center'] = np.stack(bbox_center) + results['bbox_scale'] = np.stack(bbox_scale) + results['bbox_xyxy_wrt_input'] = np.stack(bbox_xyxy_wrt_input) + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(min_total_keypoints={self.min_total_keypoints}, ' + repr_str += f'min_upper_keypoints={self.min_upper_keypoints}, ' + repr_str += f'min_lower_keypoints={self.min_lower_keypoints}, ' + repr_str += f'padding={self.padding}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'upper_prioritized_prob={self.upper_prioritized_prob})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomPatchesBlackout(BaseTransform): + """Data augmentation that divide image into patches and set color of random + pathes to black. In AID paper marked as 'hide and seek'. + + Required Keys: + + - keypoints + - keypoints_visible + - keypoint_visibility + + Modified Keys: + + - img + - keypoint_visibility + + Args: + grid_size (tuple(int, int)): Grid size of the patches. Defaults to + (8, 6) + mask_ratio (float): Ratio of patches to blackout. Defaults to 0.3 + prob (float): The probability to apply black patches. 
Defaults to 0.8 + """ + + def __init__(self, + grid_size: Tuple[int, int] = (8, 6), + mask_ratio: float = 0.3, + prob: float = 0.8) -> None: + super().__init__() + self.grid_size = grid_size + self.mask_ratio = mask_ratio + self.prob = prob + + @cache_randomness + def _get_random_patches(self, grid_h, grid_w) -> np.ndarray: + black_patches = np.zeros((grid_h, grid_w), dtype=bool) + + if np.random.rand() < self.prob: + + # Split image into grid + num_patches = int(self.grid_size[0] * self.grid_size[1]) + + # Randomly choose patches to blackout + black_patches = np.random.choice( + [0, 1], + num_patches, + p=[1 - self.mask_ratio, self.mask_ratio] + ) + black_patches = black_patches.reshape(grid_h, grid_w).astype(bool) + + return black_patches + + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`HalfBodyTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + img = results['img'] + + if "transformed_keypoints" in results: + kpts = results['transformed_keypoints'].squeeze() + else: + kpts = results['keypoints'].squeeze() + + h, w = img.shape[:2] + grid_h, grid_w = self.grid_size + dh = np.ceil(h / grid_h).astype(int) + dw = np.ceil(w / grid_w).astype(int) + + black_patches = self._get_random_patches(grid_h, grid_w) + + for i in range(grid_h): + for j in range(grid_w): + if black_patches[i, j]: + # Set all pixel in the patch to black + img[i*dh : (i+1)*dh, j*dw : (j+1)*dw, :] = 0 + + + # Set keypoints in the patch to invisible + in_black = ( + (kpts[:, 0] >= j*dw) & + (kpts[:, 0] < (j+1)*dw) & + (kpts[:, 1] >= i*dh) & + (kpts[:, 1] < (i+1)*dh) + ) + results['keypoints_visibility'][:, in_black] = 0 + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(grid_size={self.grid_size}, ' + repr_str += f'mask_ratio={self.mask_ratio}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomEdgesBlackout(BaseTransform): + """Data augmentation that masks edged of the image with black color + simulating image edge or random texture. + + Required Keys: + + - keypoints + - keypoints_visible + - keypoint_visibility + + Modified Keys: + + - img + - keypoint_visibility + + Args: + mask_ratio_range (tuple[float, float]): Range or mask-to-image ratio. Defaults to + (0.1, 0.3) + prob (float): The probability to apply black patches. Defaults to 0.8 + texture_prob (float): The probability to apply texture to the blackout area. Defaults to 0.0 + """ + + def __init__(self, + mask_ratio_range: tuple[float, float] = (0.1, 0.3), + prob: float = 0.8, + texture_prob: float = 0.0, + context_size:float = 1.25) -> None: + super().__init__() + self.mask_ratio_range = mask_ratio_range + self.prob = prob + self.texture_prob = texture_prob + self.context_size = context_size + + @cache_randomness + def _get_random_mask(self, w, h, bbox_xyxy) -> float: + """Get random mask ratio. 
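+
+        Despite the short summary above, this helper builds the full blackout
+        mask: with probability ``prob`` it keeps a random rectangle inside
+        the bounding box (padded by ``context_size``) and marks everything
+        else for blackout, and it also decides whether the blacked-out area
+        will be textured.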
+ + Args: + w (int): Width of the image + h (int): Height of the image + bbox_xyxy (tuple): Bounding box (x1, y1, x2, y2) + + Returns: + np.array: mask (1 for blackout, 0 for keep) + tuple: bounds of the blackout area (x1, y1, x2, y2) + """ + mask = np.zeros((h, w), dtype=bool) + bbox_c, bbox_s = bbox_xyxy2cs(bbox_xyxy, padding=self.context_size) + x0, y0, x1, y1 = bbox_cs2xyxy(bbox_c, bbox_s) + + # Clip the bounding box to the image + x0 = np.maximum(x0, 0).astype(int) + y0 = np.maximum(y0, 0).astype(int) + x1 = np.minimum(x1, w).astype(int) + y1 = np.minimum(y1, h).astype(int) + + # Set default values + x = 0 + y = 0 + dw = w + dh = h + is_textured = False + + if np.random.rand() < self.prob: + + # Generate random rectangle to keep + rh, rw = np.random.uniform( + 1-self.mask_ratio_range[1], + 1-self.mask_ratio_range[0], + 2 + ) + dh = int((y1-y0) * rh) + dw = int((x1-x0) * rw) + x_end = x1-dw if x1-dw > x0 else x0+1 + y_end = y1-dh if y1-dh > y0 else y0+1 + try: + x = np.random.randint(x0, x_end) + y = np.random.randint(y0, y_end) + except ValueError: + print(x, x0, dw, x1, x1-dw, x_end) + print(y, y0, dh, y1, y1-dh, y_end) + raise ValueError + + # Set all pixel outside of the rectangle to black + mask[y:y+dh, x:x+dw] = True + + # Invert the mask. True means blackout + mask = ~mask + + # Add texture + is_textured = np.random.rand() < self.texture_prob + + return mask, (x, y, dw+x, dh+y), is_textured + + def _get_random_color(self) -> np.ndarray: + """Get random color. + + Returns: + np.array: color + """ + h = np.random.randint(0, 360) + s = np.random.uniform(0.75, 1) + l = np.random.uniform(0.3, 0.7) + hls_color = np.array([h, l, s]) + rgb_color = cv2.cvtColor( + np.array([[hls_color]], dtype=np.float32), + cv2.COLOR_HLS2RGB + ).squeeze() * 255 + color = rgb_color.astype(np.uint8) + return color.tolist() + + def _get_random_texture(self, w, h) -> np.ndarray: + """Get random texture. 
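+
+        The texture starts as a solid, randomly coloured background and is
+        overdrawn with a random number of primitives (lines, squares or
+        circles) in further random colours.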
+ + Args: + w (int): Width of the image + h (int): Height of the image + + Returns: + np.array: texture + """ + mode = np.random.choice([ + 'lines', + 'squares', + 'circles', + # 'noise', + # 'uniform', + ]) + + if mode == 'lines': + texture = np.zeros((h, w, 3), dtype=np.uint8) + texture[:, :, :] = self._get_random_color() + num_lines = np.random.randint(1, 20) + for _ in range(num_lines): + x1, y1 = np.random.randint(0, w), np.random.randint(0, h) + x2, y2 = np.random.randint(0, w), np.random.randint(0, h) + line_width = np.random.randint(1, 10) + color = self._get_random_color() + cv2.line(texture, (x1, y1), (x2, y2), color, line_width) + elif mode == 'squares': + texture = np.zeros((h, w, 3), dtype=np.uint8) + texture[:, :, :] = self._get_random_color() + num_squares = np.random.randint(1, 20) + for _ in range(num_squares): + x1, y1 = np.random.randint(0, w), np.random.randint(0, h) + x2, y2 = np.random.randint(0, w), np.random.randint(0, h) + color = self._get_random_color() + cv2.rectangle(texture, (x1, y1), (x2, y2), color, -1) + elif mode == 'circles': + texture = np.zeros((h, w, 3), dtype=np.uint8) + texture[:, :, :] = self._get_random_color() + num_circles = np.random.randint(1, 20) + for _ in range(num_circles): + x, y = np.random.randint(0, w), np.random.randint(0, h) + r = np.random.randint(1, min(w, h) // 2) + color = self._get_random_color() + cv2.circle(texture, (x, y), r, color, -1) + elif mode == 'noise': + texture = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) + elif mode == 'uniform': + texture = np.zeros((h, w, 3), dtype=np.uint8) + texture[:, :, :] = self._get_random_color() + + return texture + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`HalfBodyTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + img = results['img'] + + if "transformed_keypoints" in results: + kpts = results['transformed_keypoints'].squeeze() + else: + kpts = results['keypoints'].squeeze() + + # Generate random mask + mask, (x1, y1, x2, y2), is_textured = self._get_random_mask(img.shape[1], img.shape[0], results['bbox_xyxy_wrt_input'].flatten()) + # breakpoint() + # print("img shape", img.shape) + # print("results", results.keys()) + + # Apply the mask + if is_textured: + textured_img = self._get_random_texture(img.shape[1], img.shape[0]) + textured_img[~mask, :] = img[~mask, :] + img = textured_img + else: + # Set all pixel outside of the rectangle to black + img[mask, :] = 0 + results['img'] = img + + # Set keypoints outside of the rectangle to invisible + in_rect = ( + (kpts[:, 0] >= x1) & + (kpts[:, 0] < x2) & + (kpts[:, 1] >= y1) & + (kpts[:, 1] < y2) + ) + results['keypoints_visibility'][:, ~in_rect] = 0 + + # Create new entry describing keypoints in the 'cropped' area + results['keypoints_in_image'] = in_rect.squeeze().astype(int) + + # Crop the bbox_xyxy_wrt_input according to the blackout area + if 'bbox_xyxy_wrt_input' in results: + bbox_xyxy = results['bbox_xyxy_wrt_input'].flatten() + bbox_xyxy[0] = np.maximum(bbox_xyxy[0], x1) + bbox_xyxy[1] = np.maximum(bbox_xyxy[1], y1) + bbox_xyxy[2] = np.minimum(bbox_xyxy[2], x2) + bbox_xyxy[3] = np.minimum(bbox_xyxy[3], y2) + results['bbox_xyxy_wrt_input'] = bbox_xyxy.reshape(-1, 4) + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. 
+ """ + repr_str = self.__class__.__name__ + repr_str += f'(mask_ratio_range={self.mask_ratio_range}, ' + repr_str += f'prob={self.prob}), ' + repr_str += f'texture_prob={self.texture_prob})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomBBoxTransform(BaseTransform): + r"""Rnadomly shift, resize and rotate the bounding boxes. + + Required Keys: + + - bbox_center + - bbox_scale + + Modified Keys: + + - bbox_center + - bbox_scale + + Added Keys: + - bbox_rotation + + Args: + shift_factor (float): Randomly shift the bbox in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. + Defaults to 0.16 + shift_prob (float): Probability of applying random shift. Defaults to + 0.3 + scale_factor (Tuple[float, float]): Randomly resize the bbox in range + :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) + scale_prob (float): Probability of applying random resizing. Defaults + to 1.0 + rotate_factor (float): Randomly rotate the bbox in + :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults + to 80.0 + rotate_prob (float): Probability of applying random rotation. Defaults + to 0.6 + """ + + def __init__(self, + shift_factor: float = 0.16, + shift_prob: float = 0.3, + scale_factor: Tuple[float, float] = (0.5, 1.5), + scale_prob: float = 1.0, + rotate_factor: float = 80.0, + rotate_prob: float = 0.6) -> None: + super().__init__() + + self.shift_factor = shift_factor + self.shift_prob = shift_prob + self.scale_factor = scale_factor + self.scale_prob = scale_prob + self.rotate_factor = rotate_factor + self.rotate_prob = rotate_prob + + @staticmethod + def _truncnorm(low: float = -1., + high: float = 1., + size: tuple = ()) -> np.ndarray: + """Sample from a truncated normal distribution.""" + return truncnorm.rvs(low, high, size=size).astype(np.float32) + + @cache_randomness + def _get_transform_params(self, num_bboxes: int) -> Tuple: + """Get random transform parameters. + + Args: + num_bboxes (int): The number of bboxes + + Returns: + tuple: + - offset (np.ndarray): Offset factor of each bbox in shape (n, 2) + - scale (np.ndarray): Scaling factor of each bbox in shape (n, 1) + - rotate (np.ndarray): Rotation degree of each bbox in shape (n,) + """ + random_v = self._truncnorm(size=(num_bboxes, 4)) + offset_v = random_v[:, :2] + scale_v = random_v[:, 2:3] + rotate_v = random_v[:, 3] + + # Get shift parameters + offset = offset_v * self.shift_factor + offset = np.where( + np.random.rand(num_bboxes, 1) < self.shift_prob, offset, 0.) + + # Get scaling parameters + scale_min, scale_max = self.scale_factor + mu = (scale_max + scale_min) * 0.5 + sigma = (scale_max - scale_min) * 0.5 + scale = scale_v * sigma + mu + scale = np.where( + np.random.rand(num_bboxes, 1) < self.scale_prob, scale, 1.) + + # Get rotation parameters + rotate = rotate_v * self.rotate_factor + rotate = np.where( + np.random.rand(num_bboxes) < self.rotate_prob, rotate, 0.) + + return offset, scale, rotate + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`RandomBboxTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
+ """ + bbox_scale = results['bbox_scale'] + num_bboxes = bbox_scale.shape[0] + + offset, scale, rotate = self._get_transform_params(num_bboxes) + + results['bbox_center'] = results['bbox_center'] + offset * bbox_scale + results['bbox_scale'] = results['bbox_scale'] * scale + results['bbox_rotation'] = rotate + + bbox_xyxy_wrt_input = results.get('bbox_xyxy_wrt_input', None) + if bbox_xyxy_wrt_input is not None: + _c, _s = bbox_xyxy2cs(bbox_xyxy_wrt_input, padding=1.0) + _c = _c + offset * _s + _s = _s * scale + results['bbox_xyxy_wrt_input'] = bbox_cs2xyxy(_c, _s).flatten() + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(shift_prob={self.shift_prob}, ' + repr_str += f'shift_factor={self.shift_factor}, ' + repr_str += f'scale_prob={self.scale_prob}, ' + repr_str += f'scale_factor={self.scale_factor}, ' + repr_str += f'rotate_prob={self.rotate_prob}, ' + repr_str += f'rotate_factor={self.rotate_factor})' + return repr_str + + +@TRANSFORMS.register_module() +@avoid_cache_randomness +class Albumentation(BaseTransform): + """Albumentation augmentation (pixel-level transforms only). + + Adds custom pixel-level transformations from Albumentations library. + Please visit `https://albumentations.ai/docs/` + to get more information. + + Note: we only support pixel-level transforms. + Please visit `https://github.com/albumentations-team/` + `albumentations#pixel-level-transforms` + to get more information about pixel-level transforms. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + transforms (List[dict]): A list of Albumentation transforms. + An example of ``transforms`` is as followed: + .. code-block:: python + + [ + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + keymap (dict | None): key mapping from ``input key`` to + ``albumentation-style key``. + Defaults to None, which will use {'img': 'image'}. + """ + + def __init__(self, + transforms: List[dict], + keymap: Optional[dict] = None) -> None: + if albumentations is None: + raise RuntimeError('albumentations is not installed') + + self.transforms = transforms + + self.aug = albumentations.Compose( + [self.albu_builder(t) for t in self.transforms]) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + } + else: + self.keymap_to_albu = keymap + + def albu_builder(self, cfg: dict) -> albumentations: + """Import a module from albumentations. + + It resembles some of :func:`build_from_cfg` logic. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + + Returns: + albumentations.BasicTransform: The constructed transform object + """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if mmengine.is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + rank, _ = get_dist_info() + if rank == 0 and not hasattr( + albumentations.augmentations.transforms, obj_type): + warnings.warn( + f'{obj_type} is not pixel-level transformations. 
' + 'Please use with caution.') + obj_cls = getattr(albumentations, obj_type) + elif isinstance(obj_type, type): + obj_cls = obj_type + else: + raise TypeError(f'type must be a str, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + def transform(self, results: dict) -> dict: + """The transform function of :class:`Albumentation` to apply + albumentations transforms. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): Result dict from the data pipeline. + + Return: + dict: updated result dict. + """ + # map result dict to albumentations format + results_albu = {} + for k, v in self.keymap_to_albu.items(): + assert k in results, \ + f'The `{k}` is required to perform albumentations transforms' + results_albu[v] = results[k] + + # Apply albumentations transforms + results_albu = self.aug(**results_albu) + + # map the albu results back to the original format + for k, v in self.keymap_to_albu.items(): + results[k] = results_albu[v] + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str + + +@TRANSFORMS.register_module() +class PhotometricDistortion(BaseTransform): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. randomly swap channels + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. + """ + + def __init__(self, + brightness_delta: int = 32, + contrast_range: Sequence[Number] = (0.5, 1.5), + saturation_range: Sequence[Number] = (0.5, 1.5), + hue_delta: int = 18) -> None: + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + @cache_randomness + def _random_flags(self) -> Sequence[Number]: + """Generate the random flags for subsequent transforms. + + Returns: + Sequence[Number]: a sequence of numbers that indicate whether to + do the corresponding transforms. 
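+
+        Example (one hypothetical draw, decoded)::
+
+            # (1, 1, 0, 3, 0, -7.2, 1.13, 0.86, 5, array([2, 0, 1])) means:
+            # contrast would be applied last (mode 1) but contrast_flag is 0,
+            # so it is skipped; brightness is shifted by beta = -7.2;
+            # hsv_mode = 3 applies both saturation (x0.86) and hue (+5)
+            # distortion; the channels are not swapped (swap_flag = 0).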
+ """ + # contrast_mode == 0 --> do random contrast first + # contrast_mode == 1 --> do random contrast last + contrast_mode = np.random.randint(2) + # whether to apply brightness distortion + brightness_flag = np.random.randint(2) + # whether to apply contrast distortion + contrast_flag = np.random.randint(2) + # the mode to convert color from BGR to HSV + hsv_mode = np.random.randint(4) + # whether to apply channel swap + swap_flag = np.random.randint(2) + + # the beta in `self._convert` to be added to image array + # in brightness distortion + brightness_beta = np.random.uniform(-self.brightness_delta, + self.brightness_delta) + # the alpha in `self._convert` to be multiplied to image array + # in contrast distortion + contrast_alpha = np.random.uniform(self.contrast_lower, + self.contrast_upper) + # the alpha in `self._convert` to be multiplied to image array + # in saturation distortion to hsv-formatted img + saturation_alpha = np.random.uniform(self.saturation_lower, + self.saturation_upper) + # delta of hue to add to image array in hue distortion + hue_delta = np.random.randint(-self.hue_delta, self.hue_delta) + # the random permutation of channel order + swap_channel_order = np.random.permutation(3) + + return (contrast_mode, brightness_flag, contrast_flag, hsv_mode, + swap_flag, brightness_beta, contrast_alpha, saturation_alpha, + hue_delta, swap_channel_order) + + def _convert(self, + img: np.ndarray, + alpha: float = 1, + beta: float = 0) -> np.ndarray: + """Multiple with alpha and add beta with clip. + + Args: + img (np.ndarray): The image array. + alpha (float): The random multiplier. + beta (float): The random offset. + + Returns: + np.ndarray: The updated image array. + """ + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def transform(self, results: dict) -> dict: + """The transform function of :class:`PhotometricDistortion` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + assert 'img' in results, '`img` is not found in results' + img = results['img'] + + (contrast_mode, brightness_flag, contrast_flag, hsv_mode, swap_flag, + brightness_beta, contrast_alpha, saturation_alpha, hue_delta, + swap_channel_order) = self._random_flags() + + # random brightness distortion + if brightness_flag: + img = self._convert(img, beta=brightness_beta) + + # contrast_mode == 0 --> do random contrast first + # contrast_mode == 1 --> do random contrast last + if contrast_mode == 1: + if contrast_flag: + img = self._convert(img, alpha=contrast_alpha) + + if hsv_mode: + # random saturation/hue distortion + img = mmcv.bgr2hsv(img) + if hsv_mode == 1 or hsv_mode == 3: + # apply saturation distortion to hsv-formatted img + img[:, :, 1] = self._convert( + img[:, :, 1], alpha=saturation_alpha) + if hsv_mode == 2 or hsv_mode == 3: + # apply hue distortion to hsv-formatted img + img[:, :, 0] = img[:, :, 0].astype(int) + hue_delta + img = mmcv.hsv2bgr(img) + + if contrast_mode == 1: + if contrast_flag: + img = self._convert(img, alpha=contrast_alpha) + + # randomly swap channels + if swap_flag: + img = img[..., swap_channel_order] + + results['img'] = img + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. 
+ """ + repr_str = self.__class__.__name__ + repr_str += (f'(brightness_delta={self.brightness_delta}, ' + f'contrast_range=({self.contrast_lower}, ' + f'{self.contrast_upper}), ' + f'saturation_range=({self.saturation_lower}, ' + f'{self.saturation_upper}), ' + f'hue_delta={self.hue_delta})') + return repr_str + + +@TRANSFORMS.register_module() +class GenerateTarget(BaseTransform): + """Encode keypoints into Target. + + The generated target is usually the supervision signal of the model + learning, e.g. heatmaps or regression labels. + + Required Keys: + + - keypoints + - keypoints_visible + - dataset_keypoint_weights + + Added Keys: + + - The keys of the encoded items from the codec will be updated into + the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See + the specific codec for more details. + + Args: + encoder (dict | list[dict]): The codec config for keypoint encoding. + Both single encoder and multiple encoders (given as a list) are + supported + multilevel (bool): Determine the method to handle multiple encoders. + If ``multilevel==True``, generate multilevel targets from a group + of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` + encoders with different sigma values); If ``multilevel==False``, + generate combined targets from a group of different encoders. This + argument will have no effect in case of single encoder. Defaults + to ``False`` + use_dataset_keypoint_weights (bool): Whether use the keypoint weights + from the dataset meta information. Defaults to ``False`` + target_type (str, deprecated): This argument is deprecated and has no + effect. Defaults to ``None`` + """ + + def __init__(self, + encoder: MultiConfig, + target_type: Optional[str] = None, + multilevel: bool = False, + use_dataset_keypoint_weights: bool = False) -> None: + super().__init__() + + if target_type is not None: + rank, _ = get_dist_info() + if rank == 0: + warnings.warn( + 'The argument `target_type` is deprecated in' + ' GenerateTarget. The target type and encoded ' + 'keys will be determined by encoder(s).', + DeprecationWarning) + + self.encoder_cfg = deepcopy(encoder) + self.multilevel = multilevel + self.use_dataset_keypoint_weights = use_dataset_keypoint_weights + + if isinstance(self.encoder_cfg, list): + self.encoder = [ + KEYPOINT_CODECS.build(cfg) for cfg in self.encoder_cfg + ] + else: + assert not self.multilevel, ( + 'Need multiple encoder configs if ``multilevel==True``') + self.encoder = KEYPOINT_CODECS.build(self.encoder_cfg) + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`GenerateTarget`. + + See ``transform()`` method of :class:`BaseTransform` for details. 
+ """ + + if results.get('transformed_keypoints', None) is not None: + # use keypoints transformed by TopdownAffine + keypoints = results['transformed_keypoints'] + elif results.get('keypoints', None) is not None: + # use original keypoints + keypoints = results['keypoints'] + else: + raise ValueError( + 'GenerateTarget requires \'transformed_keypoints\' or' + ' \'keypoints\' in the results.') + + keypoints_visible = results['keypoints_visible'] + if keypoints_visible.ndim == 3 and keypoints_visible.shape[2] == 2: + keypoints_visible, keypoints_visible_weights = \ + keypoints_visible[..., 0], keypoints_visible[..., 1] + results['keypoints_visible'] = keypoints_visible + results['keypoints_visible_weights'] = keypoints_visible_weights + + id_similarity = results.get('id_similarity', np.array([0])) + keypoints_visibility = results.get("keypoints_visibility", None) + + # Encoded items from the encoder(s) will be updated into the results. + # Please refer to the document of the specific codec for details about + # encoded items. + if not isinstance(self.encoder, list): + # For single encoding, the encoded items will be directly added + # into results. + auxiliary_encode_kwargs = { + key: results[key] + for key in self.encoder.auxiliary_encode_keys + } + encoded = self.encoder.encode( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + keypoints_visibility=keypoints_visibility, + id_similarity=id_similarity, + **auxiliary_encode_kwargs) + + if self.encoder.field_mapping_table: + encoded[ + 'field_mapping_table'] = self.encoder.field_mapping_table + if self.encoder.instance_mapping_table: + encoded['instance_mapping_table'] = \ + self.encoder.instance_mapping_table + if self.encoder.label_mapping_table: + encoded[ + 'label_mapping_table'] = self.encoder.label_mapping_table + + else: + encoded_list = [] + _field_mapping_table = dict() + _instance_mapping_table = dict() + _label_mapping_table = dict() + for _encoder in self.encoder: + auxiliary_encode_kwargs = { + key: results[key] + for key in _encoder.auxiliary_encode_keys + } + encoded_list.append( + _encoder.encode( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + keypoints_visibility=keypoints_visibility, + id_similarity=id_similarity, + **auxiliary_encode_kwargs)) + + _field_mapping_table.update(_encoder.field_mapping_table) + _instance_mapping_table.update(_encoder.instance_mapping_table) + _label_mapping_table.update(_encoder.label_mapping_table) + + if self.multilevel: + # For multilevel encoding, the encoded items from each encoder + # should have the same keys. + + keys = encoded_list[0].keys() + if not all(_encoded.keys() == keys + for _encoded in encoded_list): + raise ValueError( + 'Encoded items from all encoders must have the same ' + 'keys if ``multilevel==True``.') + + encoded = { + k: [_encoded[k] for _encoded in encoded_list] + for k in keys + } + + else: + # For combined encoding, the encoded items from different + # encoders should have no overlapping items, except for + # `keypoint_weights`. If multiple `keypoint_weights` are given, + # they will be multiplied as the final `keypoint_weights`. 
+ + encoded = dict() + keypoint_weights = [] + + for _encoded in encoded_list: + for key, value in _encoded.items(): + if key == 'keypoint_weights': + keypoint_weights.append(value) + elif key not in encoded: + encoded[key] = value + else: + raise ValueError( + f'Overlapping item "{key}" from multiple ' + 'encoders, which is not supported when ' + '``multilevel==False``') + + if keypoint_weights: + encoded['keypoint_weights'] = keypoint_weights + + if _field_mapping_table: + encoded['field_mapping_table'] = _field_mapping_table + if _instance_mapping_table: + encoded['instance_mapping_table'] = _instance_mapping_table + if _label_mapping_table: + encoded['label_mapping_table'] = _label_mapping_table + + if self.use_dataset_keypoint_weights and 'keypoint_weights' in encoded: + if isinstance(encoded['keypoint_weights'], list): + for w in encoded['keypoint_weights']: + w = w * results['dataset_keypoint_weights'] + else: + encoded['keypoint_weights'] = encoded[ + 'keypoint_weights'] * results['dataset_keypoint_weights'] + + results.update(encoded) + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += (f'(encoder={str(self.encoder_cfg)}, ') + repr_str += ('use_dataset_keypoint_weights=' + f'{self.use_dataset_keypoint_weights})') + return repr_str + + +@TRANSFORMS.register_module() +class YOLOXHSVRandomAug(BaseTransform): + """Apply HSV augmentation to image sequentially. It is referenced from + https://github.com/Megvii- + BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + hue_delta (int): delta of hue. Defaults to 5. + saturation_delta (int): delta of saturation. Defaults to 30. + value_delta (int): delat of value. Defaults to 30. + """ + + def __init__(self, + hue_delta: int = 5, + saturation_delta: int = 30, + value_delta: int = 30) -> None: + self.hue_delta = hue_delta + self.saturation_delta = saturation_delta + self.value_delta = value_delta + + @cache_randomness + def _get_hsv_gains(self): + hsv_gains = np.random.uniform(-1, 1, 3) * [ + self.hue_delta, self.saturation_delta, self.value_delta + ] + # random selection of h, s, v + hsv_gains *= np.random.randint(0, 2, 3) + # prevent overflow + hsv_gains = hsv_gains.astype(np.int16) + return hsv_gains + + def transform(self, results: dict) -> dict: + img = results['img'] + hsv_gains = self._get_hsv_gains() + img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16) + + img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180 + img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255) + img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255) + cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img) + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(hue_delta={self.hue_delta}, ' + repr_str += f'saturation_delta={self.saturation_delta}, ' + repr_str += f'value_delta={self.value_delta})' + return repr_str + + +@TRANSFORMS.register_module() +class FilterAnnotations(BaseTransform): + """Eliminate undesirable annotations based on specific conditions. + + This class is designed to sift through annotations by examining multiple + factors such as the size of the bounding box, the visibility of keypoints, + and the overall area. 
Users can fine-tune the criteria to filter out + instances that have excessively small bounding boxes, insufficient area, + or an inadequate number of visible keypoints. + + Required Keys: + + - bbox (np.ndarray) (optional) + - area (np.int64) (optional) + - keypoints_visible (np.ndarray) (optional) + + Modified Keys: + + - bbox (optional) + - bbox_score (optional) + - category_id (optional) + - keypoints (optional) + - keypoints_visible (optional) + - area (optional) + + Args: + min_gt_bbox_wh (tuple[float]): Minimum width and height of ground + truth boxes. Default: (1., 1.) + min_gt_area (int): Minimum foreground area of instances. + Default: 1 + min_kpt_vis (int): Minimum number of visible keypoints. Default: 1 + by_box (bool): Filter instances with bounding boxes not meeting the + min_gt_bbox_wh threshold. Default: False + by_area (bool): Filter instances with area less than min_gt_area + threshold. Default: False + by_kpt (bool): Filter instances with keypoints_visible not meeting the + min_kpt_vis threshold. Default: True + keep_empty (bool): Whether to return None when it + becomes an empty bbox after filtering. Defaults to True. + """ + + def __init__(self, + min_gt_bbox_wh: Tuple[int, int] = (1, 1), + min_gt_area: int = 1, + min_kpt_vis: int = 1, + by_box: bool = False, + by_area: bool = False, + by_kpt: bool = True, + keep_empty: bool = True) -> None: + + assert by_box or by_kpt or by_area + self.min_gt_bbox_wh = min_gt_bbox_wh + self.min_gt_area = min_gt_area + self.min_kpt_vis = min_kpt_vis + self.by_box = by_box + self.by_area = by_area + self.by_kpt = by_kpt + self.keep_empty = keep_empty + + def transform(self, results: dict) -> Union[dict, None]: + """Transform function to filter annotations. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. 
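+
+        Example (a hypothetical pipeline entry)::
+
+            # keep only instances with at least 3 visible keypoints and a
+            # foreground area of at least 64 pixels
+            filt = dict(
+                type='FilterAnnotations',
+                by_kpt=True,
+                min_kpt_vis=3,
+                by_area=True,
+                min_gt_area=64)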
+ """ + assert 'keypoints' in results + kpts = results['keypoints'] + if kpts.shape[0] == 0: + return results + + tests = [] + if self.by_box and 'bbox' in results: + bbox = results['bbox'] + tests.append( + ((bbox[..., 2] - bbox[..., 0] > self.min_gt_bbox_wh[0]) & + (bbox[..., 3] - bbox[..., 1] > self.min_gt_bbox_wh[1]))) + if self.by_area and 'area' in results: + area = results['area'] + tests.append(area >= self.min_gt_area) + if self.by_kpt: + kpts_vis = results['keypoints_visible'] + if kpts_vis.ndim == 3: + kpts_vis = kpts_vis[..., 0] + tests.append(kpts_vis.sum(axis=1) >= self.min_kpt_vis) + + keep = tests[0] + for t in tests[1:]: + keep = keep & t + + if not keep.any(): + if self.keep_empty: + return None + + keys = ('bbox', 'bbox_score', 'category_id', 'keypoints', + 'keypoints_visible', 'area') + for key in keys: + if key in results: + results[key] = results[key][keep] + + return results + + def __repr__(self): + return (f'{self.__class__.__name__}(' + f'min_gt_bbox_wh={self.min_gt_bbox_wh}, ' + f'min_gt_area={self.min_gt_area}, ' + f'min_kpt_vis={self.min_kpt_vis}, ' + f'by_box={self.by_box}, ' + f'by_area={self.by_area}, ' + f'by_kpt={self.by_kpt}, ' + f'keep_empty={self.keep_empty})') + + +def compute_paddings(bbox, bbox_s, kpts): + """Compute the padding of the bbox to fit the keypoints.""" + bbox = np.array(bbox).flatten() + bbox_s = np.array(bbox_s).flatten() + if kpts.size % 2 == 0: + kpts = kpts.reshape(-1, 2) + else: + kpts = kpts.reshape(-1, 3) + + x0, y0, x1, y1 = bbox + x_bbox_distances = np.max(np.stack([ + np.clip(x0 - kpts[:, 0], a_min=0, a_max=None), + np.clip(kpts[:, 0] - x1, a_min=0, a_max=None), + ]), axis=0) + y_bbox_distances = np.max(np.stack([ + np.clip(y0 - kpts[:, 1], a_min=0, a_max=None), + np.clip(kpts[:, 1] - y1, a_min=0, a_max=None), + ]), axis=0) + + padding_x = 2 * x_bbox_distances / bbox_s[0] + padding_y = 2 * y_bbox_distances / bbox_s[1] + padding = 1 + np.maximum(padding_x, padding_y) + + padding = np.maximum(x_bbox_distances, y_bbox_distances) + + return padding.flatten() \ No newline at end of file diff --git a/mmpose/datasets/transforms/converting.py b/mmpose/datasets/transforms/converting.py new file mode 100644 index 0000000000000000000000000000000000000000..b7e214733fa4ac842afe2c6efe8f5917d961889a --- /dev/null +++ b/mmpose/datasets/transforms/converting.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import numpy as np +from mmcv.transforms import BaseTransform + +from mmpose.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class KeypointConverter(BaseTransform): + """Change the order of keypoints according to the given mapping. + + Required Keys: + + - keypoints + - keypoints_visible + + Modified Keys: + + - keypoints + - keypoints_visible + + Args: + num_keypoints (int): The number of keypoints in target dataset. + mapping (list): A list containing mapping indexes. 
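Stepping back to ``compute_paddings`` above: the quantity it measures is how far each keypoint falls outside the bounding box (zero when the keypoint is inside). A small worked example with made-up coordinates:

```python
# Worked example of the quantity compute_paddings measures: per-keypoint
# distance outside the bounding box (zero when inside). Numbers are made up.
import numpy as np

x0, y0, x1, y1 = 10., 10., 50., 50.                    # bbox in xyxy
kpts = np.array([[5., 30.], [60., 30.], [30., 30.]])   # left of, right of, inside

dx = np.maximum(np.clip(x0 - kpts[:, 0], 0, None),
                np.clip(kpts[:, 0] - x1, 0, None))     # horizontal overshoot
dy = np.maximum(np.clip(y0 - kpts[:, 1], 0, None),
                np.clip(kpts[:, 1] - y1, 0, None))     # vertical overshoot
print(np.maximum(dx, dy))   # [ 5. 10.  0.]
```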
Each element has + format (source_index, target_index) + + Example: + >>> import numpy as np + >>> # case 1: 1-to-1 mapping + >>> # (0, 0) means target[0] = source[0] + >>> self = KeypointConverter( + >>> num_keypoints=3, + >>> mapping=[ + >>> (0, 0), (1, 1), (2, 2), (3, 3) + >>> ]) + >>> results = dict( + >>> keypoints=np.arange(34).reshape(2, 3, 2), + >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) + >>> results = self(results) + >>> assert np.equal(results['keypoints'], + >>> np.arange(34).reshape(2, 3, 2)).all() + >>> assert np.equal(results['keypoints_visible'], + >>> np.arange(34).reshape(2, 3, 2) % 2).all() + >>> + >>> # case 2: 2-to-1 mapping + >>> # ((1, 2), 0) means target[0] = (source[1] + source[2]) / 2 + >>> self = KeypointConverter( + >>> num_keypoints=3, + >>> mapping=[ + >>> ((1, 2), 0), (1, 1), (2, 2) + >>> ]) + >>> results = dict( + >>> keypoints=np.arange(34).reshape(2, 3, 2), + >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) + >>> results = self(results) + """ + + def __init__(self, num_keypoints: int, + mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, + int]]]): + self.num_keypoints = num_keypoints + self.mapping = mapping + if len(mapping): + source_index, target_index = zip(*mapping) + else: + source_index, target_index = [], [] + + src1, src2 = [], [] + interpolation = False + for x in source_index: + if isinstance(x, (list, tuple)): + assert len(x) == 2, 'source_index should be a list/tuple of ' \ + 'length 2' + src1.append(x[0]) + src2.append(x[1]) + interpolation = True + else: + src1.append(x) + src2.append(x) + + # When paired source_indexes are input, + # keep a self.source_index2 for interpolation + if interpolation: + self.source_index2 = src2 + + self.source_index = src1 + self.target_index = list(target_index) + self.interpolation = interpolation + + def transform(self, results: dict) -> dict: + """Transforms the keypoint results to match the target keypoints.""" + num_instances = results['keypoints'].shape[0] + + if 'keypoints_visible' not in results: + results['keypoints_visible'] = np.ones( + (num_instances, results['keypoints'].shape[1])) + + if len(results['keypoints_visible'].shape) > 2: + results['keypoints_visible'] = results['keypoints_visible'][:, :, + 0] + + # Initialize output arrays + keypoints = np.zeros((num_instances, self.num_keypoints, 3)) + keypoints_visible = np.zeros((num_instances, self.num_keypoints)) + key = 'keypoints_3d' if 'keypoints_3d' in results else 'keypoints' + c = results[key].shape[-1] + + flip_indices = results.get('flip_indices', None) + + # Create a mask to weight visibility loss + keypoints_visible_weights = keypoints_visible.copy() + keypoints_visible_weights[:, self.target_index] = 1.0 + + # Interpolate keypoints if pairs of source indexes provided + if self.interpolation: + keypoints[:, self.target_index, :c] = 0.5 * ( + results[key][:, self.source_index] + + results[key][:, self.source_index2]) + keypoints_visible[:, self.target_index] = results[ + 'keypoints_visible'][:, self.source_index] * results[ + 'keypoints_visible'][:, self.source_index2] + # Flip keypoints if flip_indices provided + if flip_indices is not None: + for i, (x1, x2) in enumerate( + zip(self.source_index, self.source_index2)): + idx = flip_indices[x1] if x1 == x2 else i + flip_indices[i] = idx if idx < self.num_keypoints else i + flip_indices = flip_indices[:len(self.source_index)] + # Otherwise just copy from the source index + else: + keypoints[:, + self.target_index, :c] = results[key][:, + self.source_index] + 
keypoints_visible[:, self.target_index] = results[ + 'keypoints_visible'][:, self.source_index] + + # Update the results dict + results['keypoints'] = keypoints[..., :2] + results['keypoints_visible'] = np.stack( + [keypoints_visible, keypoints_visible_weights], axis=2) + if 'keypoints_3d' in results: + results['keypoints_3d'] = keypoints + results['lifting_target'] = keypoints[results['target_idx']] + results['lifting_target_visible'] = keypoints_visible[ + results['target_idx']] + results['flip_indices'] = flip_indices + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(num_keypoints={self.num_keypoints}, '\ + f'mapping={self.mapping})' + return repr_str + + +@TRANSFORMS.register_module() +class SingleHandConverter(BaseTransform): + """Mapping a single hand keypoints into double hands according to the given + mapping and hand type. + + Required Keys: + + - keypoints + - keypoints_visible + - hand_type + + Modified Keys: + + - keypoints + - keypoints_visible + + Args: + num_keypoints (int): The number of keypoints in target dataset. + left_hand_mapping (list): A list containing mapping indexes. Each + element has format (source_index, target_index) + right_hand_mapping (list): A list containing mapping indexes. Each + element has format (source_index, target_index) + + Example: + >>> import numpy as np + >>> self = SingleHandConverter( + >>> num_keypoints=42, + >>> left_hand_mapping=[ + >>> (0, 0), (1, 1), (2, 2), (3, 3) + >>> ], + >>> right_hand_mapping=[ + >>> (0, 21), (1, 22), (2, 23), (3, 24) + >>> ]) + >>> results = dict( + >>> keypoints=np.arange(84).reshape(2, 21, 2), + >>> keypoints_visible=np.arange(84).reshape(2, 21, 2) % 2, + >>> hand_type=np.array([[0, 1], [1, 0]])) + >>> results = self(results) + """ + + def __init__(self, num_keypoints: int, + left_hand_mapping: Union[List[Tuple[int, int]], + List[Tuple[Tuple, int]]], + right_hand_mapping: Union[List[Tuple[int, int]], + List[Tuple[Tuple, int]]]): + self.num_keypoints = num_keypoints + self.left_hand_converter = KeypointConverter(num_keypoints, + left_hand_mapping) + self.right_hand_converter = KeypointConverter(num_keypoints, + right_hand_mapping) + + def transform(self, results: dict) -> dict: + """Transforms the keypoint results to match the target keypoints.""" + assert 'hand_type' in results, ( + 'hand_type should be provided in results') + hand_type = results['hand_type'] + + if np.sum(hand_type - [[0, 1]]) <= 1e-6: + # left hand + results = self.left_hand_converter(results) + elif np.sum(hand_type - [[1, 0]]) <= 1e-6: + results = self.right_hand_converter(results) + else: + raise ValueError('hand_type should be left or right') + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(num_keypoints={self.num_keypoints}, '\ + f'left_hand_converter={self.left_hand_converter}, '\ + f'right_hand_converter={self.right_hand_converter})' + return repr_str diff --git a/mmpose/datasets/transforms/formatting.py b/mmpose/datasets/transforms/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..833a3bad5b3a3ee6024e1d21bc852ba45185c6f4 --- /dev/null +++ b/mmpose/datasets/transforms/formatting.py @@ -0,0 +1,290 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
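Before moving on to the packing transform, a compact sketch of the remapping that ``KeypointConverter`` performs: each target slot is filled from a source index, and a ``(src_a, src_b)`` pair is averaged. The mapping and shapes below are illustrative, not taken from any dataset:

```python
# Compact sketch of the remapping performed by KeypointConverter. The mapping
# and array shapes are illustrative only.
import numpy as np

src = np.arange(12, dtype=float).reshape(1, 6, 2)   # (instances, keypoints, xy)
mapping = [((1, 2), 0), (3, 1), (4, 2)]             # target[0] = mean(src[1], src[2])
num_target = 3

tgt = np.zeros((1, num_target, 2))
for source, target in mapping:
    if isinstance(source, tuple):                    # 2-to-1: interpolate the pair
        tgt[:, target] = 0.5 * (src[:, source[0]] + src[:, source[1]])
    else:                                            # 1-to-1: plain copy
        tgt[:, target] = src[:, source]
print(tgt[0])   # [[3. 4.] [6. 7.] [8. 9.]]
```

The actual transform vectorizes this loop with fancy indexing over ``source_index`` and ``source_index2``; ``SingleHandConverter`` simply applies two such converters, one writing into the left-hand slots and one into the right-hand slots.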
+from typing import Sequence, Union + +import numpy as np +import torch +from mmcv.transforms import BaseTransform +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_seq_of + +from mmpose.registry import TRANSFORMS +from mmpose.structures import MultilevelPixelData, PoseDataSample + + +def image_to_tensor(img: Union[np.ndarray, + Sequence[np.ndarray]]) -> torch.torch.Tensor: + """Translate image or sequence of images to tensor. Multiple image tensors + will be stacked. + + Args: + value (np.ndarray | Sequence[np.ndarray]): The original image or + image sequence + + Returns: + torch.Tensor: The output tensor. + """ + + if isinstance(img, np.ndarray): + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + + img = np.ascontiguousarray(img) + tensor = torch.from_numpy(img).permute(2, 0, 1).contiguous() + else: + assert is_seq_of(img, np.ndarray) + tensor = torch.stack([image_to_tensor(_img) for _img in img]) + + return tensor + + +def keypoints_to_tensor(keypoints: Union[np.ndarray, Sequence[np.ndarray]] + ) -> torch.torch.Tensor: + """Translate keypoints or sequence of keypoints to tensor. Multiple + keypoints tensors will be stacked. + + Args: + keypoints (np.ndarray | Sequence[np.ndarray]): The keypoints or + keypoints sequence. + + Returns: + torch.Tensor: The output tensor. + """ + if isinstance(keypoints, np.ndarray): + keypoints = np.ascontiguousarray(keypoints) + tensor = torch.from_numpy(keypoints).contiguous() + else: + assert is_seq_of(keypoints, np.ndarray) + tensor = torch.stack( + [keypoints_to_tensor(_keypoints) for _keypoints in keypoints]) + + return tensor + + +@TRANSFORMS.register_module() +class PackPoseInputs(BaseTransform): + """Pack the inputs data for pose estimation. + + The ``img_meta`` item is always populated. The contents of the + ``img_meta`` dictionary depends on ``meta_keys``. By default it includes: + + - ``id``: id of the data sample + + - ``img_id``: id of the image + + - ``'category_id'``: the id of the instance category + + - ``img_path``: path to the image file + + - ``crowd_index`` (optional): measure the crowding level of an image, + defined in CrowdPose dataset + + - ``ori_shape``: original shape of the image as a tuple (h, w, c) + + - ``img_shape``: shape of the image input to the network as a tuple \ + (h, w). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. + + - ``input_size``: the input size to the network + + - ``flip``: a boolean indicating if image flip transform was used + + - ``flip_direction``: the flipping direction + + - ``flip_indices``: the indices of each keypoint's symmetric keypoint + + - ``raw_ann_info`` (optional): raw annotation of the instance(s) + + Args: + meta_keys (Sequence[str], optional): Meta keys which will be stored in + :obj: `PoseDataSample` as meta info. Defaults to ``('id', + 'img_id', 'img_path', 'category_id', 'crowd_index, 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', 'flip', + 'flip_direction', 'flip_indices', 'raw_ann_info')`` + """ + + # items in `instance_mapping_table` will be directly packed into + # PoseDataSample.gt_instances without converting to Tensor + instance_mapping_table = dict( + bbox='bboxes', + bbox_score='bbox_scores', + keypoints='keypoints', + keypoints_cam='keypoints_cam', + keypoints_visible='keypoints_visible', + keypoints_visibility='keypoints_visibility', + # In CocoMetric, the area of predicted instances will be calculated + # using gt_instances.bbox_scales. 
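A quick usage sketch of the channel-ordering convention behind ``image_to_tensor`` above: an HWC array becomes a CHW tensor, and a sequence of images is stacked into an NCHW batch. The ``to_tensor`` helper below is a local stand-in and the shapes are arbitrary:

```python
# Usage sketch of the HWC -> CHW convention behind image_to_tensor above.
# `to_tensor` is a local stand-in; the shapes are arbitrary.
import numpy as np
import torch

def to_tensor(img: np.ndarray) -> torch.Tensor:
    if img.ndim < 3:
        img = np.expand_dims(img, -1)          # ensure a channel dimension
    img = np.ascontiguousarray(img)
    return torch.from_numpy(img).permute(2, 0, 1).contiguous()

single = to_tensor(np.zeros((256, 192, 3), dtype=np.uint8))
batch = torch.stack([to_tensor(np.zeros((256, 192, 3), np.uint8))
                     for _ in range(4)])
print(single.shape, batch.shape)   # torch.Size([3, 256, 192]) torch.Size([4, 3, 256, 192])
```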
To unsure correspondence with + # previous version, this key is preserved here. + bbox_scale='bbox_scales', + # `head_size` is used for computing MpiiPCKAccuracy metric, + # namely, PCKh + head_size='head_size', + # `in_image` is used for training in/out probability prediction + # and as a mask for some losses + in_image='in_image', + # `annotated` is used as weight for some losses. Different from + # both `keypoints_visible` and `keypoint_weights`, `annotated` is + # a binary mask indicating whether the keypoint is annotated. + # annotated='annotated', + keypoints_scaled='keypoints_scaled', + heatmap_keypoints='heatmap_keypoints', + keypoints_in_image='keypoints_in_image', + bbox_mask='bbox_mask', + identification_similarity='identification_similarity', + identified='identified', + out_heatmaps='out_heatmaps', + out_kpt_weights='out_kpt_weights', + bbox_xyxy_wrt_input='bbox_xyxy_wrt_input', + ) + + # items in `field_mapping_table` will be packed into + # PoseDataSample.gt_fields and converted to Tensor. These items will be + # used for computing losses + field_mapping_table = dict( + heatmaps='heatmaps', + instance_heatmaps='instance_heatmaps', + heatmap_mask='heatmap_mask', + heatmap_weights='heatmap_weights', + displacements='displacements', + displacement_weights='displacement_weights') + + # items in `label_mapping_table` will be packed into + # PoseDataSample.gt_instance_labels and converted to Tensor. These items + # will be used for computing losses + label_mapping_table = dict( + keypoint_labels='keypoint_labels', + keypoint_weights='keypoint_weights', + keypoints_visible_weights='keypoints_visible_weights') + + def __init__(self, + meta_keys=('id', 'img_id', 'img_path', 'category_id', + 'crowd_index', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', + 'raw_ann_info', 'dataset_name'), + pack_transformed=False): + self.meta_keys = meta_keys + self.pack_transformed = pack_transformed + + def transform(self, results: dict) -> dict: + """Method to pack the input data. + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: + + - 'inputs' (obj:`torch.Tensor`): The forward data of models. + - 'data_samples' (obj:`PoseDataSample`): The annotation info of the + sample. + """ + # print("\n\nPacking results") + # Pack image(s) for 2d pose estimation + if 'img' in results: + img = results['img'] + inputs_tensor = image_to_tensor(img) + # Pack keypoints for 3d pose-lifting + elif 'lifting_target' in results and 'keypoints' in results: + if 'keypoint_labels' in results: + keypoints = results['keypoint_labels'] + else: + keypoints = results['keypoints'] + inputs_tensor = keypoints_to_tensor(keypoints) + + if "in_image" in results: + if 'keypoints_in_image' not in results: + # If `keypoints_in_image` is not provided, use `keypoints_visible` as + # default value. 
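The three mapping tables above drive a simple routing scheme: any key present in ``results`` and listed in a table is copied into the corresponding group under its packed name. A rough sketch with plain dicts standing in for the data containers and made-up values:

```python
# Rough sketch of the table-driven packing in PackPoseInputs: keys found in
# `results` are copied into their group under the packed name. Plain dicts
# stand in for InstanceData containers; values are dummies.
import numpy as np

instance_mapping = {'bbox': 'bboxes', 'keypoints': 'keypoints'}
label_mapping = {'keypoint_weights': 'keypoint_weights'}

results = {'bbox': np.zeros((1, 4)),
           'keypoints': np.zeros((1, 17, 2)),
           'keypoint_weights': np.ones((1, 17))}

gt_instances = {packed: results[key]
                for key, packed in instance_mapping.items() if key in results}
gt_instance_labels = {packed: results[key]
                      for key, packed in label_mapping.items() if key in results}
print(sorted(gt_instances), sorted(gt_instance_labels))
# ['bboxes', 'keypoints'] ['keypoint_weights']
```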
('keypoints_visible' = annotated) + results['keypoints_in_image'] = results['in_image'] + results['keypoints_in_image'] = ( + results['keypoints_in_image'] & + results['in_image']) + + data_sample = PoseDataSample() + + # pack instance data + gt_instances = InstanceData() + _instance_mapping_table = results.get('instance_mapping_table', + self.instance_mapping_table) + for key, packed_key in _instance_mapping_table.items(): + if key in results: + gt_instances.set_field(results[key], packed_key) + + # pack `transformed_keypoints` for visualizing data transform + # and augmentation results + if self.pack_transformed and 'transformed_keypoints' in results: + gt_instances.set_field(results['transformed_keypoints'], + 'transformed_keypoints') + + data_sample.gt_instances = gt_instances + + # pack instance labels + gt_instance_labels = InstanceData() + _label_mapping_table = results.get('label_mapping_table', + self.label_mapping_table) + for key, packed_key in _label_mapping_table.items(): + if key in results: + if isinstance(results[key], list): + # A list of labels is usually generated by combined + # multiple encoders (See ``GenerateTarget`` in + # mmpose/datasets/transforms/common_transforms.py) + # In this case, labels in list should have the same + # shape and will be stacked. + _labels = np.stack(results[key]) + gt_instance_labels.set_field(_labels, packed_key) + else: + gt_instance_labels.set_field(results[key], packed_key) + data_sample.gt_instance_labels = gt_instance_labels.to_tensor() + + # pack fields + gt_fields = None + _field_mapping_table = results.get('field_mapping_table', + self.field_mapping_table) + for key, packed_key in _field_mapping_table.items(): + if key in results: + if isinstance(results[key], list): + if gt_fields is None: + gt_fields = MultilevelPixelData() + else: + assert isinstance( + gt_fields, MultilevelPixelData + ), 'Got mixed single-level and multi-level pixel data.' + else: + if gt_fields is None: + gt_fields = PixelData() + else: + assert isinstance( + gt_fields, PixelData + ), 'Got mixed single-level and multi-level pixel data.' + + gt_fields.set_field(results[key], packed_key) + + if gt_fields: + data_sample.gt_fields = gt_fields.to_tensor() + + img_meta = {k: results[k] for k in self.meta_keys if k in results} + data_sample.set_metainfo(img_meta) + + packed_results = dict() + packed_results['inputs'] = inputs_tensor + packed_results['data_samples'] = data_sample + + # print("\n\nPacked results done") + + # print(packed_results) + # print("\n\nPacked results done") + # print(len(packed_results), len(packed_results['data_samples']), len(packed_results['inputs'])) + # print("\n\n") + + return packed_results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(meta_keys={self.meta_keys}, ' + repr_str += f'pack_transformed={self.pack_transformed})' + return repr_str diff --git a/mmpose/datasets/transforms/hand_transforms.py b/mmpose/datasets/transforms/hand_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..cd43f860e57c7f72ea292aeff9da70085741674e --- /dev/null +++ b/mmpose/datasets/transforms/hand_transforms.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
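Returning briefly to the field packing in ``PackPoseInputs`` above: a single array goes into a single-level pixel-data container, while a list of arrays (one per output resolution) must go into a multilevel container, and mixing the two raises an assertion error. A toy dispatch that mirrors that rule, with dicts as stand-ins for the real containers:

```python
# Toy version of the single- vs multi-level dispatch in PackPoseInputs.transform.
# Dicts stand in for PixelData / MultilevelPixelData; shapes are arbitrary.
import numpy as np

def pack_field(value):
    if isinstance(value, list):
        # one array per resolution level, e.g. multi-scale heatmaps
        return {'kind': 'multilevel', 'levels': [np.asarray(v) for v in value]}
    return {'kind': 'single', 'data': np.asarray(value)}

print(pack_field(np.zeros((17, 64, 48)))['kind'])                            # single
print(pack_field([np.zeros((17, 64, 48)), np.zeros((17, 32, 24))])['kind'])  # multilevel
```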
+from typing import List, Union + +from mmpose.codecs import * # noqa: F401, F403 +from mmpose.registry import TRANSFORMS +from .common_transforms import RandomFlip + + +@TRANSFORMS.register_module() +class HandRandomFlip(RandomFlip): + """Data augmentation with random image flip. A child class of + `TopDownRandomFlip`. + + Required Keys: + + - img + - joints_3d + - joints_3d_visible + - center + - hand_type + - rel_root_depth + - ann_info + + Modified Keys: + + - img + - joints_3d + - joints_3d_visible + - center + - hand_type + - rel_root_depth + + Args: + prob (float | list[float]): The flipping probability. If a list is + given, the argument `direction` should be a list with the same + length. And each element in `prob` indicates the flipping + probability of the corresponding one in ``direction``. Defaults + to 0.5 + """ + + def __init__(self, prob: Union[float, List[float]] = 0.5) -> None: + super().__init__(prob=prob, direction='horizontal') + + def transform(self, results: dict) -> dict: + """The transform function of :class:`HandRandomFlip`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + # base flip augmentation + results = super().transform(results) + + # flip hand type and root depth + hand_type = results['hand_type'] + rel_root_depth = results['rel_root_depth'] + flipped = results['flip'] + if flipped: + hand_type[..., [0, 1]] = hand_type[..., [1, 0]] + rel_root_depth = -rel_root_depth + results['hand_type'] = hand_type + results['rel_root_depth'] = rel_root_depth + return results diff --git a/mmpose/datasets/transforms/loading.py b/mmpose/datasets/transforms/loading.py new file mode 100644 index 0000000000000000000000000000000000000000..5542001a3b4307e8804bc4c155a150931ec1a149 --- /dev/null +++ b/mmpose/datasets/transforms/loading.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import numpy as np +from mmcv.transforms import LoadImageFromFile + +from mmpose.registry import TRANSFORMS + +from mmpose.structures.keypoint import fix_bbox_aspect_ratio + + +@TRANSFORMS.register_module() +class LoadImage(LoadImageFromFile): + """Load an image from file or from the np.ndarray in ``results['img']``. + + Required Keys: + + - img_path + - img (optional) + + Modified Keys: + + - img + - img_shape + - ori_shape + - img_path (optional) + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:``mmcv.imfrombytes``. + Defaults to 'color'. + imdecode_backend (str): The image decoding backend type. The backend + argument for :func:``mmcv.imfrombytes``. + See :func:``mmcv.imfrombytes`` for details. + Defaults to 'cv2'. + backend_args (dict, optional): Arguments to instantiate the preifx of + uri corresponding backend. Defaults to None. + ignore_empty (bool): Whether to allow loading empty image or file path + not existent. Defaults to False. + """ + + def __init__(self, pad_to_aspect_ratio=False, **kwargs): + super().__init__(**kwargs) + self.pad_to_aspect_ratio = pad_to_aspect_ratio + + def transform(self, results: dict) -> Optional[dict]: + """The transform function of :class:`LoadImage`. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
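Beyond the plain horizontal flip, ``HandRandomFlip`` above has to keep two extra fields consistent: the two hand-type channels are swapped and the relative root depth changes sign. A tiny sketch with made-up values:

```python
# Sketch of the extra bookkeeping after a horizontal flip in HandRandomFlip:
# swap the two hand-type channels and negate the relative root depth.
# Values are made up.
import numpy as np

hand_type = np.array([[1.0, 0.0]])
rel_root_depth = np.float32(12.5)
flipped = True

if flipped:
    hand_type[..., [0, 1]] = hand_type[..., [1, 0]]
    rel_root_depth = -rel_root_depth
print(hand_type, rel_root_depth)   # [[0. 1.]] -12.5
```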
+ """ + try: + if 'img' not in results: + # Load image from file by :meth:`LoadImageFromFile.transform` + results = super().transform(results) + else: + img = results['img'] + assert isinstance(img, np.ndarray) + if self.to_float32: + img = img.astype(np.float32) + + if 'img_path' not in results: + results['img_path'] = None + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + + if self.pad_to_aspect_ratio: + # Pad image with zeros to ensure activation map is not cut off + abox_xyxy = fix_bbox_aspect_ratio( + results['bbox'], aspect_ratio=3/4, padding=1.25, bbox_format='xyxy').flatten() + + x_pad = np.array([max(0, -abox_xyxy[0]), max(0, abox_xyxy[2] - results['img_shape'][1])], dtype=int) + y_pad = np.array([max(0, -abox_xyxy[1]), max(0, abox_xyxy[3] - results['img_shape'][0])], dtype=int) + + img = results['img'] + img = np.pad(img, ((y_pad[0], y_pad[1]), (x_pad[0], x_pad[1]), (0, 0)), mode='constant', constant_values=255) + results['img'] = img + + # Update bbox + bbox = np.array(results['bbox']).flatten() + bbox[:2] += np.array([x_pad[0], y_pad[0]]) + bbox[2:] += np.array([x_pad[0], y_pad[0]]) + results['bbox'] = bbox.reshape(np.array(results['bbox']).shape) + + # Update keypoints + kpts = np.array(results['keypoints']).reshape(-1, 2) + kpts[:, :2] += np.array([x_pad[0], y_pad[0]]) + results['keypoints'] = kpts.reshape(np.array(results['keypoints']).shape) + + # Update img_shape and ori_shape + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + + except Exception as e: + e = type(e)( + f'`{str(e)}` occurs when loading `{results["img_path"]}`.' + 'Please check whether the file exists.') + raise e + + return results diff --git a/mmpose/datasets/transforms/mix_img_transforms.py b/mmpose/datasets/transforms/mix_img_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..84d03ea5a2f1a993cb7f870de9d8bf288c0e0211 --- /dev/null +++ b/mmpose/datasets/transforms/mix_img_transforms.py @@ -0,0 +1,501 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from abc import ABCMeta +from collections import defaultdict +from typing import Optional, Sequence, Tuple + +import mmcv +import numpy as np +from mmcv.transforms import BaseTransform +from mmengine.dataset.base_dataset import Compose +from numpy import random + +from mmpose.registry import TRANSFORMS +from mmpose.structures import (bbox_clip_border, flip_bbox, flip_keypoints, + keypoint_clip_border) + + +class MixImageTransform(BaseTransform, metaclass=ABCMeta): + """Abstract base class for mixup-style image data augmentation. + + Args: + pre_transform (Optional[Sequence[str]]): A sequence of transform + to be applied before mixup. Defaults to None. + prob (float): Probability of applying the mixup transformation. + Defaults to 1.0. + """ + + def __init__(self, + pre_transform: Optional[Sequence[str]] = None, + prob: float = 1.0): + + self.prob = prob + + if pre_transform is None: + self.pre_transform = None + else: + self.pre_transform = Compose(pre_transform) + + def transform(self, results: dict) -> dict: + """Transform the input data dictionary using mixup-style augmentation. + + Args: + results (dict): A dictionary containing input data. 
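Looking back at the ``pad_to_aspect_ratio`` branch of ``LoadImage`` above, the idea is: take an expanded box (produced by ``fix_bbox_aspect_ratio`` in the real code), pad the image so that box fits entirely inside it, and shift the box and keypoints by the top/left padding. A sketch with a hand-written expanded box instead of the helper:

```python
# Sketch of the pad_to_aspect_ratio step: pad the image so an expanded box
# fits, then shift annotations by the top/left padding. The expanded box
# here is made up rather than computed by fix_bbox_aspect_ratio.
import numpy as np

img = np.full((100, 80, 3), 128, dtype=np.uint8)       # (h, w, c)
abox = np.array([-10., -5., 90., 110.])                # expanded xyxy box

x_pad = np.array([max(0, -abox[0]), max(0, abox[2] - img.shape[1])], dtype=int)
y_pad = np.array([max(0, -abox[1]), max(0, abox[3] - img.shape[0])], dtype=int)

img = np.pad(img, ((y_pad[0], y_pad[1]), (x_pad[0], x_pad[1]), (0, 0)),
             mode='constant', constant_values=255)

bbox = np.array([0., 0., 60., 90.])
bbox[:2] += [x_pad[0], y_pad[0]]
bbox[2:] += [x_pad[0], y_pad[0]]
print(img.shape, bbox)   # (115, 100, 3) [10.  5. 70. 95.]
```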
+ """ + + if random.uniform(0, 1) < self.prob: + + dataset = results.pop('dataset', None) + + results['mixed_data_list'] = self._get_mixed_data_list(dataset) + results = self.apply_mix(results) + + if 'mixed_data_list' in results: + results.pop('mixed_data_list') + + results['dataset'] = dataset + + return results + + def _get_mixed_data_list(self, dataset): + """Get a list of mixed data samples from the dataset. + + Args: + dataset: The dataset from which to sample the mixed data. + + Returns: + List[dict]: A list of dictionaries containing mixed data samples. + """ + indexes = [ + random.randint(0, len(dataset)) for _ in range(self.num_aux_image) + ] + + mixed_data_list = [ + copy.deepcopy(dataset.get_data_info(index)) for index in indexes + ] + + if self.pre_transform is not None: + for i, data in enumerate(mixed_data_list): + data.update({'dataset': dataset}) + _results = self.pre_transform(data) + _results.pop('dataset') + mixed_data_list[i] = _results + + return mixed_data_list + + +@TRANSFORMS.register_module() +class Mosaic(MixImageTransform): + """Mosaic augmentation. This transformation takes four input images and + combines them into a single output image using the mosaic technique. The + resulting image is composed of parts from each of the four sub-images. The + mosaic transform steps are as follows: + + 1. Choose the mosaic center as the intersection of the four images. + 2. Select the top-left image according to the index and randomly sample + three more images from the custom dataset. + 3. If an image is larger than the mosaic patch, it will be cropped. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | | + | +-----------+ pad | + | | | | + | | image1 +-----------+ + | | | | + | | | image2 | + center_y |----+-+-----------+-----------+ + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + Required Keys: + + - img + - bbox (optional) + - bbox_score (optional) + - category_id (optional) + - keypoints (optional) + - keypoints_visible (optional) + - area (optional) + + Modified Keys: + + - img + - bbox (optional) + - bbox_score (optional) + - category_id (optional) + - keypoints (optional) + - keypoints_visible (optional) + - area (optional) + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (width, height). + Defaults to (640, 640). + center_range (Sequence[float]): Center ratio range of mosaic + output. Defaults to (0.5, 1.5). + pad_val (int): Pad value. Defaults to 114. + pre_transform (Optional[Sequence[str]]): A sequence of transform + to be applied before mixup. Defaults to None. + prob (float): Probability of applying the mixup transformation. + Defaults to 1.0. 
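The control flow of ``MixImageTransform`` above, stripped of the MMEngine plumbing, is: with probability ``prob`` sample ``num_aux_image`` auxiliary samples, optionally run the pre-transform on them, then hand everything to the concrete mixing step. A compact stand-alone sketch (the dataset, pre-transform and mixing step are stand-ins):

```python
# Stand-alone sketch of the MixImageTransform flow; the dataset, pre-transform
# and mixing step are all stand-ins for the real MMEngine objects.
import copy
import random

def mix(results, dataset, num_aux_image, prob, pre_transform, apply_mix):
    if random.uniform(0, 1) >= prob:
        return results                          # skip augmentation
    idxs = [random.randrange(len(dataset)) for _ in range(num_aux_image)]
    aux = [copy.deepcopy(dataset[i]) for i in idxs]
    if pre_transform is not None:
        aux = [pre_transform(sample) for sample in aux]
    results['mixed_data_list'] = aux            # consumed by apply_mix
    return apply_mix(results)

demo_dataset = [{'img': i} for i in range(10)]
out = mix({'img': 99}, demo_dataset, num_aux_image=3, prob=1.0,
          pre_transform=None, apply_mix=lambda r: r)
print(len(out['mixed_data_list']))   # 3
```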
+ """ + + num_aux_image = 3 + + def __init__( + self, + img_scale: Tuple[int, int] = (640, 640), + center_range: Tuple[float, float] = (0.5, 1.5), + pad_val: float = 114.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + ): + + super().__init__(pre_transform=pre_transform, prob=prob) + + self.img_scale = img_scale + self.center_range = center_range + self.pad_val = pad_val + + def apply_mix(self, results: dict) -> dict: + """Apply mosaic augmentation to the input data.""" + + assert 'mixed_data_list' in results + mixed_data_list = results.pop('mixed_data_list') + assert len(mixed_data_list) == self.num_aux_image + + img, annos = self._create_mosaic_image(results, mixed_data_list) + bboxes = annos['bboxes'] + kpts = annos['keypoints'] + kpts_vis = annos['keypoints_visible'] + + bboxes = bbox_clip_border(bboxes, (2 * self.img_scale[0], + 2 * self.img_scale[1])) + kpts, kpts_vis = keypoint_clip_border(kpts, kpts_vis, + (2 * self.img_scale[0], + 2 * self.img_scale[1])) + + results['img'] = img + results['img_shape'] = img.shape + results['bbox'] = bboxes + results['category_id'] = annos['category_id'] + results['bbox_score'] = annos['bbox_scores'] + results['keypoints'] = kpts + results['keypoints_visible'] = kpts_vis + results['area'] = annos['area'] + + return results + + def _create_mosaic_image(self, results, mixed_data_list): + """Create the mosaic image and corresponding annotations by combining + four input images.""" + + # init mosaic image + img_scale_w, img_scale_h = self.img_scale + mosaic_img = np.full((int(img_scale_h * 2), int(img_scale_w * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + + # calculate mosaic center + center = (int(random.uniform(*self.center_range) * img_scale_w), + int(random.uniform(*self.center_range) * img_scale_h)) + + annos = defaultdict(list) + locs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for loc, data in zip(locs, (results, *mixed_data_list)): + + # process image + img = data['img'] + h, w = img.shape[:2] + scale_ratio = min(img_scale_h / h, img_scale_w / w) + img = mmcv.imresize(img, + (int(w * scale_ratio), int(h * scale_ratio))) + + # paste + paste_coord, crop_coord = self._mosaic_combine( + loc, center, img.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img[y1_c:y2_c, x1_c:x2_c] + padw = x1_p - x1_c + padh = y1_p - y1_c + + # merge annotations + if 'bbox' in data: + bboxes = data['bbox'] + + # rescale & translate + bboxes *= scale_ratio + bboxes[..., ::2] += padw + bboxes[..., 1::2] += padh + + annos['bboxes'].append(bboxes) + annos['bbox_scores'].append(data['bbox_score']) + annos['category_id'].append(data['category_id']) + + if 'keypoints' in data: + kpts = data['keypoints'] + + # rescale & translate + kpts *= scale_ratio + kpts[..., 0] += padw + kpts[..., 1] += padh + + annos['keypoints'].append(kpts) + annos['keypoints_visible'].append(data['keypoints_visible']) + + if 'area' in data: + annos['area'].append(data['area'] * scale_ratio**2) + + for key in annos: + annos[key] = np.concatenate(annos[key]) + return mosaic_img, annos + + def _mosaic_combine( + self, loc: str, center: Tuple[float, float], img_shape: Tuple[int, int] + ) -> Tuple[Tuple[int, int, int, int], Tuple[int, int, int, int]]: + """Determine the overall coordinates of the mosaic image and the + specific coordinates of the cropped sub-image.""" + + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + + x1, y1, 
x2, y2 = 0, 0, 0, 0 + cx, cy = center + w, h = img_shape + + if loc == 'top_left': + x1, y1, x2, y2 = max(cx - w, 0), max(cy - h, 0), cx, cy + crop_coord = w - (x2 - x1), h - (y2 - y1), w, h + elif loc == 'top_right': + x1, y1, x2, y2 = cx, max(cy - h, 0), min(cx + w, + self.img_scale[0] * 2), cy + crop_coord = 0, h - (y2 - y1), min(w, x2 - x1), h + elif loc == 'bottom_left': + x1, y1, x2, y2 = max(cx - w, + 0), cy, cx, min(self.img_scale[1] * 2, cy + h) + crop_coord = w - (x2 - x1), 0, w, min(y2 - y1, h) + else: + x1, y1, x2, y2 = cx, cy, min(cx + w, self.img_scale[0] * + 2), min(self.img_scale[1] * 2, cy + h) + crop_coord = 0, 0, min(w, x2 - x1), min(y2 - y1, h) + + return (x1, y1, x2, y2), crop_coord + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'center_range={self.center_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class YOLOXMixUp(MixImageTransform): + """MixUp data augmentation for YOLOX. This transform combines two images + through mixup to enhance the dataset's diversity. + + Mixup Transform Steps: + + 1. A random image is chosen from the dataset and placed in the + top-left corner of the target image (after padding and resizing). + 2. The target of the mixup transform is obtained by taking the + weighted average of the mixup image and the original image. + + .. code:: text + + mixup transform + +---------------+--------------+ + | mixup image | | + | +--------|--------+ | + | | | | | + +---------------+ | | + | | | | + | | image | | + | | | | + | | | | + | +-----------------+ | + | pad | + +------------------------------+ + + Required Keys: + + - img + - bbox (optional) + - bbox_score (optional) + - category_id (optional) + - keypoints (optional) + - keypoints_visible (optional) + - area (optional) + + Modified Keys: + + - img + - bbox (optional) + - bbox_score (optional) + - category_id (optional) + - keypoints (optional) + - keypoints_visible (optional) + - area (optional) + + Args: + img_scale (Sequence[int]): Image output size after mixup pipeline. + The shape order should be (width, height). Defaults to (640, 640). + ratio_range (Sequence[float]): Scale ratio of mixup image. + Defaults to (0.5, 1.5). + flip_ratio (float): Horizontal flip ratio of mixup image. + Defaults to 0.5. + pad_val (int): Pad value. Defaults to 114. + pre_transform (Optional[Sequence[str]]): A sequence of transform + to be applied before mixup. Defaults to None. + prob (float): Probability of applying the mixup transformation. + Defaults to 1.0. 
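A worked example of the ``'top_left'`` branch of ``_mosaic_combine`` above may help: the sub-image is anchored at the mosaic center, and whatever does not fit is cropped away from its left and top. The numbers are made up:

```python
# Worked numbers for the 'top_left' branch of _mosaic_combine: the sub-image is
# anchored at the mosaic center and the part that does not fit is cropped from
# its left/top. All values are illustrative.
cx, cy = 700, 600        # sampled mosaic center on a 2*img_scale canvas
w, h = 800, 500          # resized sub-image size (width, height)

x1, y1, x2, y2 = max(cx - w, 0), max(cy - h, 0), cx, cy        # paste region
crop = (w - (x2 - x1), h - (y2 - y1), w, h)                    # matching crop
print((x1, y1, x2, y2), crop)   # (0, 100, 700, 600) (100, 0, 800, 500)
```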
+ """ + num_aux_image = 1 + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + ratio_range: Tuple[float, float] = (0.5, 1.5), + flip_ratio: float = 0.5, + pad_val: float = 114.0, + bbox_clip_border: bool = True, + pre_transform: Sequence[dict] = None, + prob: float = 1.0): + assert isinstance(img_scale, tuple) + super().__init__(pre_transform=pre_transform, prob=prob) + self.img_scale = img_scale + self.ratio_range = ratio_range + self.flip_ratio = flip_ratio + self.pad_val = pad_val + self.bbox_clip_border = bbox_clip_border + + def apply_mix(self, results: dict) -> dict: + """YOLOX MixUp transform function.""" + + assert 'mixed_data_list' in results + mixed_data_list = results.pop('mixed_data_list') + assert len(mixed_data_list) == self.num_aux_image + + if mixed_data_list[0]['keypoints'].shape[0] == 0: + return results + + img, annos = self._create_mixup_image(results, mixed_data_list) + bboxes = annos['bboxes'] + kpts = annos['keypoints'] + kpts_vis = annos['keypoints_visible'] + + h, w = img.shape[:2] + bboxes = bbox_clip_border(bboxes, (w, h)) + kpts, kpts_vis = keypoint_clip_border(kpts, kpts_vis, (w, h)) + + results['img'] = img.astype(np.uint8) + results['img_shape'] = img.shape + results['bbox'] = bboxes + results['category_id'] = annos['category_id'] + results['bbox_score'] = annos['bbox_scores'] + results['keypoints'] = kpts + results['keypoints_visible'] = kpts_vis + results['area'] = annos['area'] + + return results + + def _create_mixup_image(self, results, mixed_data_list): + """Create the mixup image and corresponding annotations by combining + two input images.""" + + aux_results = mixed_data_list[0] + aux_img = aux_results['img'] + + # init mixup image + out_img = np.ones((self.img_scale[1], self.img_scale[0], 3), + dtype=aux_img.dtype) * self.pad_val + annos = defaultdict(list) + + # Calculate scale ratio and resize aux_img + scale_ratio = min(self.img_scale[1] / aux_img.shape[0], + self.img_scale[0] / aux_img.shape[1]) + aux_img = mmcv.imresize(aux_img, (int(aux_img.shape[1] * scale_ratio), + int(aux_img.shape[0] * scale_ratio))) + + # Set the resized aux_img in the top-left of out_img + out_img[:aux_img.shape[0], :aux_img.shape[1]] = aux_img + + # random rescale + jit_factor = random.uniform(*self.ratio_range) + scale_ratio *= jit_factor + out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), + int(out_img.shape[0] * jit_factor))) + + # random flip + is_filp = random.uniform(0, 1) > self.flip_ratio + if is_filp: + out_img = out_img[:, ::-1, :] + + # random crop + ori_img = results['img'] + aux_h, aux_w = out_img.shape[:2] + h, w = ori_img.shape[:2] + padded_img = np.ones((max(aux_h, h), max(aux_w, w), 3)) * self.pad_val + padded_img = padded_img.astype(np.uint8) + padded_img[:aux_h, :aux_w] = out_img + + dy = random.randint(0, max(0, padded_img.shape[0] - h) + 1) + dx = random.randint(0, max(0, padded_img.shape[1] - w) + 1) + padded_cropped_img = padded_img[dy:dy + h, dx:dx + w] + + # mix up + mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img + + # merge annotations + # bboxes + bboxes = aux_results['bbox'].copy() + bboxes *= scale_ratio + bboxes = bbox_clip_border(bboxes, (aux_w, aux_h)) + if is_filp: + bboxes = flip_bbox(bboxes, [aux_w, aux_h], 'xyxy') + bboxes[..., ::2] -= dx + bboxes[..., 1::2] -= dy + annos['bboxes'] = [results['bbox'], bboxes] + annos['bbox_scores'] = [ + results['bbox_score'], aux_results['bbox_score'] + ] + annos['category_id'] = [ + results['category_id'], aux_results['category_id'] + ] + + # keypoints + kpts 
= aux_results['keypoints'] * scale_ratio + kpts, kpts_vis = keypoint_clip_border(kpts, + aux_results['keypoints_visible'], + (aux_w, aux_h)) + if is_filp: + kpts, kpts_vis = flip_keypoints(kpts, kpts_vis, (aux_w, aux_h), + aux_results['flip_indices']) + kpts[..., 0] -= dx + kpts[..., 1] -= dy + annos['keypoints'] = [results['keypoints'], kpts] + annos['keypoints_visible'] = [results['keypoints_visible'], kpts_vis] + annos['area'] = [results['area'], aux_results['area'] * scale_ratio**2] + + for key in annos: + annos[key] = np.concatenate(annos[key]) + + return mixup_img, annos + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'flip_ratio={self.flip_ratio}, ' + repr_str += f'pad_val={self.pad_val})' + return repr_str diff --git a/mmpose/datasets/transforms/pose3d_transforms.py b/mmpose/datasets/transforms/pose3d_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..9dec8db64ba114dae1d86f3a21b709327787097b --- /dev/null +++ b/mmpose/datasets/transforms/pose3d_transforms.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Dict + +import numpy as np +from mmcv.transforms import BaseTransform + +from mmpose.registry import TRANSFORMS +from mmpose.structures.keypoint import flip_keypoints_custom_center + + +@TRANSFORMS.register_module() +class RandomFlipAroundRoot(BaseTransform): + """Data augmentation with random horizontal joint flip around a root joint. + + Args: + keypoints_flip_cfg (dict): Configurations of the + ``flip_keypoints_custom_center`` function for ``keypoints``. Please + refer to the docstring of the ``flip_keypoints_custom_center`` + function for more details. + target_flip_cfg (dict): Configurations of the + ``flip_keypoints_custom_center`` function for ``lifting_target``. + Please refer to the docstring of the + ``flip_keypoints_custom_center`` function for more details. + flip_prob (float): Probability of flip. Default: 0.5. + flip_camera (bool): Whether to flip horizontal distortion coefficients. + Default: ``False``. + flip_label (bool): Whether to flip labels instead of data. + Default: ``False``. + + Required keys: + - keypoints or keypoint_labels + - lifting_target or lifting_target_label + - keypoints_visible or keypoint_labels_visible (optional) + - lifting_target_visible (optional) + - flip_indices (optional) + + Modified keys: + - keypoints or keypoint_labels (optional) + - keypoints_visible or keypoint_labels_visible (optional) + - lifting_target or lifting_target_label (optional) + - lifting_target_visible (optional) + - camera_param (optional) + """ + + def __init__(self, + keypoints_flip_cfg: dict, + target_flip_cfg: dict, + flip_prob: float = 0.5, + flip_camera: bool = False, + flip_label: bool = False): + self.keypoints_flip_cfg = keypoints_flip_cfg + self.target_flip_cfg = target_flip_cfg + self.flip_prob = flip_prob + self.flip_camera = flip_camera + self.flip_label = flip_label + + def transform(self, results: Dict) -> dict: + """The transform function of :class:`RandomFlipAroundRoot`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
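The flip used by ``RandomFlipAroundRoot`` above mirrors the x coordinate about a chosen center and swaps left/right joints via ``flip_indices``. The sketch below shows that general idea with made-up keypoints; it is not the exact ``flip_keypoints_custom_center`` helper, which also selects the center from the config keys (``center_mode``, ``center_x``, ``center_index``):

```python
# General idea of a flip around a custom center, with made-up keypoints in
# (N, K, 3) layout (x, y, score). Joints 0 and 1 are a left/right pair.
import numpy as np

kpts = np.array([[[0.2, 0.5, 1.0],
                  [0.8, 0.5, 1.0],
                  [0.5, 0.1, 1.0]]])
flip_indices = [1, 0, 2]
center_x = 0.5

flipped = kpts[:, flip_indices].copy()            # swap symmetric joints
flipped[..., 0] = 2 * center_x - flipped[..., 0]  # mirror x about center_x
print(flipped[0, :, 0])   # [0.2 0.8 0.5]
```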
+ """ + + if np.random.rand() <= self.flip_prob: + if self.flip_label: + assert 'keypoint_labels' in results + assert 'lifting_target_label' in results + keypoints_key = 'keypoint_labels' + keypoints_visible_key = 'keypoint_labels_visible' + target_key = 'lifting_target_label' + else: + assert 'keypoints' in results + assert 'lifting_target' in results + keypoints_key = 'keypoints' + keypoints_visible_key = 'keypoints_visible' + target_key = 'lifting_target' + + keypoints = results[keypoints_key] + if keypoints_visible_key in results: + keypoints_visible = results[keypoints_visible_key] + else: + keypoints_visible = np.ones( + keypoints.shape[:-1], dtype=np.float32) + + lifting_target = results[target_key] + if 'lifting_target_visible' in results: + lifting_target_visible = results['lifting_target_visible'] + else: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + + if 'flip_indices' not in results: + flip_indices = list(range(self.num_keypoints)) + else: + flip_indices = results['flip_indices'] + + # flip joint coordinates + _camera_param = deepcopy(results['camera_param']) + + keypoints, keypoints_visible = flip_keypoints_custom_center( + keypoints, + keypoints_visible, + flip_indices, + center_mode=self.keypoints_flip_cfg.get( + 'center_mode', 'static'), + center_x=self.keypoints_flip_cfg.get('center_x', 0.5), + center_index=self.keypoints_flip_cfg.get('center_index', 0)) + lifting_target, lifting_target_visible = flip_keypoints_custom_center( # noqa + lifting_target, + lifting_target_visible, + flip_indices, + center_mode=self.target_flip_cfg.get('center_mode', 'static'), + center_x=self.target_flip_cfg.get('center_x', 0.5), + center_index=self.target_flip_cfg.get('center_index', 0)) + + results[keypoints_key] = keypoints + results[keypoints_visible_key] = keypoints_visible + results[target_key] = lifting_target + results['lifting_target_visible'] = lifting_target_visible + + # flip horizontal distortion coefficients + if self.flip_camera: + assert 'camera_param' in results, \ + 'Camera parameters are missing.' + + assert 'c' in _camera_param + _camera_param['c'][0] *= -1 + + if 'p' in _camera_param: + _camera_param['p'][0] *= -1 + + results['camera_param'].update(_camera_param) + + return results diff --git a/mmpose/datasets/transforms/topdown_transforms.py b/mmpose/datasets/transforms/topdown_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6b60bc0198199b7b9141f0d835888c20c29017cd --- /dev/null +++ b/mmpose/datasets/transforms/topdown_transforms.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple + +import cv2 +import numpy as np +from mmcv.transforms import BaseTransform +from mmengine import is_seq_of + +from mmpose.registry import TRANSFORMS +from mmpose.structures.bbox import get_udp_warp_matrix, get_warp_matrix, bbox_cs2xyxy, bbox_xyxy2cs + + +@TRANSFORMS.register_module() +class TopdownAffine(BaseTransform): + """Get the bbox image as the model input by affine transform. + + Required Keys: + + - img + - bbox_center + - bbox_scale + - bbox_rotation (optional) + - keypoints (optional) + + Modified Keys: + + - img + - bbox_scale + + Added Keys: + + - input_size + - transformed_keypoints + - bbox_mask + + Args: + input_size (Tuple[int, int]): The input image size of the model in + [w, h]. The bbox region will be cropped and resize to `input_size` + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. 
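Before the remaining ``TopdownAffine`` details, a small worked example of the aspect-ratio fix this transform applies to ``bbox_scale`` (it mirrors the ``_fix_aspect_ratio`` helper defined further down; the numbers are illustrative):

```python
# Worked example of reshaping bbox scales to a fixed w/h aspect ratio, as
# TopdownAffine does before cropping. Values are illustrative.
import numpy as np

bbox_scale = np.array([[100., 300.], [400., 200.]])   # (n, 2) widths/heights
aspect_ratio = 192 / 256                               # target w/h

w, h = np.hsplit(bbox_scale, [1])
fixed = np.where(w > h * aspect_ratio,
                 np.hstack([w, w / aspect_ratio]),     # too wide -> grow height
                 np.hstack([h * aspect_ratio, h]))     # too tall -> grow width
print(fixed)   # [[225.         300.        ]
               #  [400.         533.33333333]]
```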
Defaults to ``False`` + + .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + input_padding: float = 1.25, + use_udp: bool = False) -> None: + super().__init__() + + assert is_seq_of(input_size, int) and len(input_size) == 2, ( + f'Invalid input_size {input_size}') + + self.input_size = input_size + self.use_udp = use_udp + self.input_padding = input_padding + + @staticmethod + def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float): + """Reshape the bbox to a fixed aspect ratio. + + Args: + bbox_scale (np.ndarray): The bbox scales (w, h) in shape (n, 2) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.darray: The reshaped bbox scales in (n, 2) + """ + + w, h = np.hsplit(bbox_scale, [1]) + bbox_scale = np.where(w > h * aspect_ratio, + np.hstack([w, w / aspect_ratio]), + np.hstack([h * aspect_ratio, h])) + return bbox_scale + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`TopdownAffine`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + w, h = self.input_size + warp_size = (int(w), int(h)) + img_h, img_w = results['img'].shape[:2] + + bbox_xyxy = results['bbox_xyxy_wrt_input'].flatten() + bbox_xyxy[:2] = np.maximum(bbox_xyxy[:2], 0) + bbox_xyxy[2:4] = np.minimum(bbox_xyxy[2:4], [img_w, img_h]) + x0, y0, x1, y1 = bbox_xyxy[:4].astype(int) + bbox_mask = np.zeros((img_h, img_w), dtype=np.uint8) + bbox_mask[y0:y1, x0:x1] = 1 + + + # Take the bbox wrt the input + bbox_xyxy_wrt_input = results.get('bbox_xyxy_wrt_input', None) + if bbox_xyxy_wrt_input is not None: + _c, _s = bbox_xyxy2cs(bbox_xyxy_wrt_input, padding=self.input_padding) + results['bbox_center'] = _c.reshape(1, 2) + results['bbox_scale'] = _s.reshape(1, 2) + + # reshape bbox to fixed aspect ratio + results['bbox_scale'] = self._fix_aspect_ratio( + results['bbox_scale'], aspect_ratio=w / h) + + # TODO: support multi-instance + assert results['bbox_center'].shape[0] == 1, ( + 'Top-down heatmap only supports single instance. Got invalid ' + f'shape of bbox_center {results["bbox_center"].shape}.') + + center = results['bbox_center'][0] + scale = results['bbox_scale'][0] + if 'bbox_rotation' in results: + rot = results['bbox_rotation'][0] + else: + rot = 0. 
+ + if self.use_udp: + warp_mat = get_udp_warp_matrix( + center, scale, rot, output_size=(w, h)) + else: + warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) + + if isinstance(results['img'], list): + results['img'] = [ + cv2.warpAffine( + img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) + for img in results['img'] + ] + else: + results['img'] = cv2.warpAffine( + results['img'], warp_mat, warp_size, flags=cv2.INTER_LINEAR) + bbox_mask = cv2.warpAffine( + bbox_mask, warp_mat, warp_size, flags=cv2.INTER_LINEAR) + bbox_mask = bbox_mask.reshape(1, h, w) + results['bbox_mask'] = bbox_mask + + if results.get('keypoints', None) is not None: + if results.get('transformed_keypoints', None) is not None: + transformed_keypoints = results['transformed_keypoints'].copy() + else: + transformed_keypoints = results['keypoints'].copy() + # Only transform (x, y) coordinates + transformed_keypoints[..., :2] = cv2.transform( + transformed_keypoints[..., :2], warp_mat) + results['transformed_keypoints'] = transformed_keypoints + + if results.get('bbox_xyxy_wrt_input', None) is not None: + bbox_xyxy_wrt_input = results['bbox_xyxy_wrt_input'].copy() + bbox_xyxy_wrt_input = bbox_xyxy_wrt_input.reshape(1, 2, 2) + bbox_xyxy_wrt_input = cv2.transform( + bbox_xyxy_wrt_input, warp_mat) + results['bbox_xyxy_wrt_input'] = bbox_xyxy_wrt_input.reshape(1, 4) + + results['input_size'] = (w, h) + results['input_center'] = center + results['input_scale'] = scale + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(input_size={self.input_size}, ' + repr_str += f'use_udp={self.use_udp})' + return repr_str diff --git a/mmpose/demo/MMPose_Tutorial.ipynb b/mmpose/demo/MMPose_Tutorial.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..0e9ff9b57f1c623af817c18210e64a6008a52bd2 --- /dev/null +++ b/mmpose/demo/MMPose_Tutorial.ipynb @@ -0,0 +1,3944 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "F77yOqgkX8p4" + }, + "source": [ + "\"Open" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "8xX3YewOtqV0" + }, + "source": [ + "# MMPose Tutorial\n", + "\n", + "Welcome to MMPose colab tutorial! In this tutorial, we will show you how to\n", + "\n", + "- install MMPose 1.x\n", + "- perform inference with an MMPose model\n", + "- train a new mmpose model with your own datasets\n", + "\n", + "Let's start!" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "bkw-kUD8t3t8" + }, + "source": [ + "## Install MMPose\n", + "\n", + "We recommend to use a conda environment to install mmpose and its dependencies. And compilers `nvcc` and `gcc` are required." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "0f_Ebb2otWtd", + "outputId": "8c16b8ae-b927-41d5-c49e-d61ba6798a2d" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2022 NVIDIA Corporation\n", + "Built on Wed_Sep_21_10:33:58_PDT_2022\n", + "Cuda compilation tools, release 11.8, V11.8.89\n", + "Build cuda_11.8.r11.8/compiler.31833905_0\n", + "gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", + "Copyright (C) 2019 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n", + "/usr/local/bin/python\n" + ] + } + ], + "source": [ + "# check NVCC version\n", + "!nvcc -V\n", + "\n", + "# check GCC version\n", + "!gcc --version\n", + "\n", + "# check python in conda environment\n", + "!which python" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "igSm4jhihE2M", + "outputId": "0d521640-a4d7-4264-889c-df862e9c332f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://download.pytorch.org/whl/cu118, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.9/dist-packages (2.0.0+cu118)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (0.15.1+cu118)\n", + "Requirement already satisfied: torchaudio in /usr/local/lib/python3.9/dist-packages (2.0.1+cu118)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch) (3.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch) (3.11.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch) (1.11.1)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch) (2.0.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch) (3.1.2)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch) (4.5.0)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (16.0.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from torchvision) (1.22.4)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision) (2.27.1)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.9/dist-packages (from torchvision) (8.4.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch) (2.1.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (1.26.15)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2.0.12)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2022.12.7)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch) (1.3.0)\n" + ] + } + ], + "source": [ + "# install dependencies: (if your colab has CUDA 11.8)\n", + "%pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "MLcoZr3ot9iw", + "outputId": "70e5d18e-746c-41a3-a761-6303b79eaf02" + }, + "outputs": [ + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting openmim\n", + " Downloading openmim-0.3.7-py2.py3-none-any.whl (51 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.3/51.3 kB\u001b[0m \u001b[31m1.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from openmim) (0.8.10)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from openmim) (13.3.3)\n", + "Requirement already satisfied: pip>=19.3 in /usr/local/lib/python3.9/dist-packages (from openmim) (23.0.1)\n", + "Collecting colorama\n", + " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", + "Collecting model-index\n", + " Downloading model_index-0.1.11-py3-none-any.whl (34 kB)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.9/dist-packages (from openmim) (1.5.3)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from openmim) (2.27.1)\n", + "Requirement already satisfied: Click in /usr/local/lib/python3.9/dist-packages (from openmim) (8.1.3)\n", + "Requirement already satisfied: markdown in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (3.4.3)\n", + "Collecting ordered-set\n", + " Downloading ordered_set-4.1.0-py3-none-any.whl (7.6 kB)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (6.0)\n", + "Requirement already satisfied: numpy>=1.20.3 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (1.22.4)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2022.7.1)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2.0.12)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2022.12.7)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (1.26.15)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.14.0)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.2.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->openmim) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.8.1->pandas->openmim) (1.16.0)\n", + "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.9/dist-packages (from markdown->model-index->openmim) (6.2.0)\n", + "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.9/dist-packages (from importlib-metadata>=4.4->markdown->model-index->openmim) (3.15.0)\n", + "Installing collected packages: ordered-set, colorama, model-index, openmim\n", + "Successfully installed colorama-0.4.6 
model-index-0.1.11 openmim-0.3.7 ordered-set-4.1.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmengine\n", + " Downloading mmengine-0.7.2-py3-none-any.whl (366 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m366.9/366.9 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine) (13.3.3)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine) (3.7.1)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmengine) (6.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmengine) (4.7.0.72)\n", + "Collecting yapf\n", + " Downloading yapf-0.32.0-py2.py3-none-any.whl (190 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.2/190.2 kB\u001b[0m \u001b[31m17.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine) (2.2.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmengine) (1.22.4)\n", + "Collecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.4.4)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (5.12.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (23.0)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (0.11.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (2.8.2)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (3.0.9)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (8.4.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.0.7)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.14.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmengine) (3.15.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine) 
(0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine) (1.16.0)\n", + "Installing collected packages: yapf, addict, mmengine\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed addict-2.4.0 mmengine-0.7.2 yapf-0.32.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmcv>=2.0.0rc1\n", + " Downloading https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.0.0-cp39-cp39-manylinux1_x86_64.whl (74.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m74.4/74.4 MB\u001b[0m \u001b[31m12.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: mmengine>=0.2.0 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.7.2)\n", + "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.32.0)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (23.0)\n", + "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (2.4.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (1.22.4)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (6.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (4.7.0.72)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (8.4.0)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.7.1)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (13.3.3)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.8.2)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from 
matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (5.12.0)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.0.9)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.4.4)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.0.7)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.14.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.15.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.16.0)\n", + "Installing collected packages: mmcv\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed mmcv-2.0.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmdet>=3.0.0rc0\n", + " Downloading mmdet-3.0.0-py3-none-any.whl (1.7 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m71.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.16.0)\n", + "Collecting terminaltables\n", + " Downloading terminaltables-3.1.10-py2.py3-none-any.whl (15 kB)\n", + "Requirement already satisfied: pycocotools in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.6)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.10.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.22.4)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (3.7.1)\n", + "Requirement already satisfied: shapely in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.1)\n", + "Requirement already satisfied: mmengine<1.0.0,>=0.7.1 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (0.7.2)\n", + "Requirement already satisfied: mmcv<2.1.0,>=2.0.0rc4 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.0)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (6.0)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (23.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (4.7.0.72)\n", + "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (2.4.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (8.4.0)\n", + "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (0.32.0)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (13.3.3)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (2.8.2)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.4.4)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (0.11.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in 
/usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.0.7)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (3.0.9)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmdet>=3.0.0rc0) (3.15.0)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.14.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (0.1.2)\n", + "Installing collected packages: terminaltables, mmdet\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed mmdet-3.0.0 terminaltables-3.1.10\n" + ] + } + ], + "source": [ + "# install MMEngine, MMCV and MMDetection using MIM\n", + "%pip install -U openmim\n", + "!mim install mmengine\n", + "!mim install \"mmcv>=2.0.0\"\n", + "!mim install \"mmdet>=3.0.0\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "42hRcloJhE2N", + "outputId": "9175e011-82c0-438d-f378-264e8467eb09" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting git+https://github.com/jin-s13/xtcocoapi\n", + " Cloning https://github.com/jin-s13/xtcocoapi to /tmp/pip-req-build-6ts8xw10\n", + " Running command git clone --filter=blob:none --quiet https://github.com/jin-s13/xtcocoapi /tmp/pip-req-build-6ts8xw10\n", + " Resolved https://github.com/jin-s13/xtcocoapi to commit 86a60cab276e619dac5d22834a36dceaf7aa0a38\n", + " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (67.6.1)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (0.29.34)\n", + "Requirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (3.7.1)\n", + "Requirement already satisfied: numpy>=1.20.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (1.22.4)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.4.4)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (4.39.3)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (0.11.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (23.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (3.0.9)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (2.8.2)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (8.4.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib>=2.1.0->xtcocotools==1.13) (3.15.0)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->xtcocotools==1.13) (1.16.0)\n", + "Building wheels for collected packages: xtcocotools\n", + " Building wheel for xtcocotools (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for xtcocotools: filename=xtcocotools-1.13-cp39-cp39-linux_x86_64.whl size=402078 sha256=e6a1d4ea868ca2cbd8151f85509641b20b24745a9b8b353348ba8386c35ee6c6\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-a15wpqzs/wheels/3f/df/8b/d3eff2ded4b03a665d977a0baa328d9efa2f9ac9971929a222\n", + "Successfully built xtcocotools\n", + "Installing collected packages: xtcocotools\n", + "Successfully installed xtcocotools-1.13\n" + ] + } + ], + "source": [ + "# for better Colab compatibility, install xtcocotools from source\n", + "%pip install git+https://github.com/jin-s13/xtcocoapi" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "lzuSKOjMvJZu", + "outputId": "d6a7a3f8-2d96-40a6-a7c4-65697e18ffc9" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'mmpose'...\n", + "remote: Enumerating objects: 26225, done.\u001b[K\n", + "remote: Counting objects: 100% (97/97), done.\u001b[K\n", + "remote: Compressing objects: 100% (67/67), done.\u001b[K\n", + "remote: Total 26225 (delta 33), reused 67 (delta 28), pack-reused 26128\u001b[K\n", + "Receiving objects: 100% (26225/26225), 28.06 MiB | 13.36 MiB/s, done.\n", + "Resolving deltas: 100% (18673/18673), done.\n", + "/content/mmpose\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 2)) (1.22.4)\n", + "Requirement already satisfied: torch>=1.6 in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 3)) (2.0.0+cu118)\n", + "Collecting chumpy\n", + " Downloading chumpy-0.70.tar.gz (50 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.6/50.6 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Collecting json_tricks\n", + " Downloading json_tricks-3.16.1-py2.py3-none-any.whl (27 kB)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 3)) (3.7.1)\n", + "Collecting munkres\n", + " Downloading munkres-1.1.4-py2.py3-none-any.whl (7.0 kB)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 6)) (4.7.0.72)\n", + "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 7)) (8.4.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 8)) (1.10.1)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 9)) (0.15.1+cu118)\n", + "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 10)) (1.13)\n", + "Collecting coverage\n", + " Downloading coverage-7.2.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (227 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.5/227.5 kB\u001b[0m \u001b[31m27.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting flake8\n", + " Downloading flake8-6.0.0-py2.py3-none-any.whl (57 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.8/57.8 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting interrogate\n", + " Downloading interrogate-1.5.0-py3-none-any.whl (45 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.3/45.3 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting isort==4.3.21\n", + " Downloading isort-4.3.21-py2.py3-none-any.whl (42 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m42.3/42.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting parameterized\n", + " Downloading parameterized-0.9.0-py2.py3-none-any.whl (20 kB)\n", + "Requirement already satisfied: pytest in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 6)) (7.2.2)\n", + "Collecting pytest-runner\n", + " Downloading pytest_runner-6.0.0-py3-none-any.whl (7.2 kB)\n", + "Collecting xdoctest>=0.10.0\n", + " Downloading xdoctest-1.1.1-py3-none-any.whl (137 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.6/137.6 kB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 9)) (0.32.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from -r requirements/optional.txt (line 1)) (2.27.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.11.0)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r 
requirements/build.txt (line 3)) (4.5.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1.2)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (2.0.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (1.11.1)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (16.0.1)\n", + "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->-r requirements/runtime.txt (line 1)) (1.16.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (3.0.9)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.4.4)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (2.8.2)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (23.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (0.11.0)\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (67.6.1)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (0.29.34)\n", + "Collecting pyflakes<3.1.0,>=3.0.0\n", + " Downloading pyflakes-3.0.1-py2.py3-none-any.whl (62 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting pycodestyle<2.11.0,>=2.10.0\n", + " Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.3/41.3 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting mccabe<0.8.0,>=0.7.0\n", + " Downloading mccabe-0.7.0-py2.py3-none-any.whl (7.3 kB)\n", + "Collecting py\n", + " Downloading py-1.11.0-py2.py3-none-any.whl (98 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.7/98.7 kB\u001b[0m \u001b[31m11.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + 
"\u001b[?25hRequirement already satisfied: colorama in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.4.6)\n", + "Requirement already satisfied: toml in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.10.2)\n", + "Requirement already satisfied: attrs in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (22.2.0)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.8.10)\n", + "Requirement already satisfied: click>=7.1 in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (8.1.3)\n", + "Requirement already satisfied: tomli>=1.0.0 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.1)\n", + "Requirement already satisfied: pluggy<2.0,>=0.12 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.0.0)\n", + "Requirement already satisfied: iniconfig in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.0)\n", + "Requirement already satisfied: exceptiongroup>=1.0.0rc8 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.1.1)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (3.4)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->-r requirements/runtime.txt (line 3)) (3.15.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch>=1.6->-r requirements/build.txt (line 3)) (2.1.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch>=1.6->-r requirements/build.txt (line 3)) (1.3.0)\n", + "Building wheels for collected packages: chumpy\n", + " Building wheel for chumpy (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for chumpy: filename=chumpy-0.70-py3-none-any.whl size=58282 sha256=ccde33ce99f135241a3f9ed380871cf8e4a569053d21b0ceba97809ddf3b26c8\n", + " Stored in directory: /root/.cache/pip/wheels/71/b5/d3/bbff0d638d797944856371a4ee326f9ffb1829083a383bba77\n", + "Successfully built chumpy\n", + "Installing collected packages: munkres, json_tricks, xdoctest, pytest-runner, pyflakes, pycodestyle, py, parameterized, mccabe, isort, coverage, interrogate, flake8, chumpy\n", + "Successfully installed chumpy-0.70 coverage-7.2.3 flake8-6.0.0 interrogate-1.5.0 isort-4.3.21 json_tricks-3.16.1 mccabe-0.7.0 munkres-1.1.4 parameterized-0.9.0 py-1.11.0 pycodestyle-2.10.0 pyflakes-3.0.1 pytest-runner-6.0.0 xdoctest-1.1.1\n", + "Using pip 23.0.1 from /usr/local/lib/python3.9/dist-packages/pip (python 3.9)\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Obtaining file:///content/mmpose\n", + " Running command python setup.py egg_info\n", + " running egg_info\n", + " creating /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info\n", + " writing /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/PKG-INFO\n", + " writing dependency_links to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/dependency_links.txt\n", + " writing requirements to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/requires.txt\n", + " writing top-level names to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/top_level.txt\n", + " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " reading manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " reading manifest template 'MANIFEST.in'\n", + " warning: no files found matching 'mmpose/.mim/model-index.yml'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/configs'\n", + " warning: no files found matching '*.yml' under directory 'mmpose/.mim/configs'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/tools'\n", + " warning: no files found matching '*.sh' under directory 'mmpose/.mim/tools'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/demo'\n", + " adding license file 'LICENSE'\n", + " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: chumpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.70)\n", + "Requirement already satisfied: json_tricks in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.16.1)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.7.1)\n", + "Requirement already satisfied: munkres in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.1.4)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.22.4)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (4.7.0.72)\n", + "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (8.4.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.10.1)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.15.1+cu118)\n", + "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.13)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (0.29.34)\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (67.6.1)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (0.11.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (2.8.2)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (5.12.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (23.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.4.4)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (3.0.9)\n", + "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->mmpose==1.0.0) (1.16.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.27.1)\n", + "Requirement already satisfied: torch==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.0.0+cu118)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.11.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1.2)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1)\n", + "Requirement already satisfied: typing-extensions in 
/usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (4.5.0)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (2.0.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (1.11.1)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (16.0.1)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmpose==1.0.0) (3.15.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2022.12.7)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (1.26.15)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2.0.12)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch==2.0.0->torchvision->mmpose==1.0.0) (2.1.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch==2.0.0->torchvision->mmpose==1.0.0) (1.3.0)\n", + "Installing collected packages: mmpose\n", + " Running setup.py develop for mmpose\n", + " Running command python setup.py develop\n", + " running develop\n", + " /usr/local/lib/python3.9/dist-packages/setuptools/command/easy_install.py:144: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + " /usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + " running egg_info\n", + " creating mmpose.egg-info\n", + " writing mmpose.egg-info/PKG-INFO\n", + " writing dependency_links to mmpose.egg-info/dependency_links.txt\n", + " writing requirements to mmpose.egg-info/requires.txt\n", + " writing top-level names to mmpose.egg-info/top_level.txt\n", + " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " reading manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " reading manifest template 'MANIFEST.in'\n", + " adding license file 'LICENSE'\n", + " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " running build_ext\n", + " Creating /usr/local/lib/python3.9/dist-packages/mmpose.egg-link (link to .)\n", + " Adding mmpose 1.0.0 to easy-install.pth file\n", + "\n", + " Installed /content/mmpose\n", + "Successfully installed mmpose-1.0.0\n" + ] + } + ], + "source": [ + "!git clone https://github.com/open-mmlab/mmpose.git\n", + "# The master branch is version 1.x \n", + "%cd mmpose\n", + "%pip install -r requirements.txt\n", + "%pip install -v -e .\n", + "# \"-v\" means verbose, or more output\n", + "# \"-e\" means installing a project in editable mode,\n", + "# thus any local modifications made to the code will take effect without reinstallation." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Miy2zVRcw6kL", + "outputId": "1cbae5a0-249a-4cb2-980a-7db592c759da" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch version: 2.0.0+cu118 True\n", + "torchvision version: 0.15.1+cu118\n", + "mmpose version: 1.0.0\n", + "cuda version: 11.8\n", + "compiler information: GCC 9.3\n" + ] + } + ], + "source": [ + "# Check Pytorch installation\n", + "import torch, torchvision\n", + "\n", + "print('torch version:', torch.__version__, torch.cuda.is_available())\n", + "print('torchvision version:', torchvision.__version__)\n", + "\n", + "# Check MMPose installation\n", + "import mmpose\n", + "\n", + "print('mmpose version:', mmpose.__version__)\n", + "\n", + "# Check mmcv installation\n", + "from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n", + "\n", + "print('cuda version:', get_compiling_cuda_version())\n", + "print('compiler information:', get_compiler_version())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "r2bf94XpyFnk" + }, + "source": [ + "## Inference with an MMPose model\n", + "\n", + "MMPose provides high-level APIs for model inference and training." 
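+ ,"\n",
+ "\n",
+ "As a minimal sketch of that high-level API (reusing the HRNet config and checkpoint from the cells below; when no bounding boxes are passed, `inference_topdown` treats the whole image as a single person box):\n",
+ "\n",
+ "```python\n",
+ "# Minimal sketch: run the top-down HRNet pose model on a single image.\n",
+ "# The cells below add a person detector and result visualization on top of this.\n",
+ "from mmpose.apis import inference_topdown, init_model\n",
+ "\n",
+ "pose_estimator = init_model(\n",
+ "    'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',\n",
+ "    'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth',\n",
+ "    device='cuda:0')\n",
+ "\n",
+ "pose_results = inference_topdown(pose_estimator, 'tests/data/coco/000000197388.jpg')\n",
+ "keypoints = pose_results[0].pred_instances.keypoints  # (x, y) keypoint coordinates\n",
+ "```"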
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "JjTt4LZAx_lK", + "outputId": "485b62c4-226b-45fb-a864-99c2a029353c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\" to /root/.cache/torch/hub/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\" to /root/.cache/torch/hub/checkpoints/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "04/13 16:14:37 - mmengine - WARNING - `Visualizer` backend is not initialized because save_dir is None.\n" + ] + } + ], + "source": [ + "import mmcv\n", + "from mmcv import imread\n", + "import mmengine\n", + "from mmengine.registry import init_default_scope\n", + "import numpy as np\n", + "\n", + "from mmpose.apis import inference_topdown\n", + "from mmpose.apis import init_model as init_pose_estimator\n", + "from mmpose.evaluation.functional import nms\n", + "from mmpose.registry import VISUALIZERS\n", + "from mmpose.structures import merge_data_samples\n", + "\n", + "try:\n", + " from mmdet.apis import inference_detector, init_detector\n", + " has_mmdet = True\n", + "except (ImportError, ModuleNotFoundError):\n", + " has_mmdet = False\n", + "\n", + "local_runtime = False\n", + "\n", + "try:\n", + " from google.colab.patches import cv2_imshow # for image visualization in colab\n", + "except:\n", + " local_runtime = True\n", + "\n", + "img = 'tests/data/coco/000000197388.jpg'\n", + "pose_config = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", + "pose_checkpoint = 'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth'\n", + "det_config = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py'\n", + "det_checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'\n", + "\n", + "device = 'cuda:0'\n", + "cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True)))\n", + "\n", + "\n", + "# build detector\n", + "detector = init_detector(\n", + " det_config,\n", + " det_checkpoint,\n", + " device=device\n", + ")\n", + "\n", + "\n", + "# build pose estimator\n", + "pose_estimator = init_pose_estimator(\n", + " pose_config,\n", + " pose_checkpoint,\n", + " device=device,\n", + " cfg_options=cfg_options\n", + ")\n", + "\n", + "# init visualizer\n", + "pose_estimator.cfg.visualizer.radius = 3\n", + "pose_estimator.cfg.visualizer.line_width = 1\n", + "visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer)\n", + "# the dataset_meta is loaded from the checkpoint 
and\n", + "# then pass to the model in init_pose_estimator\n", + "visualizer.set_dataset_meta(pose_estimator.dataset_meta)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "tsSM0NRPEG1Z" + }, + "outputs": [], + "source": [ + "\n", + "def visualize_img(img_path, detector, pose_estimator, visualizer,\n", + " show_interval, out_file):\n", + " \"\"\"Visualize predicted keypoints (and heatmaps) of one image.\"\"\"\n", + "\n", + " # predict bbox\n", + " scope = detector.cfg.get('default_scope', 'mmdet')\n", + " if scope is not None:\n", + " init_default_scope(scope)\n", + " detect_result = inference_detector(detector, img_path)\n", + " pred_instance = detect_result.pred_instances.cpu().numpy()\n", + " bboxes = np.concatenate(\n", + " (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)\n", + " bboxes = bboxes[np.logical_and(pred_instance.labels == 0,\n", + " pred_instance.scores > 0.3)]\n", + " bboxes = bboxes[nms(bboxes, 0.3)][:, :4]\n", + "\n", + " # predict keypoints\n", + " pose_results = inference_topdown(pose_estimator, img_path, bboxes)\n", + " data_samples = merge_data_samples(pose_results)\n", + "\n", + " # show the results\n", + " img = mmcv.imread(img_path, channel_order='rgb')\n", + "\n", + " visualizer.add_datasample(\n", + " 'result',\n", + " img,\n", + " data_sample=data_samples,\n", + " draw_gt=False,\n", + " draw_heatmap=True,\n", + " draw_bbox=True,\n", + " show=False,\n", + " wait_time=show_interval,\n", + " out_file=out_file,\n", + " kpt_thr=0.3)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ogj5h9x-HiMA", + "outputId": "71452169-c16a-4a61-b558-f7518fcefaa0" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "04/13 16:15:22 - mmengine - WARNING - The current default scope \"mmpose\" is not \"mmdet\", `init_default_scope` will force set the currentdefault scope to \"mmdet\".\n", + "04/13 16:15:29 - mmengine - WARNING - The current default scope \"mmdet\" is not \"mmpose\", `init_default_scope` will force set the currentdefault scope to \"mmpose\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:664: UserWarning: Warning: The circle is out of bounds, the drawn circle may not be in the image\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:741: UserWarning: Warning: The bbox is out of bounds, the drawn bbox may not be in the image\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:812: UserWarning: Warning: The polygon is out of bounds, the drawn polygon may not be in the image\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "visualize_img(\n", + " img,\n", + " detector,\n", + " pose_estimator,\n", + " visualizer,\n", + " show_interval=0,\n", + " out_file=None)\n", + "\n", + "vis_result = visualizer.get_image()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 801 + }, + "id": "CEYxupWT3aJY", + "outputId": "05acd979-25b1-4b18-8738-d6b9edc6bfe1" + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAMQCAIAAAA4vkODAAEAAElEQVR4nGT9TcitW7ctBrXW+3ie+b5r7/2dv3s9Cf6gci8oJqIBTSCgFgLWBAuCBAtBJDGKBUGwYkGwFhAJRANqzUKCJBVBJQoWggkxSAzGGDT+BGIM995zzve313rnfMborVno43nXPnGx+djs713znfOZY/Sf1ltrnf+D/+k/TRw2hYgEacoACAFAEOh/AwBQiZREIzOBKElUDHKWiSUAkZkjULZRRxzXdZEkKQlAcGSEbAC2l/fLS+j/QjLh/nnbBds8gnPOzByPsdZaS8zIzLxmZhbcL26CBiBj9N8FAkAYQEjiMEn8//2ZS0EmaDsikABQVUccdhEAWaDs/jHismkEmWRCZVQYzzDsZESEiaqyfeQYVSJsgkRGf9IwSPp+FP3GGAZAR/9f/bn6/5L0NA9mWpdLRJYJVXKYERFkZl7XM48hKTMDadtARAAwimQEMo8jR2YOYozxeDtHHg5+fukAAkk6QQCfX9M+CPczvL8vijDhfiiG+pUM3n9hWZIMSULJuv9u/4kA6YT7x4EEA7FgAP29fP5e2+J+dH1c+z9+vr1l9fc4GABUy2C/pbIA0EDJ0qRdotxHsaokVZXNqmmbGbarCkAyzFhrLYkZJCFTRB+REkmGcb8lynGMpYoIRFTV4LD0/v5e16tgwcwMoKrgEBFQZpJ8PackoyLCNjNd9ThOSWTKhj0YLyyS0PrFV5NwAIgIUSHaxURE2IyIzCRNh+05Z0Q8Ho/ruj4+PoAwolARkXRmRoTWjIgVIaEfBckxoqqqisYYo7+XIw5JY4w159sxrut6rRdzxHgzUFXl9cZ9IDsmOCiUiVFjzlcOAlprRYyRp01jRkSHgjyO8zwhvT6eC6QcEV/e3799+wZgrpWZkSDZT6CvDFwkVwByIscYQUrqm6UOVAAcn3EhIqTVTz5GSiKyqqLjY/+5j2Lsy8IFH3EEOS3bA76gY7kYDiYIkbLTRWRmHzAANs/zzWuSJKBCRETEaz5zMDggTqzMJHOt/V0nrKoIyCSgESmICAMZYFrriFSH6AisSvCiOxJGjIiwSxIggZlZdZ830qXzPK/1conM4zgEV1UEXOqzFIGIfRKAGIzV8bcwMvfrrCKZSUsdKCYNMkjXsk0mEBmHjXLZFSOllWBatlfARDrDKNTb8fanf/qn/8a/8W+MMRwkWdaPb4/n85mZ11JmQisi+liutfrYV5VWRQySJTloCgg6woDLLvAgLangPnhLlcmhALk+Dy40Aq5VDP4iNNVyB7rM/PxfAI/HY72ux3m+6spMTXVsH1VlIfMolYExYlmZGQwA5f5liJ2FWcsMkKwqwyJASDoMAQEQDINGAAD7O/iMmxFhVJXvfIDBILnWkvo/igIiCAUNIjkKLimOwcCc01VH5sijqmbEtHdU7axAReRwSMo7tQGwC6BgAiQtguL3oN1xX5kha63VuUHV554FA+b9WkrKtEGAFJIwBR2ijSRdIpnMVQsuRZDsJ0b3TaeDAZMEO6LKtnWnlv6uIzpgdcggMiiUVOvxwxeioCVi8ChNR7xer8xYa53naXtxgiDSYSBQgfsGiiLtSAAwRcCOJEEEgWBXYtgZvB/lfu++c2EiQRn9HffPkSi6n23n/k5MZdUqr+pT11/ZjmbHQUCfLwuIgOwAvZPrL+umBGF85lyS7sRsR0Qw+q8oHCIZABIo+LPiIbnfNO+DA+6Ya6wdBdSpt49rCUiaHOAQSRZQCURw2bkLD8G2AxT8Wj7GGJGU0xpnCpi4GCSJrrr8/bmutdZamdm/dNUieRzHkvrmIwhakquLHdi/eBD9Xf6lQ61MgrYdkXCogGQSJZkQ/JrXqmX0Q6skR2RVLa+OsZmJ8loLiEAEiCWXAmRGRvR7lrSsDBbxWpMZ53hfwn6GZMaYFggaSRGkMAzbgoMMEBGZKbBguc6RVQU5Mqrq559/Zh+KvofwnC9pkTzO7GQAwNIuZ0mAEUF3ZEBX0Z3Rw9DdX+Qdoz7jVT9KSXC4K0My45AkyxB3EeFdPfPO+vi84UBGEEXKiDACkh2uQkQcxwFpTQ2iIkBB7LjP8EhG1+PB4QiwvDuTiFgqEzSDLCKWVvJAKIBVyB2rGIZBW7aiGyF39P9lK9IhHehYFWk+3t6fz+c4h2CUvCy4T+yOs7bE8pJkIWEkhrCqRgTp13xFBDM6vhCIiMgol2yhDB8RMG2s6xpjZIAMQUGyDHb0sCxJwZGUUf/6/+P/9sMPP9g2HJFVijG6xg0TqyKSqzJzWUfujFtVOdKMudYR6SAibUP3TQTlIkMMuPrJBEhHRJQUkZ83TdIxxnUVum6NPm9Ik5mJHV8+Q1zBC6attYjo2D4yjyXleWjOCIwxul5YNTtukuzskAyAJiNjt7MiCUslhYQcgBkmtMN37JJqJ+xuLh2lAgoA7K5HAISVoGnv43v3TuHso2OtVQE8Hg9J1XVNpCQKRgAMmhwZeSRr7WsmdW3Qp0skibT1ywRMRrg7F4EeOfpOFgvIjpKAOjvB7Kwlgx1HyM7FyxoMkFXFked5dDiAQ/33dzsOkoiwrojoDLHW+mzjOr7EXW1//vczmMIzsYw/RC6sZ5I20ufxUNVxHNf1PMeRmWOMr9dXmODGBhwkTEPCrosBMm2utRhgDHLnvP6VC+oE3FnC+ksRCuxEaGrfTO5wFl2ZATAMu7RUC6WqWnO3Hfcb4ISCcXgnRBEG12CUgV3X+xc5mJ2UP2Nf/y1bhO5mPSIIOqANK6BvGD7vEBlBdQfxiwTMzPrM+vf9AYKE5koyxvAul5wRGXmFbOMOarYd9B3ByxhEMuZ8VdjC6YMR/WFhB4hw2Hkc6G8ey3ZwWLxeK89khGAASxKMsCwqGdY+3gBQdt5VHfYtDdgwmLRRVYmcLNuZ2X1wVQEUMcDPaq8zAcA5MS3tTiHKhmAG6ccYx3FUlcVIwlhrmbgsGCjbDEf2/XIpAkBAvmESA7SDGuOYml7OcfSROh5nWOw0SUYX0WRw2AtkZn68nhEB8jzP7s7JbgMbSYI3OnJjJxJUnZxEZKRtIuMGnEqiNDLKKhNwQqrqX9evHxEds3crTErsQ+m7orMAxIISHNIqrxERMDxAR9jlqsx8PI6IACVRdiaWis6+HV13HyoDqkIOysGGEx1Iygo+8ryGMGtZRz9fqO4bsy+v7wC480J1IiVDwMhRVQxGRM31er3qmuN4B+rzvnc7ZldZwR1gbUREgxwimDGr/Frvb+daUikiFm3giXXqGApokbEsDpcuIBgeR9hVsGv1SexrF6AVAEQNDhV+/PHH67qO4xikgWT89re/zczBgYSlM4cakGNGkBkuZBwCug5YdNBhpojlBvAq6VJH8jAo3+coZBf8i2ILADIPaUYEMmD00wAwGCGf45DkSNt1zQTrmvtNBRIsYthGRtUEZHutC4BXBZXMajjRlsAQICBdElHuRJ/koOt6vgjrsxchboRVuOHBz/+1PUsjQtb+ABTDDMOUJDrIqi5AILk0397e3s
7zui7INBDdic8EOoEFYaOrshk3KL1jk5AkGLUfHsmNKWqXJyQtVYkZx3FI0hT3tYRBMKAiTBNiGMMgaBq2YAEx0iQYMgEva0G2BiFANskw4zNVObxhoxvbcARjWf3kP59bZ5S1rnXke4xT+u36ds3rPcbI7OQ9xjCQ47zW6y1i1kUnACKgfWhImkDnS7lQkuacDo8jJYEcDPh+d7AhrrIt3rewH/6dKhtq3u/zF0WGmdKSBNqrNBfkWpoqbQh6PXwE4UlkOkcECISpPU244fedNh13+uxaDfunULAJuKM0BiMZDpaFoHT34584dkDy92BqD6HoMCox1JeC0fenykREpKDucYMAs1HopT03ANnnJBrG1OMMyLYcZES9auQhwtqNb1VtCMswMKsiQqpuascY3VbiqojolrQskmMM2576ZX0GAEwRCcBBywzJgAPUKgQZkJcciejOGA7Y/WwZDvb1iv7tQVdVQCNGRNYyjH4slF9zfv34eDvPt8fbx8fHGAz69XoNDgC6s5qgDIw9vukjScV9FIN5N6SfgZ5UhDT3odOqGPnl7U2FNWdwdLVRtu2wr9eyFGOnlx4EeMcbExF5tyMw6J4zyR00DTbi09gEATIzTFVJNcYwJa/Sfqfgbiv7YLjcFUIZjAgxjEDU3VTbC7IZFBnRjehaS6sakXo8jscxPtbLJBKRoC2hutkCqqYYw4oRtpMuks5yUdThAJcEFMewTKIR487i7CoBAZj0rlplI8hIrjPHt7W6uRJDS48vP3x7vbpMzLv/ikwBWDuYASZ3nFzWdJ6PYQOux+M9cX18+4bwI2MaVWslGGnYRAVSUaqkxxgRsdZyFchRLlhEqKOlOkdKqKrMJJJI0pZGxPH+3ul6MNbS19eTZA4WvWoPqmK3/zoy+8ZGDynU0UNLlbFvHgCjusOGY7Ef4AIQ7g/rF69xHq7GyditZsNma2CM0EIfpVXKIyQdzgjanhaAsYTzPK7rymR//sfj3ctE0aLCHU5d1aiFQdoZiGS3yHKpGAMRpJhhWUBZ4Q1PfbYpVTWro2Cp58xVn31qp+uybQo3iEnYfjvPdV3Mkdy4/MhzWeORFr3WWuqwC6AISPvghyWVTdF2uBGPuwy8y8PI7AY4YtiYswBkBNCjLyBAd+onEdMFOMlIkyx7wWWfFVZxjGHC4DQvP/JQlhtptsDo1AHFOc6qqnXf/Ybw3Zg07lhwd2MRGfHUeobjzDHLY2AMzXq8vVXVx8fHBlFzT9zTA9jNCkkEBJMKZl9L2423I5EjJCXD4UCYHSe5bKg+A71ig2AkExnoG7KzIGCSUd2pQAiguu8MeC2pai0tle2AE8xgCjjhyOI+VNlzY2485HPArB6zYp+oDvHYPR4MZOQe/Uolm3CQ96yad0/Q0+XoarcHvSSquodNcIyj75IsjmFTwgwcmQejh8Tdc1d6zABUTQoIQtizg/nKzAXPpTEGgo/Ho+aaAPbouRpTJQ0pj2NdJentOCVVWXKOI1QjsysGqOQoQ1I2APaXEjDINNmQmGOUZuPc0hwcMdIWCobrqu4agw1vJmrGebiHFNJa63GeY6Qwq5aWg2PHbZsZe5zMXGtJOjDWNd+Pc9S41gsWxsEzZU+9Lpd4hJFwx+2uPBI0sVTMSLAs2KJez+fgaDymx6JEwktSxiF4zpXHyeZSyGOMpdmA82eX109FvsOVPkkn7pti0XB9omUZEYw4Vq1SHceBFXapihml2kCPwz0/7oq2rwYQUCBJWiBxGAtQkuM4EJi6VJlpK6KLM0fs3vr5fJIgnZnHkT3ZT1ByjlQRQDJH5no9DZDSXE4O4Vkzy0oPx9QMnUiUVw+EBUSDdd+HSiC7IcQmBFzPMcasFV2Bk9+uuTt+Ut3fkkSWNnYFbG5B1QSQmaHhlxop/fWvf52ZyFh20CMilGYUMeW0wzGLY3yxVMZ6VcQhZDCIJ0ABssaGqFBe6fcxMiJcWlfl6CCF67oA2LwQY4w4Hzl4XZfoJKWifOY4xtgkp1UiFiGAQYDBOImFItDR0jKBCFooaCQhjEBmSILTQqPlmX3UioxIMGLBLo2glgaDwTCRQ8uROeesLvozU3MdkZZhjAitKxlCSDiDy1paPToccfiTdLNkT2cygpbNel2PtwO1MUr3AOlCxlh0ScFh7VEclERqzjGGvGj88R/+0a9//WvH23GMta4jshw2B6mh8AHLnZISYRIY5YhRRFkI01hG/5aR3TIljSjTMNMEowQLThquYIzz/eNVmDOPyDz7awaJ4Kx6c5gWa5e9wGIV64sPgaJQyIwBYFlT16hjDNmEUGIgTyzPUKYjeJAu1DiGqoiUVKFADeeVhIK1VqzqQ0EkczBUs6ooWevBUU+U7YwMSgLxWhWBH/7gfV71ek3ymLa0RmgXStrjZyNczIPXmog4k2tdxxgHD0whRiVWOLyGiYIo2IsLQq3v6G5kjkweYWB2sseNwu2egwQHXNIqzTmrtKBLVbpsOBjHMSVcM3/IcmHWgYiM7kgiWatQ/UUSYJ/aCqaycIePAAkJlg8ShiwCQfYwuGb1VFXdiXaPrg0GmFFAlUbEDcy54dvxGPP1dESQWs5Era5jOmP1Ve2ZSXM64rKTIMcSzOMNOaWyMx8WAvl6TdtwKSMALoPQ6CrHVA4oyNH0IU+AaS4CVV1OZhyQvCplHY9rzhz5mX+j6Q5lZbosPCMG3WyMsL2uslkBUKDGmZn89vNHZkrl0RwcgcsZj8chLa9akTjPsWRbVQdi9iSDfBxv15qzLhKCx/GAXWM2giMUVg3GYLqAOAiUJs1M5jG0KhyXZnO+Xq/XMR6lCg9JlQoHnaC1qnwxR45RegI4g+zqWD0tlSIl/+FPv/r28bNKCHqHywv5KJXlZKwljDC5Lh1HSipN2yOSoFYtIIAMhKawJqBMMd5naXBSmcnJN7CMuSYiwpItjlh+PMbi0utSjlwAQ+ml15fHkXi8Ss1OzcGIQwKNvo52uYLic0GkULTO8OtK5CA5C7OUxyNUqqs4oSiOKDMRYFk913MfmHv8ikIAA6wG6EkTiqJhqhq9sSELO5siVt9sGiaDBLk0YbKphUDtTiEbRkhMwTwOA1ZWc7vWmhoeQzltofQWKSnSCquuX/3qx+fzW1VJYCkVL6ZtWEApk0zM9RhHxYtkiQvrOI61h+Jgxk08cs2roUTIZxzcvZiCLGmthYyDh2FAY0SjdBkBMfOhNQMLEVIUOExjZoOIwBKWKtwXjYk9k3B3zPYmlb5sQrWw+UOgzAiEJV2uyAPy+AR8dlW4y2mLgOVq6qoan5G16OgvIhqCEqqnX24y5JzT5OPxaPyKB1EImeDKbmLyNJuKs5sw+Kdf/epas3GVLlHnnBEDtZgnjPImJTYvNHq+SECLzRhCZAZKU3OtCxxA7BOZlCi64DfRHTObGdAjK8MjVgA9a7SxKiLGHfFvKBgBCBiNI3eHnbBrTUXmr371/vV17ZY6zjwIoFSG70Fjj2ZULEkRkMCDmYmmavkmSJNdIFdNMQE7WPA7j8s14chIi4UinKPRheu1+bqf3
OuQlR8azIwCkhdsyK6NmCaPtg6FXbnZYtrvvcxeWrrz8rhx4N1VO3Uo+Omv6cNaXkpkjUirgDILacRrUICAipIxlwdASAcEMmUBVmr/bN9S8PjYAkRJyEEGezbjabqWo4JJEwdzcT3AQkIlN3hFTTf2e4d5DvvvPkfbvwPz54/gff9OIn+oP/9v5L5eSsl1NoY7Sbpaf7/+jj7fFrHxn7P/893/OT/+BfvuCOF5w+duKOu176/LvuOtx79H/9/M8/94W3f+jdv3jh/ntjftd88TrI3DW+OmDmhBgikibym2s4uzsEEiiGVcwFINSvMjwTmCoApjWduN7LUMHLzLQuZ8BDA3JWAKAqGEAqR90HIRHVaUq9YBiBwoM5NU0Sqpq2VL1zat32ou/7ccj1FV2I1E+/d0NEmUJTA6qPhBCx4rUqLgASgKcUVUY9hckXM7OYXGKpjvgwtHqhKrsBfDRNgV3TFHBVIEMDAwTKagROYIgRjgEE0ERI16lqmHlQnXu4V9gyBYUHEAUzshOah6OCLnJc3d4ev+XbTv/Sb976O78zftv35sPDmiJw1I1FBGMAg5sBgEhCnIwWwBRRQRlrB0at3CPWzY1DTBZVQUJEtTqAnrrGhthiGv4ysjEDhAICBEaVcNU7Kaa2v8pVDSUiECmgaSTnnEjUshu07cygMKfcD5VE42bMZCUzMxDWBFqECdUCUzw5CNX/i1TLQeZNWazKcOHqRVam1On+8vD259j1N/Pqq2cOWty8ZV6WYyOzkRUZeCRsVLVeQ5FMmlaLEzC1ceSVCnIGBCb1Es4R69E9IhLUcUAtwmoWKQCsn3GycAKkZ6igscOsjshBAUEEkG1IRCc2Ng/2l2MppzZF81L3Lst+fuTBe68eXlEtttJWZkNztdj+dcdO0TJ2rts4M7vlsFdDT1gBa0HMIFGD1dzdInI1JkBMcm6isCJEDqlWMxGIRGEQCEA+TYammTBG1ChsVDUmQgZGrOw2QEwiPhatOyiIhCQQBcAwhCRcgzCrxjrrwt0Fm9Cqnis7W8f29/dLcXNFprqlIiJmnppvAHernEqoXx0AWEAEcJUBFAQGQJGmfvhZywyimCJixZ1UVAtRAHplpFdsi5khyVBGCw/E2WxWWRYVxMHAmNjyWHPMEDHnjBHSzWIKiTdHcANA17BKRAOUiHBbewjATSLQ0IAAhJhrYo102XJWrW8fMZcyikioqVYfFBNRBJgZBcAUJ+4BUF28buvRHXjdjCADVRENgJn1PtSjD7B+lR4QRXMlpgRXBxdYOIG5GUMy4Grjd6/0AwhCQQ4Pda2VZb2fSgkm8YhiAWCBhOEMJpxK6Rvu3CUsvnTfQ7y9efVghIVsQ3vtdtpdMOBoy71DHDe6nUPtJUfLMwh1K4ZGlgPUo/XRb9rAre7imPjy6sSXrzZvvB4vN7O5OUIITikUsN5oaMtjKDMlrOxSc/OUWmN0dwhPjBgQOul1RnAwq1TgiCiqiGgQnUjxmPI7wgQJMTG3DSZBJCFGD0JYhc1RyGIlSm0KtWrUarePveVPfftf/XNvee5L/8xnnrj0lm967WLx6X/7m/fQ1tyuOUFAD9z3yPd/25seunzFcO//fstb/sbf+Ov/+31v+/7v+qevetUbPvGx9++NuMzDZx66fKG54Sp//fNue+XWzubelX3g+dZWx4xVQCxICGDupLk4TOwJBCcmIkYIM80jalBwwajuoDp0MowITzy93hpBALDeKxBRLZkxIIBjkl5i9VYyPe0Prg/6HBmjSKTUJp6lWDverGhi4bnU3zClBBGqpWk23BVjyudxCENw4oSAVjvgWjFCHfiDeSR0CAkWBwBQosG1caw0BjN3EMEa3qICLQeEuZAIMXkQzlZl9KaR6cxGj5r6GoiYgBkxROpuYxyL1uGccANo7AbQaFWQcetszaHrrLt6yb/uOx740Kfv/P2Pnn3Zay/fcEL6KBU7N+3JNYgIMDVcSjEvajrpBomylhS0pmpTxToHVhk3FLV26kdDszJzpRBAVZ64VvtjIEgrEJS1iLlN2hUoGBzBwqHGAA5oEObOCBRYwpk5WxEhdyWk7e2tw34QZMsF11pcQgQKAC5esEK0cK3xQwZECrKIND0twUwawQQ8QOJF9tym3mGWMujOzu5dd1L+4vbD56++/Hl+ZUzQGLuDGVgbwUR1KFrcwUOYIQjAJk+5GnpwkqAAnS7agIm1yyhH0+WpjnyGpB8RE4mOmdPTO2Azw6jBHMncGsSunR3a8Fh/ZbHRLYzG809eufjE2UsPPXH/Q5G9L27IY79CH7d35gDdld1Hztz0rM2Tp/cuXZ3LZmko+9hSNx2dtfGpWeSAUJn/KQmKTBNlSCkNo01atghAcjCIgKPeF/iIYAZgANAhB2FxbSRZ0YbFigWBCZFBAsquI6gCWrggj1qk5vysFZqIaOCoHoRAJsR7V6+aGaMwp8H6IAJHIwJAd6uaeYio4iZZB0tgOAH3ZiKTcLsUZyB1Q8Q2yf7uVeYETJgEAy7uXmHmpul8gnwGIjGzutWjuZ3N1E1VGZFFimkwgRp4CHHFZnVN2847VeXKDTEVaWrVbAEcHBjMzMj1NCciJgbziAKQqubFwQ0BggG8Dg4Jqu5JmdndVJWYzGxq0r0GAwMAJEoR4eoptR6+0kxE4QASpoaAAhQR5lOoV32RTDMAChOjIGIIHhEmq5cMPRg5UI0JgxgCDCEwAh19cBVXqKUYVZ9HzRYBTk24I+ZArusDDwT2knBEDIgt72+9bvEHH/vd133NZjQZx93Hzu+eH28rJZ1cpMXW8Yf3z89oq9DYDyt35WbmioQphai1uSlnrrl493VXL18oZ/3c/QcvetJlVg6R26kHq5OoOihg7jypKlOkRF4sEVHTjlqkKgvWHFZkAWQPBCyxhvq5QKhVknfJAwlz1XgACiGlpmlSSgCI06xPAglDoHjwLFJiUsw0W5RsJzb97m/6kR/98f/0b3769Ou+8U33nH/yT3z93Xdsbf6dX/747rlucfKG//aB93/yc585e9n6lJRe8J3/7wdkfqs++28/9Fh02AzHTt5yenux2djuBi+fOnnjTUvvIDWLZmOOqxUQm4NDQ9AjR14WbjYAVjCCM4qEgAiBmjkEMDUyZmVmQBeCCLSqaCdKbWtWAhEQHMBMK4xd3dQwggwQTZvEAIYQm5LmzBqeTcNUkCACkQpaojQNSs2TCNIkuYKY/PVtQB1QEzMSUGqJaNp7rE8HejrS42nacJ0j1emWr5lECWDOHRSr/xAAPGztepfsLsTTdg1JzQr41mIbSiAGT44f0rraxFD1NjWIWErhJCyYVUspgOaAVZ5mCIzhnlduc0vAeqjDFox73/09q3/308953/sOfvhPHvZlNk829o13CPkAteVtg7Euc1EhAdVGNtCJE6C7K9VYdKJSTIRmbRqGEkQWUeUGmIIS1qOvZK36Z6/pBABREMBIHYiYYsxZmDsSzyOgMyAhT9oTJjfvXU8eW+z3vQ3ednMWynkoXiR5FA7CEQoDSMRGO1P1UsoMkwKUSi
BUl0pDQjQdBZGYHDE1AOALSjWtZQfiwKNQtxM0pDH63Yt3v0S/+psbX/wk3P0NI2TKCUJSk3lMSgWIRJqccytNcYMpLz5apOwWTIJgEKmZkULx0pCwwqguXTuqMRIEOSkR2ZSzyeYO6KbeIBNRLnZ0AUvb2LAKdqICFL2lmedWaZZ2/PDCuUc+e/6BL439/tkDfOD8xc354sRi63B1MELZ3tzYWw1Y0NLq3JUkn/vI+fPnn33nS57/stfnEYk9GUGAgwWgOTE3bj0wrS8tCBQiHsvKcHQzYigZ25mU0iMQoJJJCApNkul6ewahuWeIBDVzPojIIIygWKaGIFzDhKCRJiIgDB0b99ozhU98XQ9PQgOWxFKr7lI0pYSBpkPnXE3VlgslaFNSVUYwpBoGXIelYSZJci4oVEoRIgSU8FJGqJ2yBaXG1Vri0ZwDEnfGGOZ1H8qYuEnDOBIRCpF2qe10XAUCiShEsJg7hrXtLK+WCJza5B5N08y6brW3THMZLavnRprSKzdJzYSmwMRxHAHRSZXrFd4wkoUnEXVnShX2xuiIYUURseVUTIu6hXeVeE2kPqG+zAsnlllbVkOTUsk5l4JM6JbAVY3ciYiDMAiYGBg0KAUCIDdgAAH9MLpH7t3EwQLJIgyRomjGiJQwF1UNwgo5RWRXTCyFMoAyMeewVoI4HKUvJDGOuRSLsFmbMDDHEGCostXGoGMP0M/vxmvbTz3y4YaufRJf8sToTKe6DvvE43JoRkCzNoZmsbG3XHnRBhSDV4ibeEl969KyfedXrhUS3JIhl9c+tfz46R2PJXAHAFRWDWyYH6oxRRnNoAYbRWEiC3U1QCY1w5oHX7fegWCMyChIgR4iUs0LgsiI883FcLBskYBgZSpA2HEzkyaEELFxiAhnnJvsic8jqzTNIbSLdiy600K2jTteffjKw+/5Fz/7o+dJ3vTqb7z0xP5z73z2f/2/r/t//sPvfHW35xf8hS9D0VMpUZvbw1N88sKl++ebkObHbv3y710d4pHD7XTp4vV3Xnf9ydf41uYvPx5Nuu7O2eXnb63asSM5JNzoU6aBLKQlX/qygS3FHGDhQZ4AAZEMIywkEUulYk1otGlAClxgWpIBYACquWtBYoFQcEKkmhMUDIhqVhWPdWWbTcG8aRqMmvfiYU40NWru3nRtbUQEyS0mbjuAUwgSETEJEcXaH3KkET06KNdf5zpDZpKPrH/VeKP1v1brVAxoarHvdYKDAtFGADFIIBpSJe2BOTg6IoZOyK3MFIQYTUqsiYtFVNnR0UKXiIhWnqIYdNuriwfLl9z1+Ctf9pyP/uH1d9/11EtfubxwbiYnvPSQyiwvIMLJFU2qkxWQglIl4TtmoJQ6wECP0GBgrOL2ahMGNAv3iYEAlfnsjljHCVxKQSL39d4OgGCyCQJApXRDHaBjrV00whlxKLmSp3IZajTeAA7oswoBZQIAIDByEwgkdnQKCq8ejlYSMDlhmnX1A081mK9OulMUDGjkmM7JsFBYRo9IxzfLbD47+9XZYw8/cv1NslRcDD4Kigvw9vb21d39us9LSJOqIzBDEAmZFzBX81VfVCuepbiJCCKjKwnqNLKiidswmeMmXUwQVrLSUQc8my0q8XQcx7bw2DHNnPefeOLzH7104fHifuUQyPKcwoex98OxX7WzbjzMy4Plzsk5yvzJx5/qLy23Nja/+JlPHr/uphtuu7OUgiDuXjRLkzzQ1VhYdFqaqASEhuO87UzzSnWRWkGagYTwsgyQuARRQN2wMvNRdgUzSwUzuVU8eO1HE5KWYBamtckHOMitznIjfMrMKdXshBHJ0MOq86GZOExRPESQCJklAUaEllIhblpDWuoEpb7/lTmJFOgWwQBV5+XuWYt5IHJCsjF3i9k4jp6SEoAGMyOAqpJCy5SwKaWYeyklsUw/OICKgiZEtRwRxOhWapB2BgOyrCZMHpBzlrYx041Fl3N2tSrbdDD3cI+6IhlLFqGch6bpsCbIgpdRu6679vip8+fPg/k1O8fPX7o8n81Xw9IBopi7N00jNZHJI+chQnMuNUkQhcswEqIyu1bviYng2io/ffjqjl6/oensUjMmUlUgLGOeNa2qF/OCUblmBh6BGB6B4C7eMGEgCamZi5uBEcc4mLmnxMxNKSUl2t4+vuxXiVocM9Hc0R7dO+t83QN7d+yqIt6Mp1qIC5B91DbnYSMSp3JYWmEtoF2IEYUebsywzQtBeOj8yQFWaZ6SUbdz7Ivv+dff+W2v/IXrXnxm1VPXrBJhWYFwEeeC6D7lLk65sRHhVXcznccwrUfXMwA3cwsv6ormUvtpEFVKYkX7fpSulQqvA6yCvtr2ARIqlHnhBFvg/ThHNqYuqy0ImoMHhze/5qGf3/ue9/7iL9x447W3nbprdXChEfgPP/qGv/U/PvJHXz7XbWwGnnOft4d2QR7fuKDjxz7HLz59y6VPPvv0Cx6DZ5+5+zXNYoMQsG33+2F7GL4cJwo1r906X0bwZLq/u9icm7X9sJ90sce5CWNzCHRUCpiSWChowtB7Iq7PTb3tyMM8eViu5FAHilI8CMIZQVGqba5uAAEisUJYKVY0IkILREBKYz9UXmA1MwBADXCthtqaGBoSqtXYRwqGRIwkLMwMTJN8ccKmT03zM6/h+leOoLW41d2pgSqhXUexrO1SiLVQqGkS9Z9HBLREsSYJ19YcAQmi+No5KhFRl+vFrZTqv9cjh3j9etpQTexjTy13Szr/7d+9/MKn2w9+ZOPM87vFdj+W0kBg8g67MhK2GfWoa43K2QhECPagwHAH5IyOCUczqusRRFsruWoIZjhiZa1oJOIwq4tAqCYXnDLk6jdXtVsYoQEUDpPfEACcmFeroZhySkQUFDUfkIgSwFFgMyIIMSFGcBfUIDggM5MFM9fbOAFFTMyBSgjiJBHRmY8CrQJnGwnFvSz7xSLZYv7ogW588UtbZ247iNw4kRMIIYMTozACJ0nDMKBwILDDGNYgEgAyMaIAYCukCoyODCQM2LA0XbdcHlAwPCNKxSMmPT8LPVMCDVATXkuukj7WNjs1PBzc/5nff/iBL/dJWLpLZy94k9Lm8SsXL636PEAZDq82hJubi1XfH5Sh6xoXuro82Dqxc/7sQ9dfdwtwM2WQIAJ4eAg36E7hCFPWdYWZEyWNMpsthmElODtYrhCjhndU16jgBGWbphcAFt44GIY7gAQgWGDN36XQmibmAOvBaRVSIAUguqqbKQAIUzETYQoiJHd3BAtvWDqiohoURFx9XIjohL0rcIXt1OV0YECtWc2sUqVq4A8iR4SrSV0tmRGAlZG5qotImgRHjjV3QAcEYgiAMEVhDLBSuGnQHcMD3VQxrKoEJjVlqLMLUCJRVWQupo0whpkVwonE3o+5UqjaxEMuzPXqh5yHyg/pOkmLRZ/zxf1dZXT3q6vDgjaOfSUFAD5ddidu3L2UEQGa1Ax5nG9sDcNggG4huXa0QSCBrhSGHjXcov4ODBE1YLRayrx6EoDAzKQRQDVTDYtqN6rmJa/5FwwtAVDRkViIa
MFNbyWTCsR8Y7Hsx5yziADgctmbByXfx9Zst/X5Fs73Z91w/ctfdu4L/3LrZ37ywet/N31be2rW7j2aYgapqArb4eoyb0kiPLjqbZuPHbR04NzymNKlY9DFMOYCqZu975abX/+ZD9z25jt3BWe6YkJsaCO1lM3QKnDcg9wACRgmLzxUMdo6eDYmQXhVQETdIjISIdSf8mroE9Lm5sZ2c/L8pYsCAMZoiSSwJgoAAFsUSpTAOKcRx1mmfuZl89SGf/RzV/7Lx2+d3/RNWy+8ZvG1899byds/+sj3vvTYzSk9uYJ//udf/+9+49Pv/INH4tQpg4MZS+PbB9chXf/N9NhHr8qdV+54ySnZSotNJmk35r0hYnsp5PiFy1+lE3fCk6eY9oa9OHbj/hP3lbFZNWMCSrCZvSRkAfZsHvUFxCTiCAzInJqmqeFudWxLHu6mbmKiqsXXjaYFIxIDC1VQZxOCCJoHo8GLVlVRKWUoebQaxqgAzszuXd1NJggzq8D9qV9lrqpZIqbJFw9EhMx1D18v13Wcw9P9SuD60lxfwDgZSADqPsT/j765xtwG4RSPWLUkasAE8DSfNSZ8vINgqNU1xuQwQ0yqJqaqpVBxU1XUKXUcyR0aAeeZ2OFVuO6Wx77vB794/3uO733h6xav3cXzG0ZLmiGuei6JEzuFlynyYF36ASCksJiOCSGKiFST62qoXEyGMMQJ2eNu9YxjwVKcGSOQWQqsTS811qn29cgEVBPloMI+MFBEmCMCmVKXmJkB591M1ZEnWkAQgjkhVPg5AGD1SiIk4qp2cwSjaIOFua4VYjYLhOJWBTwzNZIwKlAKNs0hqBLgbPaJF96a+aHX7p6F+YYbyZyhkTa792MnCcxDTQDRwhGCBWuKggcIwdqDTJWLwugaagoeZsXBAKQ+mXV3Pj07R1v/Z5R0iJxzrhupWtNsg332Ux/6ypc+BUYeTTPDkcqyH7Q/iIBhVOnaY1ubaDr0ucjh1vwYku3t7YKDpHjovi+cOn3rDc9+QX84VudAKSWi6iegEHgERxVLIyD2eXSMZEDSBMkQQ9u2YTDmFUFqKFUO3VTRMkWEO9r6Ma9JChGBREDIkSZbGlWolpqZIPna/CpEpdQHe0rBIkDysElkD44u8Iz9uXsAUBIEQrNK/Z12MWY1iwoZXZURs6q7p5T6fsz90DQNKhQbiSiDNdiGGRHBUAzN3VmkEQpE1QDC4CltlwBrBEX9STFiXwoDi0jbtMZSq1LzCVsxGS+ZIwoi5jwgEwRZ0aIajiQNBvRjrof+hL1cl++qObAd+wEkIYWrZncBVFcLtHCpG2QznUzPLtJUoXjTNHt7e23btm0ax9EtgkCYoDreHISIELV4RV1OiEDwCLBwpMnP5hYAtBqH2jNMQCl1qNveMCRGt15X86Z1hkJuEZ2FI2VkinKw7GMdwCNIHpBEBiuduUM78KzIYrTLQYt7muf/vl55z6s++l+/8E9/4r7XDdffsTGnftQ5lcTzS4w+m8dyvjMenpT7brKzx9PFm2K/86sRHOhQ5EN/oGd+7Ecuvff+1z92zy+eeXE7joFpyGE2FqeEROSVfAB1El1JyWvFLqy3jVUBXeX6jEQiqsoA4VEMEKdCrR+yOZiZQJV4IiIgeujRW+1IMJJCCZFL3Xwz5s35j9+3+bYPXbzmha9JmwsEuNoHom0eu+YXPjV88w3jK1+0uPjQ3t/85tdec3zx8+/4I5jdyAvfS1lQACyfeNYf3HX8WTKbX3s9RKR5C0QOSPkwuvlBpO2HH7vnum6Lz89k+/F7P7i443mX26XuLgU2OM5xtwFE1S6gmiMipbYhEUIR6bquaZqqyqsNItWtuCoKH/2XVQOBAL0o1aM2nBQggLMLu6oXVQsftKibW54zZy0w9G1qSDjMY/CcqU1NmHpqaXIITbNiwqf/PmKyYCJOgYBANVRl6mXrBfx0AeVT9YTVWrOWZU5lNdSJ9+QKmNyNlUxCHDwdyGsYWBhEOCBTfa/YzMzYLCKgEKvVGWAyy6KllKnSdxRHTovsI3eUr16+9kWv/c1b7j9/y73bezfdGDsFSsJwE3RULgnY1mRQh5qQU2fFEDVSzWuYqCECIhOCutf4cER014ioDTQAIFIdILu7SEJYt1wBWA3iWL8r5EnPU43g0EhCDGZu21mgA1MrCQNaSd4Cc6o1D61zR8yMK9eappkBM0MEiwRPFlcza0laSapTpK6qesPIPoDlBLFXEGABcniYEUhffudnT95zonvsWzZeu7e66gxZi02e79CwPIyz2WIsVd7iDXLxdT0XFqkBNWTS8AAKdBQK9ZwHEg5FX9sxATyqbyqI1i/s0QXs7hHetq0g9X2faPPC1fsfefgrMlq07VLw3NVLNGCvuc9jy7OGG5Fm0S22Z7PLh3tpLHOT4sjbO6axWg4Bu4/d/+ANz7lTNbepw2BEE2HV0jRJgut16e5UAzBdmVMxQ4Scc91/E/DOzo7msHGoTmKaSoeIQBGpTwLHRAnXCn8UIgsLR4SaHFwpBrhGHKjqfGOzgpSz5pSSWACjIjKJACTiEt5rkbYx96l3cY+iNctr1IxYRzXT+xgRTCTB7t62rar2fT+bzTbm3eXLlwkbN5W2SU0DHkIYHmJRolRKvJkDTQnGUxkEIETqvlgsDvaXVAPCqQUAV8vjWBPCgYmlyVFKLoAIQeom0qhaYrZ1yEQpFoQMFBSWnZkRkFhMvWk5Z2UWdxjK0Laz+mYRWSkFKyXUEan+sIS5FstATcrZU0oOqKZdI+FaIgO6MXqo1+s3CAAMORGTazhGhLrHtIBbu+nWYzliUjUUdgQkBA+tCC+IIHCMIGdMCEyEzEgRWgYnCFCDZsxj0zRd21ge3a0mlHg0i0W7Wl3p+yeXuycRtppxn66b/b/nv+2XvnLrf7n9U/fd+Dt/6yv3/fLZl8GJa4bZibCrN+SLt1y97672/MkzPe1fwqHpmq4I5rKA7ePDctzZKG9cHMJ88/qXvKrbP0CGcCipnQcaZWBMxccJTIA17B2riz0MHatf2dcPT72Ga4AQBpb12iiAAsjMXE0RR7XFYiGAkBwbR8UAAnHEACdorR/aLctZTm5tYj68CJ+9r33fvVfmN85uLLt7SrTYccCDAHPfOUHvuffR3bP73/31t9x/5ZHvfs1dpxbdT7/74/vDZpOS0qEPDMdPb+0fPLXrp29IVItBwI596LYQrDRt2h/nG9dtvOA5x/r+4U9+7sLP/uVr/sT/tbzt5lU5EHJvPMzUw3KpBJZ67rdtEpEqI6s1oCOYa7VJEFXfvIdDScXMkIks3MzcNTxrqYm9NGvdqajnPLp7Imo58bSgNC0jgsMK6igypQQepRQRrX8urH84wnRU0zhV4dt0vx5dpc/8NV3bML386JMJJp5uaWp/ty7SnzGL5ro8qRKwCaPJMCWd1hsZapdfp+i1P6iHQpDV6tXM0Lhm9bi7WbSuuSAhpZxWvJqxvP7k1/7MLR96e/+lv/6V13hsBY/gKNKAoUepFhpkmqT1SICQFBtugGUcxwC0CGQKx46d
4GlrzWTbx0CSanV1IHdFRLdgsArgOUp49GoKB4xwjyAGRExMXRJmRuGURESQqW0SAyJQTWydipsKbSGqhij0CAYKQKjWlMC0bkbXXHsAkGhqwFXXdZaLQoniVGLlXtv3/Tx4+JmdG37v9nO/2z36mgdfTTw3IgkXMxQ2U4FoZ3OAUAcCMICmMvFrQ0BEIg1yjgIBwuiIQgRCXiplpG6fAmuwLESlwDDV5ONnaAvQEdDMmNHMZB5nP3dP3l9qs3O43KcotCp9Qonu1ptvXu7ub2xshMDV8+d9MT9x+mSuOiMrs8WcHU/OTiz3D648ft/F8y/e2b7Wyzr1aFq+GgYgkCEYoyMwesuCBgaQUsLi4gZI6jgcLgMTgSNSw6k61hjRtdTXFoImiiXWa9gjwHHNkA6ICEH26jykAIiaVRCEBlYf79oxETgATd6k0Aibonk9qlkKa+iWVa6vg6MDIFbGcIEKYoRw1bqJGIYB3EVkWFv/G2rysqdZN4YzcRACcTChe0UtgAM6GEJ9ywglqwOTqQmzZZsWWADMOGphZFVDkggspk3TMUK4IwKQWMlVBlEVaqq5FtBuJsymCgBaKjQj1L3pulLGlFLRXNy4SWZGTbJBG5ZRi5m17dxda52xbp2r0ZxUMyASkaipVsodEkEQR4Q6tIhBFVWAaw5MXSlhBaVNePUkdVbTBCpEAtKACVMJgIhtEsIQj5m0RkDqDtEGqVnbtqUUJmibpuQBkQgCadgbhig+W5weCuHJ7prTtzZtd91dXg6PfccXbv2evdt+4Rs++Vfu/a2fuv+6r5TLX7Nx9dnXxzVb286zg8V2GrfPy94h0aUb3nju9Nf2i2us2QY3GHfPNPzkxU9euuZaplXC1jw8ymg6E4JEUOoZ7u6BRBMUCkKQoAqPjtaFUwdE7l4bkfqxBBIFIKBFpNRSgGUTiHA3U11LdqdebUyFeWxOpvFd73lgSf/TzlybD7W3sydfvNmU1GxAqISOjiO2Kfj0Hc/+xG//0n1PfPkv/9A37R2U173ozhtPnPyHv/g753qx2Q7BUnvtabHY2Sl74/WnTyqCR+jhLmGybhOBRx38d35z78Y/e2EsL/+Jv/jeL31m61/+2Ilv/+FrfuSvPPHIFxcsQixIQUZQvbZAGPUhPrL3cUVnOCXiOq0dhzyOIyUySWam4cRBgOqmEOza9jNiki6hCoW0OEM3Qao57CUcq1MtF82FOYmINY03QUQped0V1Rlv5Q/Uv0FhogkUhwFHM4pn/Iygzkvrk1hHkFNeIYTFUVNYlyswtcuTsyPAnHjyWSHh+taIihk66ohqlzxd80S1kyMiYHd3SmJmpKqqNVRuFUNkQHQpvErUSLcbqxf0z3nFlac+8twn//Dqg2944AWXwZvIe6lpNFF4APL6+5qmJxC5ZhUDIkuEJaSwIMcgJGCidUQtAAAQcL1Y3IOQqu/ZPRSmZE1zC7AK7E6MQlwpWpSoEWpJarQDN0kIU0rIkohlYlISEApx1Y07RDAlXHeSazIak7g78iRGn1FSNyCckj0QEWAcRxQcAca1Vl6Leylg7manyuaLhjs+8eyvfvr8Y1/X35ExdzY4o4NTYveQJi0Pe2AyIA6vEZ/kFszhEyAfkBNTI4Lh6uZAjaRSDCkIiCp9jFIQKjhNaOb/o6ojInfNORszMYy2/9DDnz1/9rHl2EHnx2eLDe62O2xOtH1/sDNLAPboY0+1JGG0tz/gKrTsRdaB2n5cPcjando+fXyey7Jt+HAoyODuxbxNjYbVo3/istX2D8ncpW3HcdWmOYa7R2q4lFyJMiLChGZOQSSsqmoFgQKi8mWr2xQRa849TaEqWAMTHadsbEQEiHHsq69XRCyc2oYDkkVxK+hVs9pJwmLEhAEGQcLMEqqefcKxQbi7iDytuQDmdRx3DYastS85UAAQ7h8eLqSptv6ohSFERGpSSilpLqoqKIpaA48AYbVaIYm6MXPbthEBDGYFGEC9nc28711NhJ3DORKLFw2iYsoIYWoewkEQ7hCuFZFYJwH1IuxmjbpDBAxDwxwlJyIPEIdjWzvjOF4ZDwwq1aRunYBqnaEBhE3Xah6PHTt+6cJFV89uFW5DiAkFAcGD3QlMyaNaaSZYdABEbW0AINSCq3C06gSBURwxiZDVnLdpA511aJqZEKaIYlbcg6VBkuQWlfIZplr9g1k1AY0Ag3Wn89VrztxZjp0WQBCGEN7YPv2KrXc+mG783Jf//vbl3/y61ZO7xz69ar44jPecw8NudWofFrMTG8733vWDV3fu6rsTpVl4/VHPT612r/zbZ//AzVtXz+SHDLAtpXRTRsBheItS8zgCrG5AYYp0hxpLEkdHenUSVcBTVHEzmlnlw1ixLjWmVo9jCY++jMvcsyMjmUyG9G5+6vKjF576xz92y0c/uHXmFTf+X/+UTu6Uz311OBYZZ8wpSMBVSCB8dNPAn/ixb/ntX/n1X/k3//11r3jueOtzDjeOf/+f/7bf/eSTDz72cLNrx+A8hN86frbrb7mq3zZCAqTCM0QgHbVdHG5fd8t//ul8TXf1ujvLwxePvfG1+3/4gb13/tKLvvHPbL/425567BNdSkmEGcXF3WcpNZKarqv3X71aDOuk1o8dO9l1Xcl6eHCwv78Pa+2GuaNHIh6sFAwwFU1ItMHtFs0sNQ5Rygjo6g5mM1Ui8slYG4gQauqjZkspuVopRUSqdFBVw5yTNITMVC/DSnT2o841pglnLYv0CP68xlzUL5XWG1Ve++psAlwiQA10mQqt8KMhJALwlGQZgUAU6FDFTRYBhFhPMhExNAZG52lNK6KqqtoUPkzKNO6zHzsYeuA9cXD61ief/5FbLr//9ifuvnjDYtUddtRqSpDHSj10q5gvYQZCV2N0tOwOSBSggG4RzkiGRHWwUP1+VO2h7lATVdWsaZtax6g6ek26mHSzDTMjtZJAkoGllBohIa71lszajpiECRkA2qYhImZyCBPgILApcgTXaWEuhABugUxQ+aeIAKAWyAkZzY0BQQ0tksYqj5FzDIOvBsi5P1z2VhQDEHBbXrF35mN6/+ef9fAf++qtyxml7AxtUWXkcRxXeweVsQaA6FFCuQaeMRMymruQI6BBAui62SqPBXyWFuiolt3dipZS6sQ01Bwg3FJK2fLTFzCTO1QxDhMJNy/85lfsba3snNqVvb29YdEcvzSeb3ISaYRgNax8hhs729I26Xg3m6VhU3hz3jab186lObmYd+2zr7vmZroBFIhBUgqLLjVmNus698r7IKkbMopKJQgryJRrb8CIpkSYWCJMVQGrNjAmYAUTAym4Vv83BSEmRnZWAtVAQkKYAAi1VQ6IiJzzbDYrpTBzMe26ecLI5gGhbgLoxYKjd2MnRCYCRKybdVTrSA50NDdBmS5Qd4ypyK13CWDUEVcdt7JFm2QEdyHsmq7rbBi5QGICcFDL7ohYwh3JCcFBVTfnm8vlMlhMFUnMAsjNlIgCrPqkm0b63iUEADhRgJuVra2NK7u7yDXkLSCciMaK1nFPTGrBzBsb8+Vy6aE5Azf
ctG3Ont0AHD2QeSjl4ML52hgUN2aWlEopibjr2lIKCxTX1WpILJcuXI5AQmEmHy0CI7DGzkdUVYcJNVVK6e6AU2YcV8x21DjrwABQQ3dhWoVWD3H9mGvvzyJgC0idWvbA7IU7tsgxlHBwiMRCNecbUS2AZITC2jZN/4Bv+vEbmZmbZi1NxGZ54YY7Nh9c2a81d3/36vwf3ntw3xv+whOnXrTb0nw88Cc+Kg+88+z8xefa2wrOtd2qIhAACIhz3amdRf+V2DpxeE439GQuWBhiGHl74WSY6y0TYYBwdOcWrKq+6RBmJMRgRKAK4VdEcDd1bRoCgBijVh6llK5N4mZ5ZYe03Bxwf97RYZ4DHC5OfOaPzj65F3/tv//25R//a/O3/8qZT73r3m/6gXMXz+uZPnXzIOGymo+XVmnb0kah5vxAP//Fxbf/ybc8f/mln/2tD9375GY7OxDh7e3m5c+/49qr9778gd/Yjnzf8Vd+6pavyeqR2K0AEnresYMxNpc7N3z+VX/mFf/+b39meKots604sdicO23e995ffeGrX3vNiefm4bJh4Qa7CAvH1HQtUJNYpNoyIiJKEBGhXLh01aNHZFPoWobg4gGsbiQOgRaGZLKgYS5EgNJK185KKejRJeokRUSuiAl3VTVX04oTcNcILLkMteScd204q7ZNM1PPGOFNysMwTx2gAzOYYytlzI0wANSkI4/glMgmtsf6QsVgwNoSIrq7whQyDzUcau1ZpICaoEeMHk6AHlbfT0Rx14AIciRWM6QpuQhioutxEndrm8ZyCWFVzUQppZCCVgbleSmFyXOelTTK3snhxLc/eOoDd114x+0P/qnPvUR1nCEqY1IqZkYMAEhkAayWkLKHgQlRuAogBCNCaiRnHUwBGZAZ0ENBcABrHAQRa6RmmaIOE0dUhjQRETCjEIk0bWoUyrxtmSQRt20rTESUUkKmWk/UD80RamBk6wQAyBPOPgACHepxgQAJA+qIDd0qzDlq1VNje4qp5aKqvQ2lH60vy2V/MBwO4xiBDaEwMcvL48wNV2/40vHHzm2tnrM6dW5OPBYM4SET83xjgaP24QCGTOSerbAwqhJKAEhxJpzN5w7Wm21ubiJizpkbHkYUk2U6ryPL2DH10qQhBTsUP0x48ugCDhyUkyHOuzLPSduuO3Pzq577+ojZ8vDQd+MQl80wAMRhink0CLZxbHFQhmHVH4Rtdc0GHO/cDtkddCirPvGl4fBMt9miHNuU/Vw6ItPOorcAD0fEBpzcpk2EBpsXCTeIGOqoH8NbFkL34CygBKn6JBO1PLMxK7mbsQQihqlFjAYAIIwBhKkppThWcTKFFU6N5tJK24jUrUqbGg7HYHIr6kLi4YagWZumAYuGeDGbuetYMgIHU3FgYGbMOQeFAjg4Tpm45EJogUEFwzRqK+simQShT57Ucj8IeC4AKVJpWAI7xEELEQkEeQiEzLpBVyioZThx7FjfL4urGzFwqDbAmE0AL1++TMLYNlhGg3pX4f7hipvWS+6hDpU6AFAKYOBovCDYAOa5X0WEUAoH0xi8ZydBrrpM9wADQWFkxDCoZ4OlJgXiYKODYzQsSSzXIUbd94pDTwooJSgRI2RzbaU1JzQzqKBsDEQGxoAZshA6eHELj83F1uHh4ZqXJy2JWt6Yz2aLzbPnLszn86zaingeu8SK0UliSn3fS1oMq9EZrc7AYgpiD4aI+UjLjRb+aLjLzdpWMAzBvAwoHcw2MWe94bUPPf6r//rR8ut/7O/65nHy1Hg3T+3FW9984tRLtmw8Np+f3Tq1npzH9B/Ew2g3vTwxu/6mw/svJd5xTCTBGWkOYRjRWCBgDnAgoTAEdocgMycEImyY3LRJNFIkYi9oZu7E0hUNwgChw7FvmiYoDF1K2HL/yX25cdyZ46NfvubG24bVXZ/8wuU7b7vp279p84l3/vr42d8/ueXH3/9fZmdeZZSoaanb8HHJLKvZqQAQ7bvxYphTA//187eBvySefYceXo22A5mhK/d7q8WtV6//3q2Hfu/xM9/BssDZxpE0idMMZu1LNg8vvveXPvqt/0+S5uZ3/ZOn7uoeu23P/eDmTwU9+NjBuSujpsXGrbk8W709OwABAABJREFUBXF5GLJQ1wirBpESMmEdwlQeUFU/UqLWzbuuG5YDwhAR1M6bUlLbrPqhI5cGlgOLbCAAN81Mug6bQMhR6n3VBCkKmTEXVQ6y6u6ot3ANMFot3dXatk0NAbACWMMwjC3LYT5opGGk0hAejhHhNQrOg3lyOBztQqbhLUUlA3PVO8AajbQe8NI6A9UBCKlmlx79qq1VRKnaLiIMc/ApHrr21DFx92p69IS3ZebAEhHrCArOyI6SgQuW4kGm3/DkCz93/Yc+etO5Fz711PPPnTk/P79ZTjiNRDR5rSoYkoASz6wdhoHalJo0ltznUVKa9PXARTWxuJmIFC+CuN4iOUxw0GlGLFR3ugAAjMTMraSmaUC6lBICJeKu61gIA5jZGmYkJEIgDOcAckTEmuN0VOg8PViu4ZEWiBX+URmUT3vGEFGteNGcs+YyrpY569D34zh40ZYldW3GOY2cZnKy3Xnz7nN+7sSFD1771Refu2lbc0i38iHm7YaDmucNmQNYLu6g7rwOz8EJV4odCUekeVejbziJzFo1WyQ1zQvdbA2pjcEoWbdAL7K9Sdinp+f5Du2xZc6OPZYr43LnrD41u/TU1hMzb2aMsMkGZVzMBm2S+SBXfYYP3fcox/YLr33p183uuP7Ys7c3btrf+8Wt2SsO9cSXL3/5Y5f+5+5mOX4ysS96zS21oEuEQYOGwTurEjvwxO7upRCRdMKr4fjxY4eHh9nDAymlQlQsL5AZw9yLesOoqjlrtc77pM5ba12m1EtHCi+KiJYLUa1fIedcJdnjUJIwMtf4vHEcAUASuYHBZPkrpcxSU2fUNXnW6rnp6uoeWqlNVgozd83M3YuXKA5eu2GMQEMKD9fiEIxIEG6gOhJGSqlGtQBWPV893NwiiCR7uFW8JB7sL9VyxZIjI7ezrCMAEnXQD2VUoGJoLXBC1prKoKHu2+2Ga3jxiGhREBNEOFrumuKuCk3TmFnXdaWMlEQtaqiDuzoCSk0jKjzJ5QiBEZgADYgJ0NTDDY1TapGrQ7FgdJKyQmIppXgoCw55JG4DMgBU8XmEAxpAmJUS02+uWpb9WAF8Hs7EGgYAs9kMALo2qWViECRiOjIBqDsQGcRsMe/7XseCSVDEwc1NABYUA7AV6RKc7J9azm8BSRQE0gGiRbeX5gM07bj66qkbaPMUbp6w+XYPsEKKMlyY30DtDJ8BqQbEylRCBAt08mIyxxxChRid0K24KmNyNMEKuXJE5cBSDIgZbRrHB6FxIwbIAQSE4R7EMKllibgTHoGymzMOmgU8Lo1b1zz01Z0Pf3RrfssDf+zrH+0feeO3nOm+/KVLf/qfyKc/cmrr5EVtj+Vd+sq72jf+g7x1Ml94FG3Y2NlxbtAK6UoQnvfA/5R7PvDmN/3AWw9eMLvxxc3JWyIiygDSWJofLC
8tXvhm/5o3bOP21ZGyYoCjK5LMQEvf7+x+4Vr83OK37v2D7/zH+ObVYy/5xwAuAR8u/ty3f/qO/uLgkiNtNsdUD7klHyMPYySOXsPBixqnuojV4AgjRigJMfby/rXHTjfUS6KzF1dti4fj7uZip1+tDg6XSk3xAIKEzCxO4e5NFWgwaYdsFqDuzhBOIZX3KK6jE5Kp5pyLjsMwtF1pW+OEvvRM1EibWPqiIFKWpWEBjDFCiAEgcCSceEB1eF6PnFjbgoWkbrxonQNBASJSqoGSJzqnuVa/jeMkJjq6ZpCgKCAQ1YejOv9ijXGfzLjkNAmvpdK6GkEDIhBFJwrCEHL1JfGJXfrmB2/5z19z33vvfOTOC9sn+m4pZQ6kCLG2XVIAI6ZAhZw6Uh+HZZ+61M5EXUf1UGfAuUhN7VKfBMY4Tc4B1nsUxCDiNqUjeZ2INCzT1r/hlBIhM3OXGmI8WkNERA0DAkBDMIz19ADW73n9m6e16BRAEehTuNJR9JaZYfi46l1t6Ps8jDquxqIH/aGrtUIppW5j48AOAGneJD/ZfmN+4S/vfvoD25//gcNXHc+zXfRG0BhbA82FCVrHglIQ3b2YIaKH1sSqWmGklNquYWZBYuQqGVPKecnzZrOkUnToYj6bb3lzcHi57O3uTS5MAABYdH0czBYz34FTp05de83mlS98dtXcvT0eDlC0hwI+Bx2U93ST9i/pg++5/Jav/bPXzu88lm6+8Zruvst7n/jo+645devZJx84e/bTt91w/V/95v/wb774o1869+WX3voNtCqtNaONjqtkvPDuAMgBVmYDa0JqRLjGIDW0GpZZS9fOSymlOAgJthBWSl1YcvGA6jYi8CDGmrjtdYpuEVwjSAE8lJAiDFHqiNg8EKC62wFCiJi5lHE+75bLXqc/Ety9ndZDxu42eIXGeASjBxgRE6VSimpxAFN3GBCZBCv93AIFEACVUIJqcisCswTJHMArDM4NFpSCKYehASA4EgIMeUxt51U0yZyHIo1U9itSMjfhJufcr/okIsIQilUqCEwEEpgIiMRQAIujBiBzCiBHdYhutjEMgxAgShnHUgo6NsgrCoMAhCB2K+6KFKlNno2ImOo7xRFBUxKr6ZRFbR5Y3fYBYEXRQTUgrBGZvnhEjOQQgAIBphoVEA4k3DiCIFYgdlCNoiFVSw0Vw4uXL7vDYrFYA7MNkcyNpIa4RCCYZoVMCJ2wY4WvgRAIcQGZNQSrcbEzP1BephbC2/4CMx3KDkaQZ5XFl577w0F1Dxu12AZEkMbHpZdhZ3XxYOsGYAF3gKhMmwAkCDLCyGNqIoOSztW545nmZXHqWtJiFt40ydGHMUQcIufctu049kI8pf0QAhMAAyGoQ9QEenTE4gYRCQgcglhK0dMXHrv7yny865WfecG3r5J9y7fecfknf+rqv/rbNyyuGRIfrB437vabzS/f/RbbOvXYb/zUTv8AvfQHY8ZzgezQ6Hjzg7++df4zi2Pd2Y/+9uxlL7e9J3HnxgCgZjbtpZqbLitc1s3pkCDA8aDJyzI/2RfDfnz3F/ZevXXXnZ/7O5/4QUp7/3Bxyc6/4B+SYbeLX/kTe2+j//TG+98S/dXt2el7PvvEzbfvdKlBS6UfOZHlYlAQx+kWSxIRSE4FLHzr+PEP/sH//pl/9vcOD/d//bc+8Ovvfc+/+1f//Du+/ft+7G/9/SsHK+FstrLqbjIzAgevMeVGAIg6BVvWIWgDocXN1ADgafqVwajFowegBkjdvW1WVroQRITErGHMURW8NQslfPIsIiPikS/+6EpgpGogRsR6rQqST0upaQVRmwY0sAgPq/fr0QUG6xsX1z7RI7sUETGRuQNTgDMzVLx7lYsEcMD01CMZkpBvlLyf7GufeNbHb9j7wg1n3/PsR7/3vtuXkc3IwoPDqfLzA2o9JAERwkQkphmY2DxUg1oKFyTEoHmrZmEQRVGk7pMqUocZRUSE29SmlCRxFe+IiNTzIlG9mIkIq9oNkZlronIdF/jUSKMj8DNU5QCTJi6qSK1qFNfeMazbd3dTNTMrOvaDFR3HVd/3pV+NpuDRte2sSW3btvPFoswEaWPWaqPPS2e+uX/B23c+8vnZE98hLxxK33Cr4FWJKmZgDrPEowKAxcQiRUSqMBChOj9viBtJjBQBDcKhptNbaa9fdhzN/PQewpNf/II9kZtr/OT11922/fyj72zzYJ+uPsAPnrdHPlvu+XDs93aTrF78rNvbkxfLrqW+005mvVO7e+7gvrc99Y/+yj997Qvf/Pf/+c/sPvFfT8nsgm8eOzb76sNyZmf72LUAx/oTduwlx183lvMLaosWgrBu1h9kVwPsCzgCJGYKMlUmrolG1LXDMDKliXsFWOVaRAYeQUJM4R4RjYj6hIuBOulAQnR3P8qgr59SYoYjoZ9b27TVIVYd2+PYz9qOiCooKhBhXd0CAAsGhTtURwCRmKsDhIS75zAPIOaIGNWYoYm6ZiaYZDduEBQI4cIgLEgajsQh2BC7YYCHhRaKFjkhKQQSijRlzADgaiLSzWdjxWmR5VHRY2OxSNL4kDHAHJJACgpkYxYHxFCGJppdK0wQQlAl3WCG6NKMyx7BZ22XR00pafHU8FBy4BQEvvZJirtqNgZ+hnPS1gdOQMLGOAUBoQO6EBkQQFApxVi4TutaaYfDg9QwIGOEaUBYXQWvZaShpYg0RFQjqtC9YdGI1WoQIZHEDDnniEAGA5ucmVXsNumaHAWZOCIwgoCQISEhoDZ2mIss0vXN5fu7uwCAQ8f2GCJSwCxfTfnw9s/8DHr60st/bOx2VOY1QhKWu5E6OLgg3cay26x7XFiTbSAQI+aiHnGanso6NMPihp3NQ97ftVhG3/HMNAdQ4QBVZsaOoWSBhOA69F1KRNRKalgisMJTgcmLAkS4URC4YytRgeoA6Chbly7c/vZ/fuzut3z2h/+Mb156JRye+/6/1P3226+75fmr5RUsMZw8fen84a/9+HuWJ265+QNvvffKQwcX7rv0mR+57nVvfP7zb19eePLY6oGUB43Z0vMm7G8JHdSwuX4/2gU1izCdHAY0KdOx9Bv9JW22CkQAMPPz7jxzdUj3/uB2f+0f5PTu44/+5OXn/v7y5IfGHdx8LD51zXte/Nh3b1/dWczlwx/95Gc/N/zwW/7sQw8+emyxcJRcxqPnDBGxMACAuSAUjc3trZ//uZ8+d/6xMupvve2X3v6OD1x//PrPf+pj//Fn/ulf/et/87HHHrVcwHEANXR2okAXNECISBFjoEONr8QpbAu8oi8BLMAAJ0ydmw3DCgrkrk1eGm4PoWy0M91fjkKgNYerMnudOXGSUsqRgjXwaBBdR9AMhA7hCITMgMysVW7GzMzVSFMLN/dJEW3qNUm7GlsqphERwwF9rSUBJ6I6f3bwSkKoUi8CqnTKCoiomjV31/DA0Tn10L7+kZvvOfn4793+1Nc8de3xfnuQEhbkLlPUDFd5uUgXWTU7M0NgQ2Ku4A6MLbcEkVpxQhHRMWMkI6sM9HraikjbpJQSsTRN0zSJu
Qqk6sUsR7r36T71qOvbyddbXV1TRCJU3jdURVuVBdVBA0KsI5jWifExrRjMKvxvHMc89uM46phzP6zGlaBszubSNqmVpmnapt2QGTB17cZCZjZrfuDqK97Vf/Ztm1/83oOXbkAgYqu2EgsAtsjgiTCg1GY9CCeMO0EilhoWSSTCzEzCgZTDtubzfHCp29nM2zed/dQnHv7IR2954Ute8+o7/PFl/PbbL33yJ48u4McvP77JzcHlq4tbrtPnPvelP/xPrnnPL3/+bR9+66/97tv+1//+T+f/B23tHZbZ5gY+ee/w6le87mu/7s2f+dC9L3nZGXzxDZ3xsTOn92080LPzizfd+/hDn/zMw1/7/CtMDpSvPXZs9/ApMEOLVmlldtDkmk4lxVMCbJp21iGi9gMpzWSulkspkpIjWClIFBGzthuzTh4h1SowrplIlch2JEhkZveo9Sb45Nar9hh3zzkjQISVPqeUEHEYhmEY1LReukfaflWVVsCIqtZ9vV8goggcxxzutSAgFJQp0WhCtdaHwqdKXESIjtLj4eiVRSL3CIBEjBZmJTgkJQ+btWnIWRrWYsuhlyZBooa5X64akeXyAIBmXVdUpWEMoxrhQgGAGu5qFiARiByWwjNxAcQIokloCMPQm066WjULsHaWEnX1U6r2JKEGMTQ8IAABQxHqW1WHT0g8bZMsEIMM1MyQpZvLashMGO7Lw8NETIhApVZJCMBMlCPAkU29qCoBIE3pMBXRQQxdapBBs6GjiACTuWPD1f6g6muJnDOSQ2QvlVcvYaQYBCEMZo3NFsev+eKD9+rpu2H38QVoSRtBwdaDlp1LX24v/lHs3CU+uh4ENzweKIpvnEB32jiB0oJrtW48sybf3D+/4ctjp+nZ3QnVdJ77y3u7QSPCYiZioZ20vWZTb9sZIpaSE4JV4xswIpZSTp28ZrlcBoAQpsRYlTdmiIyOpmo6ISjaWTeOo6jGheM797/jX93wrn9747V3XDp3/41EeuKYmVnGPd+7tLv9P/7mh3K79S3/+s2r5bmv3PZd9+y3lPjhL9zz+p3zs4PlELNIlprlIrb6Vb4mX0nzxV6Yz7ciD4CAksAyVfMXJ3AHTsPGaaMGILq2tbZ5qpOBntfzvVtfBadxdexz+zd/qNkFS9GfwE2kr1zziW84+M6zZ594zWu/9jMf/3Qpi1VZxf4h9AsMj4h6Lk8vLYJ4M5bVzs7xz37qj84/+eSxnWtK8V/4L/+J0sbm1uk8DO9+12/d/dJX3/LcO8MZwqeoevBwgAicAr+AmcPNItQttHgoRgiRgjVCiOwO7iBIgYEUY3isemjAQYlk1DjoV4omweuT5aihrj5Fefo1xmesHoMmeR4CIdfLpl5LnERCAqmeICU8Ijy7u4epmYX50bIz1vanow4YYGr3RKSYdl1XRaTuzsgFHatY2iHcDbyAqxlqu1XypabccX7zGx+5/X13fvn3bnnsz33xJYOUQAggA2QSJGIE8MiHh/O2a5sUCCVAQ7PlWWqIUFoRrELWcDVGajoZdCT8PxxlTZtEhFhSqhpzOfJQiQgdUd9q0AVgrFOhj35NthYHRIxwniiZAACTzToCBMHDwat3mwDNzM1KKWUcVLVfrcZxHJYrVc05U9M00mzMFiSMbeoWXcMNjy0iHCz4We3OoeU7t2571vKGr+w89unmwoubG/dtFKI5iJmRUBKCodg8wZoQfnQl0ETyRAAgnFYPCg6EdnA4O30TnL988V/8o+tvuvlrv/2P77/zHQ/97L93zQ0NN3//nz36rs+dv3Lqjpef/p7XNTN86sNfOl+euvn0zfqZD//497/h7/yDt977+Os/+NivHY9ZX/DG24Y/eveHP/3xT73sRS+9/typx6889pWDBx556h7w4WTaSPP07BuuffGZW7544ZNfbD94/7u+tHn2+q/52lddeur8aCOEoSkb5okkRRSQhzKWLMRCVMKYORdLKUUEgrdMRASOFEAYFfONiERYbJ0DfZQhXeVD5lU6CxG1zUXEoh4Rs9msDKO7N41sbm4sl0t3WywWeSgUU5bU9KgAMIpZVleRGuNr0xYWgwISsa8buQCrjGhz0zXeTSE8ggCZiZvkGoDOLMiMSBhoFigBiQiRCN2UE2OE5iLV7GsGIpKo4w6ZxpwtlAShEZ0IdBEe6EoOhZAjqFhJEGFkOGJIaET1JzODBSIAmkeEWfVkY9nY2FoOIzeSFRhRLZdRSykQRETqCgDwDGx4jfWsVSwZjAgghAFNAEAaAjSckKvRow7J6rApJS5WiAgBBKVpRFwAavolSCImDiSpEUmmaI4eTuYWxIwBZuFmIFBHXlP4NlAlGtQcOeYEFTcctUmH7JYO8Lbn3vJv/91/7L/jX9sX3nnxkU9uvuZPJjQLTwQ7Fz95x4PvtO74eOnhZnXJ4CSmLZUZAKCOQRKz7bCClbviMSlOIgBpdvXxu//oN255/D3/YnvzuTc8+4Uv+pp0+/OuPnV2Q6CUA/F25SsJniMV14al5WawUdEqQ7BhMoQnLpzDgLZtkcEZKaB40cq9RjPwRIII6harPiJkuX3iwt/+2c1dGd/39vR77z597HRubXV4iLu7A9qT17zg1/7Cr4H5t/zL1+ClR3Fn58y5z9xz3anmsfOP7z76lYubL7p28+DiEptN9Pac7W40euOTv3f5rh/aKlf79ph2C6ttPjceDsTgDuFBVLir48EVALmVS+dumy8/+4pf22/ee/V6fs57P3v8qy+88pwvoIEPEQ6HdPlwGBfB48qe/+JXPf7k/i23vvBLn/94SlaFw8xMjJUsGBGJmzGCm4P/9cs/d8M1156/sr8c9hui7RPH1TS7LTa2f/6//cJdL3rp7a+9LRC0H0YaqlVfADHAqgVVw92RiYAKgqmZWUWpwVoSVTO4RIgTOyAWY8DDftU6H8aBSjCSMVT23qTYPxoUx4SYRnp6XOYRWDGi7lHXwMSUJKWUBqqe1ya1tX9yd6iaxMot8HCdngkCVC/rCL7JeIcBAQY1NT3CtUyVPgICoTsJw1QuIXqIe2hghiVgSH+V4XUP3/WpG5/88J2Pv/TiDTdd2vIgZ3CIHFYR0IR04ppjh1f3LTIxq6tBzNrUNtJQCkJHsKLztjMzIACMrutoslKvPUWJRSSYEwsL1cN0+nw8KqMg1gQuiHUIR6X0I9ThgUcAgmFwgEHwesccE2IMIWt1eEwjUHMrauY59+M45nHs+34cxzL2jkCJmlnXSifdLKXUdKmbzVJqKRpCbBXCYMByg2z+ELzy7+nbfoM+cXecaYOCkYmQzBmREFpsAp55AU+ZQohTdKNPS6NwCAhk2bnxxstvf//5n/v7r/zhv3awcdtnf/QvnJ5t3vza7+q+9RV84qTedvPTh+pPvvWpv/unt9/zHWevPHX8s5+8alcvPvsFr/vLf++XfvJvPm7/6OWvfdVD54fm5mYxLPwGOvMt8Xfe8Zde+Zmv+8bnffdtG7dyavw6bghX1i/b3f54/1S+/wuPvW/z1u39R6786j0/+/I3vmZYln1bllERvY22
Tmx60EIgqcG6mg3Syg5tKBB1LE1KSGi5UJJSCgtXh2jHHMiIay/ANF6uXBlX0ymFej0a8UANd3fVjAAiOOQxDiEQU2oPD1eJpaa405o8E45ARtPDU5HS62veIrtWGHTN+KubJWYMM2VCQAhwNEdPACQYEUxJEhA2xTNiIlJmJnCFCLWWJRDbWWdFV6sVABUbF5ubfd9H4OZ8fnBw0Eor5MKwykNNLg+PlFIUNUFECg2LKaMLERnZ3KLmrmIyJKE6MgBCQYzxcL/r5vv7+9J241hEaLlcIjAhJmmj2kYAUaYCNSLqzKvOfYmQ2pZKmYaUFgyRGJ0Ey4TmrXDyiOAmFcvuxMjkEohayI0B0Aphwyk1HGTugQLoQslKJkIDMMuASIHVsyXUaClCbOtxgk14A0DgCmWB6mWAMDMtdtMdZ/7Tb/z7K3f/habf+8p//0uad7/Dv4o71wtTN1yead+0MajExtZ1D7374ef/4Ly/UGTTWKCMRWaR5sASjUA4ag+c0rDf9MvDEze/6Pf/47Me+sNnaXzfOf97n/5f73jPr3/vt3zPa/7Un1l+9fxBO2cYZyEjhSO2JEYwoCcgChKR2Wx2dXeXag0N4O4pqL69jVENGs+uATiUzESMFKopJdk+feoFd9/SrU4tvuUN+siPPPJjf/umz93TnNg5d/XCU6ef/z//0jvS8tIbfv4729d8/eY3/vHjW2fu/Yk/ff2V9vypO66z4SOP8410duv4yau7/eaihzzsc3vN5S/d/Pj7H7/hDd3q3Jg2vDkWlABpGsQTVesnunXDXhu61V9608f/zamhv9QfPPaKx67O799+8kdNDrV7oDmgvOm6AD+MY4cnV9Zfv3XL5z/72Re95kUG13R88823yFfu+UDTSKJJeVgfVzNjPCiyuPrIU/fc82lU0rQ4fs3J/YtPHhzE6eNpmYcmdf1q99Mf/f2x2YVbadn3GUcEQOARARjMjBE011sTISbBVGIR4WEYs2ZzRSRmqe+zh/EISlBKSZQKhlBiU0M42sfUyVsNx3ZzX2M10PHoAq5nTcmmqlNbQMRNamddIZzQH5VqhqBuIlJ3KhCOHla0IoqIyEPdfe27DWZOjO5eMMAjiayWy6Zp+iGntnELrtE6TBFR9cO1YyiRh4iu4LzZafavvukrd/yPl37hXXd94a98+OsYoGX29RLICIBpvy/OSQCL2azbBICGsCFBjkiczVLbAVCXWF0hccOCQEeLXiJiIWa2gKZpGpYAhyrRAcTp+HjaNr1uc2MEq95oAKDwuj5ngBKOAKWe5l4J2+gIAlMomyOEmhV1dfToc19K6VerUoqOQxAK82w2a7q2aWcptfNullgwEJgiEQC1AQFZRC748o/xC//96j2/s/3Fv7H6tpbICAGQOYFZyuFt4+AYSOvtZn0wAKABWvYrwilWGVPiWTeaPvr//8vbH3r/69/6y5/93T/Q3/oPr/zFt1775lf8/k+/ffjmbzhxchueEUc45xvS5sI/9o5rFW3nho1v/YtPnnzwq+9951/8oR/9J//2x979vl/fgJNf/rpjz3rTGcbtneuPHTu+dcMGvu3sf+hX/oJTt1Ps3HJsa9Ucp/GpXIaPPvau2295/tkLV208df7wqT/86sdO+qn+YLRsXXIYjChXFbTSBNAwd0WfObgrMWYt3DVBNI4qTQPhiCxIJTKzENBqHEMIqUqF6zPkQRPVtc45GTHnnFJSNUQEZCJjJIxomqaS07OqiNSbTJqmznWmWUINKSICMyCKiHpuanjBCDNkjKBsJiJB1GuecTKmaehNgAFNICYCaigIcCwWAdbIRpPcNcis5xDihbSXD/f2loccQMIRQNJkNU5NmK8Oe0ZBh6y5Sd1WszAzDAyIcAvhMaIBdALzaDWCsBCwEYUoGGIlcAASMiOQWbiVLG1TvFTiWyK2YiIdIrpa0VJTUhzBzYRSncM5CUVUbAaRFAwLbwwRsYQXmEjFAYyhbdvmrBaBdS9GmKaJHUS4qk0lLTgQEgk5BXkguAMzo7g7QhihqGqDCTHc1DxSsCCpKjA5RDEVaeoRKECtCDEPrsVKO+uOzxd/+Id/eN/erde99hW7v/Adx3fK5cfx4MpTL/Qrl5tufurk4dkDSIu+aCtw/WN/gB4P3fV9jXmgq4c1m83q8jA7xl4W/XkF6jeuh8DZ8mI0s7N3vObOL77roo5/7MabPpBu/mSUd/7Gb5xFu/WbX/viw243qIQ30CRAVUUnxihugliGvD9mCkhIDaaiY5tSdgQPNK2+fyMwQ6VoTRDAIkKodxUI/PnfuPf9H7mw2Iq/+APPfeMvv/uev/j9t376Q0/e9upf+ZFf2zg4+/rDt535L7+9Zdfu60VAvvVNb3rkV351dewNLmPMj73j3MbL8yM3NsxxzUw2gnYfO34jBc/6S5e3b8/dMaiooXDOvZRD49aka4dDTM1tj3zszO4jL8bd4epTLgkzvPTd8OiftI1Hv3V5+v39iVUApD3Qzbi4/fiZ83dYyYZiZf9//ud/9f/7ibde2Lt4bOfkHc9/9ZOf+1AsupF4IwFoipI1DvaWZfPk1rnPPTT0NjtNi329eu7CSPL6Hbjt9HD/6Zc8eHaW8J7hsDx+8X682dFhmXOXNBe2JFIGsbQkbwG1Ti+1JHRwCAElJm5drYakeYxSFx5GlJzcI5wJCcFCo46p1JgxwttGahoMNEyQ1DIAFEVJKcxTSqCFkUqACzpR8Sga6N6Gm5YkbR4LM4+WTT0C2QG0YBgFbi42L1w4nzoS8nEwcmZQQq8kcVOwCAt2t/CMHqDs7rlfoaRxVRDY6vygbcMRmzSoB2E2D3Pycpg6KVe42Xr1U/LRG7764HWrT9zy0Bseu0PRD6kkQTJB3U5+ZQCctalifYihm8+jZDMDkGQ0r4plRmBquUmEBIKILAIA3Eyy5wDv2q5OI56eK0PUM8jMgBCFixYighp0YXyEhPOpeLB1cW1HQ+aj9KcBTFDAw9Xr0H307K7jquQ85LGvZJU2tU0jnGTWbTaJ27aVVig1xCxBrWMAuGBhkawr8lu746+5ePuvnfz0O/DDf7V585OrfWzdiswt7c1W4kI6UewRkYXcvaLXSlZvhcZxCCUX04PxEw/4v/jJ65YXTv3cez/373/hum+/69k/97mv/Py77nnjW9qz99xw440ZFNIzxortqthWOr6Vt58Ff/fHl7e/Yv7Bn7l6+O6f+68fbry5/adePZ4//OpPfVK/fOX6N9524uUnvTRBvLGd02bzA3d+0/bm6S/977eRXnz2q7/p0dj/2JXZo6tXDTi77rt7mO3e1+2nx/d3dXU8cBgl5hY5MGAmMkui6hDUEIOpUgRScSNJUCzQCf8/st47zK6ruv9eZe99zr13qkajLlmWi9yNC2AMphdjICT0BEISUkkhvzRIQt60XxJSgUAob0ICCQQIgZBQbIOxjQHb2Mbdsq1iSVbXSFNvO+fsvdZ6/9h3RJ7nnUePH3tmPHM1c85Ze631/X6+oqCuCLGJjZhDb2oWnFrtDaMCEgKCqhITEpmoiiZ
S731sKmTsVj3vCgYUiWhGjhRRzVhVkgXvQWhISklR0szEFHlaXFzM0JXKYLIs+92VolUmIEOOYOCgjFRbzlhh5xAAHGVnDBFxSk0gRiPxJGCBGFRUIzIFpqhlEytRBDJHCE0C8stxGMp2XdcZQcdOidGTr+taUkJiEcnbkZQSM6sZZ4g9ICEVqbKEZmyACQQ8ZpAUMLGZpGgALhQCms9zTBZCIQnrlNmtGrUhsmRAYs7Q0BKYAIOBtyJZ7VwIodCY8kw+CohZqDEAWjAx46RGCMwmElHEzCNbqokcgpGYR1RiVGFCAKwlJZIMHCrFUJu8UU5qgFBJYkcczVQRidipgYIWRSulhrgYpn5HfQMEisnTcMil7zHRhJtZxJ4tL0APZws3eXzvZQ+e7Fmz8ac+N/etDyw9/EgqFtGVDxw9+syzrxisLMHBwy3ftpjazuqlBiSUR7/97FP3La+/vGslrMw9fdUvJ9+BcgokRZWEDgwwNUODtUcf3X/xKy7+9M9Vk5v9wvIv8rq7ml2t9tjj372VLz1r7uCjr7zoqoXWhn4zR57GdTJq6vgyggwVMr7NAIRgmBpGrmpxHgTR2ExjuyybOrEIACoIGakpI3tid+D04MmjcXJD0U3wvr974t4Xnn7zb33wjr/5iy+//i/X9Q7/7I9fUD/1G033+OH6ifGVU3Fy6/YXvWHrTf++1D94bO0OmHu6tWXj3Z2f4E53y/aLcPOVK61NgISaOiuHxo9+nwB6ay+OvqNhLEifUy2hw5rGFw5Nw/Al9/8ZnFqpKmBsuoadztjODz1+7ob182PXNGt/Yc1RrqP5ymSiVW8NH3nVO99x44eTnT02MV6OTW7aumFu8eTp04ONm3fghWnf0bs6rmRoo0vQIPqp2dnWoi1994nbzXtdhiH6GlYu2rLukkvWHtErdj3pl7AaT5e/+Dnj1SzM28FaEho1fQVASTWTiYGANkkcMJlSduUygqgziTFJI1Q4xxzrKgF65GjKnKHiBnn/qiMHKvpMVzdAVDTFHJWrBGxmjtBzyHxmxxxC0CSaUAWiiaKYIkrSaGDMYIOqdkmS5qA9YgJlYMDl7gryCLPiHZkBqTNUMDDTHKauIKrqEDOzAgBEBFUUySAliUVoKagLblAPXCiIaaLsNMNGhQwECg9x2IqdHzt+xd9Ofee28+avWlycrDdPRu5rN2CRilhiGYInMGQjIiHSlFB0NEYwoewmcgR5b0yUNcChKMwsZ0oiIpM/o+C3//WmqvEMX9O73NyoKqpJ0jNtcbZ15ZAJXo2ezanmmgnhZgRYnVFlAUiKMdaq2iSRmDM/vPc+hLIoy1C44Fves/eevdPsEMvTMwAwLUyNiyE3K1q9rrzyP3qPfNU//rOD68pQxNhzrjSEUjyKwP+a2OSjA+axHBekQ8YJaJYN+iat+qufnR0ubf2Dz973sQ+Xj9wOLzlvz39+r/tHf7h2YVdrcrobxjy2Eeoz9TcNtfC9Xj2R/s//M3zJ1Z/4l8eO0I+0nvWGzectwI2fSxcuTf7a1snWSv+j+x79t3vXPL1j5wu23g8PNRzX8MZqkU7efc/KnuNbJiYOfufeOzZeWZcvl2jWeNfy/uw1J6qS42ObYXc3UVlKNEkIlnM8OUEyA8tx5AQECGRooqMViFqK0hB673IWAhKtRt5KVkuZKKwawMgAiUrHTd2gWUpx4/qNJ0+dSiIhhKpRE3POIWrKYemootGieOeJ3eLKInsnpswuNjEHFJZlScxg2KRMulDNzmPQzJiDEacOlDAAkjF7V8fYckFEoqnDETsppSSWjbCoIoronAMwUGiahohGpgIzEDWULBMTEVQUWLUwrHrNnXPZsQDClUdKhElqUohKio1nEMkdJxGBWh56F2U5rOs8QgsODDEpMQbnnEeqmyaZGZLLGC+xxMDgyQg0ISUDEfCgbMqJFFUh5TzabKBGU/WGrdZYr99vtTrDpkZGIFRTl3DkYARwREE5O6eSAZghgidylBX+QERARkCi2TlCIJrn6w0ulzpbuUUFJhnO1G4Oezw5FU3l9Jx3Mxec95rx9a2J47t637rx9FP7v/873xw/9OADt/7RVMsNlmR8YurJuVNPLi+c44tjgyphfxI6A+Uw3h66qjUIprF14C5M0q3r9tbr+huuFKnEl0O3Na/424FsZv0Op0+XW8oP3Rk29Hf9whufZ/iyYvOtKM2JQ+9+9ksPXMyH/uYjm179vFOz500vr9TFEpdrF6QvKj4/ONTATDQBADpHzknKhRmD46qqTBEAmFEVgcnjaCTp4vKpNbJch84F1Z0XbDp01mHbd+6Pf+PHP3jOY7e88pM/Hltf6u08q11b2V9JwCdPx22XXjn2uj886+kjxTNe56c28sQMAFiK8yf3bTn5g3PSwuSJ3WX1ZH/YBwlNKB5f8xccZejbtR9HPwFg7ZWTarL5zk+Vjz1pg2GJmAAlKgBP++KSO3/iO6+gyd7NzQqgKzbdpxf8145zfunvPv7in/vYj/78z938D9e96NXXXveS+YUlYmeJlo+dWL/uvKHVc8ceDtxExdDmYdO02hsf+Oa3Dz6+ryy8s4gAsYlhuNJvvfiru4PTfWmlvPYF1206K/zL3XdNrjfTVA+GGKMYJtZEBpJSUCJWQBNBNENFQpUUo6SoRigKCRRcQLUIGopAq0bevFWFkY3CkoqaCgAg+dz9ZRGKEeV4CFMiJELnfJMFDjTaewGAgsWUxAQSe0coyl6SAgCwAYNx6Tw7qesQHABkKkV+hFn2Eyka2yq4xjwHQKW88sER7V8RWkUgB0CqqL7w6BARkjSu47AKE1qbc1VplRbPWjr3RUcO3r7jwE0Xrf/Vh7d2S/YwWfnOWOrW7dCJTjWxRwMEsJTEEzmmBHAmMAoARswpNVcGyK8bLKvqVEEBjCFvBM/8VDNdIW8YRYSa0VQ/11cVOyNAAzRVXX2sU2YNnpE+SnaFJUj5h8+MBJpEkzCiWQKA4HxZliGU3vtQFGVZulCMoN80coPBqqSaACIJaVGktIzDq8cvvmx5y6MbDtwUn3oDXnGcKwBASVawi1FcRtjBGc0RIgKQ4IABBuO9cCp1Y+usTefs+PKnujcfvO+3f+eav343vOof73/WG9Y/+ocbxvzi7A5JEWNfCuf/d3YHIHc1/ejrB6+5+oOfW5jrQ3vLhAi1p8OWl70jnZgHeWDn77+9+8v7Dv/OHfOfPvDgvcc3XbadL+vIlu6Jzad1e9s2nvfUxul+vf7x45t1GI3XUpsoENbQcnF50zPLA09v8AOKRY2F0HL2WZNajpgEBkVjy/EgLJIQEFCJCM04KhNEpsaMmWKUFgeUZLy6P4PRD0UAGLCpajPzPngOp06dIgB0LqkiURIhZ84xEAbPiKiNtsFLTIo4NtGJmm0BMj45Ucccv6LaKDnviJFJJK5aG0a50nndY6Ae8tSKAYARU0rOBRklHIqqWnYlAWaycb4IiYg8mWRflQKAyz8Q1YyRjyKKesYcaGfyvDNVFg0dcQRBEAKHJGCJIOAIIIIIWaxX+qASGYGBkQ
1Nk6a8mFGlplb2pKpCkLNMyEBQFQBJAK2pEcky5Z5ZFM2BQ4CkCpRl0JgUcAQzkQgKlv2KKTBLEmJTZMsLAkW0EcDV/Ei6ZmZgIiIaNVmOmzIDgeyoJLAkAgLawvZyWFHhfmy55ak1a2qc/u4DHBcmXv5TMy++Mh16ove3f9T/ymc3TG/4+Ov/brEY/90v/O7TAzlK7MtOUy8FdV+48/HfecVz277uDuoBli32jdeipw0au6KWFFzY2JkeP3j7kxuu5u5cbE1J6KDGIEM3Pu36R6rDc3ye7Z2+eGcL7IXXLv7Pd9+09fKb6oexnPjL9/3JX//lx85718/e84mP4fPr05sualdB6m4gInTNSEygo1x5wvzMUWVfejOLMaHaCARtho5XRZdGRA4lTTzwj3987b9esf30dAfuHHv9+7sXXaX3XXXnR0K9NLzxc3rBXz/h9BjoCZg6ieOyh+Dyd7hzTk+cfGpx7nj10G10fN+adsewM2x2jcfbwa9bKbhF5aDbPfnoXqv/uXrOT1E8BG4M2Y8Nuq7uthaPXnjf13HocbK1HPsT45smxid6Rw5M1v2Ta5+14eB9b3rnUntqbM/M1qePHWttXL+1v+EXv/AP//pj7/34q3/up2/+4CWnn9Pv98tO6dkv+0Hv2PHt264syE4ffqw9PlGlQVlJ3T1RdY8j1B03vVTNi50an1pzzjNuuP8kprl51zn7tW8+a0330Kc+//2Ji84bVHU9rE51jwVf1mYYwAFjg74AIULH3nuH1vIeNIHjRo0ECblSbWIqnHOAapJD6RGRVjs2YEAlNVWifNQ2QEViXO25HCXVJAkdO8cGQg6JvIgxEiNyZrBZUjU1i/XAOUJEDp7ZmxmDIShpU/gQMuFF1XuPZkx0xsVhZC7jGJEMjQkwW5CZGUZyeTYiNCZ2ZSmGQPnDDGaByYoWFCzIU+S9SNnwj/af84OlA9+d2v+CzTsumJ8uWzyIiK3xlpoyIhoSqIhkfC95QyAksOzAzxRbdsTkcsTsqF6eQe+CIaCsMnpVRDSXzaw7J8oGzfz0HL0BZuOpEaKa2qjrzUpzVRVJ+UuJ5Ux4AlQDoFwA1IgYnR+ZcT0XRcv5IidusXf/e0+fB+JgMDLPKDYGXqMXS2h1y70xXbGr2vUFuveGtNOATJoBWUiojlZf7KrlBkba1Da6FaNyaf7EfHPWhTvbm8vT//Vw/MRHLn3Pz+x/bK/9yfun99xLk9PddROTR04MtJ4oZuvZtelg90z51bHF5Rpodvbjn9q9stjjmbOG1ZghJEU/4dOK2YPbn9j9tztfeu3sJ9+27g8eP/7Htx65+2n8RNobxo+d81DaOGnTluZ6k5f9Ls3M8MQm8myhi23Pg7FkxXgHy3Oet7jny+s6ISh2UcEgJomcmDzTKO9FJOd6rb4qAWJwRA4opYSORSKSEUGMNYJlmxwRpZTOSPdlFIGFahnpN4rUJfbksOTQLlsAOhgMMrZirNWuBsMQQlIbVM0oogC0Hg6Qgtrqt1BjIJHESIkMcXTmA6Ocr25AjrmfKgIqhYMrakmGZkmESVUpX09EJmoGwOB5RKRPKTGamNAIWmd5HkM00lkjoqpEwxyvNNJCZlwkgDIQYgOSQNrOOQ4xl1E1511G2mdsCxDmXAcky+lpCQ0BxVAFmlQbYWbhZWgJAIAJExCSgDEViDZiXoLVSTxCFGFAEHWhAAMilyxWsfGhjHVThGCmbIDo9AxaTnOSiObZVBQLzMCQ7y/PLAASU07UzhTfLEbJvwunThar3mzLpQ6u9K94+Osbdl5UbT93ccul668/b+VP3ysf+5uNbdedXHPrZa/YdfWbX/bJdxQLT719Yv37lhY09c++4MJOe/z+B+/54Ldued2znlm6Vh0xtmEN+H6r1Ni01qzt2emFhcVDTz6p7hGOOnjWOy1VZORLL64zPPTExbLr1Hwa33DsoaZ+zP6h/fo1/SPNdceXzm3KA07uuuuua6+56m3veNMv/e5v7/rofxzn6TS7rjOIfS6dI7MEoiISvM9ybjETU2/YDKui8AiIhN65siyXe11YlfjkG98h4f959uGLsTvrJ77Vfsfft/7sufVNV8m9T/zETz566vdPb7ksHeoAQEdDsTy/rjpx7eWb23PHer//HGqaGy/6SbnsJUuukVpLOB1mrpgbjrWam9bguiHWE5Nrpi6c6R2+84Dp/GU/5mZD0MpXvc1777zitg9D6lbOij6wptScYLJBKxWoBy5+2bXf/BAYJbHO8QXqr6w995w60Lpq/a/d/Kl/evE7/+lVv/yWb/zJVcdf5kjBoofWxJqO+fqcC58NvcHKyh4X2hXQ2Jr2htl1DoolS+rgR178k5dedh1Mtk/de+KcnRtf/cyZR+/97lfu+MEFz3vR6epYU2u3VqcJUl2BQa2kHhP5JEzmQ2EwCg7SFI3JABUhSkoAaiaiwXuHzpKAJxoxH1fjjxgAMpRlFB04UiSDIIIgGKESeiIBc9nKzMCIoJiUGMzByGZjCq4IZGAmKWkWWDGBI0BzGhOWXiQyu5Cjv1WzC0NEVEfpvLnz1NWbf7UjzGZhCy6EoiD2RtyIOucCeSJiwqb0noAiEBbNRFpO+tx03msXrvxM+64vzT7y1/a6FVmcLLTftMCiYWTmBGKElsw5Z4QKmiNccj5jrp1ogAZ1zHw+a5oql1QcRVbkigtnCnB+WiWQLH2PMWbha37epZSSiukIQ8irMY5Z6AQAuVHOnwYACcgRMiATBufZO+c9eBeYveccc+lccD744JjRiAxRVlnRkM1OYAogBhwxQSWEReKl4eJ1xcVrB2seXbt/16lD22lLV6sAMNAU0I2aegBbpaFZMtFkwBQMBuvPO3/D2PqwctNDvd/4pU2zsPDn9w/2P6Br1kyGKamW7cipgbSmzrkkzm6YPuc5Rzffd6YANzd+o7Vhy2M337b82uvi+HopJ0ETDhZxbFpgElYWqlu2nf/vg/NvffTYTlx6xs51H//NRT6udx+o9hx66jtP2r3H6MHQ2nrl5IZr3fRZ5MYQiKDYZGlRrZakLR6fPW+Hf8nT+77linUeAMBS0oolOAJEFmAkWZ1DOCQjlJQsAQA0JUpMZcJSRgiSShP5QGb0v5xIP/wnk6gpYJNiuywkNibGBElqUNdA1kWydy6lpASh6FSxUTJTbbfHhv2uz8GUzCbiXM7hMUPIWmhYxSSNbkoiJFKzJkZybGbkWEQNoW6afAFlILtb3XRkFRU551cBF4iYzVdN0xABAJlZzHajVU+gmeiINQsEI5YMACTVSIaITiEheSIvGiFRVFd4IJKRQrNBT4JQFBSjILL3gVQbSwCJWJMBGrCAqgkYMgFSgaRiyIhs7MRyuTAEdWYpqSopADGypMwOEA+5nQB22KTaOTeURARIjmTEmVrlzYEDHOGDbGQrGNmc1NhxPeibGcAPYXyIqE3TzE5sOX5i+113TO979AI7Pfzyf37/hne1n/38Ez/y4sn7vj89tbam6QNjUzf96PsvuPcLV3//c0vFxEta7f9pwcOpfejJQ6//y
Z88fvrEsSNP/8/3H3rFRZcUbZi2chn7oUSyYvnIiWEzJMCZqelBb2j33RLuvj1cer2ee0k54VxDy8dP7G27eunk0Redmqx+/dC13/Lt6qGd+r39+2f/7+RTT1i5dla09Zl/+fwddz784V/+zd6e/ac8dscntB4kakNKOeo0zzMAcygnerZkSUWJXCsUZhqbxiE1krKhNMZaRNx4qLdgmnG22138950/Q5M7i1feZa/YLAc26K4dX/vjrTe8agnCOfz4gd7WtevLzTx1eqIl7cn19cJVe7/wyLqw+dLX28IRdqZ1dXrdudMHz7XFB9VtGeqpcR7y+qnzcXd57weentl69uLC9qceL0/s75K0JifTSrc20pIJGj12aMZN7Nv5wqacuHTXNxwIadEy9GCxPd6GzmLq8cC9/ca/+tKL/vqzr3xvfWfveXvf6Ah8JWHCnzx6tChbV137qscfLJ868nBncgpqfeiJ3cYOBgsveOELw+aX/PmnH5mcrl74jLNt4dFPf/5Y5M4b3/SWfYf31L3TSXRuBTbOTshgWKCr66GBU9KUmpnxsYKdDiMDCdQKgDE5g5QxjZLAjMk5QjYkzHD6vIYcPYDQkAmSGJgSEqIyoHeUUxUE1RHlGHMycs6DatIU0I0aBzUVA0VGBgRNElW9Z02iRgbKxIzoiPMrYgQaIQjyLm7UqBHQGU4WESPkDebqdQNoZrnt0yRMRoQTrY73IdP+auTAKTSc2gZNKpui5HSq3X9HeuldKwcen9nzzZX735iuOdKcDEEXlVqKCSQnNXliAgTL062R2ntk+ReJdUOAAoAEWc6NpmaYQ75oFJ0wsm+emdkOqgEiFj6oqni/ugZOVdVkP1UjSVWDY8Y8nRZmBsXcXYlITBEAEhOi8+w9c1EURavE4JCoQA4hhODIeSbnfWY9Gdlq+liOpQMQ1JSigaW6UW4qXwcr+klVhmtl8nl66Zf4/n8PD/1xf1LVKdZofgmbcvUkpiODCGYkGRetvQ/fff7ml42fJftvfqj82AfXTy72DleNzPn154REQbpoiO3Nk9e9otl0bnPs+N5nPxuLF5wpwAvf+e7izuu+efVblzZfAuzAFJBschNZArXO2HSprRc+/4ZO2b34SZy/e+2Tn7nGn7+jPsvjBb2tL3+kaT9cHdwZDj0fh1PQtEAJDACo25VEnqo+d8Z8I5df+sxTi/sH3cPMCJlLgYxMmVjhXCgKl7fvZ6YFisSAKsDoklggGuGlyDGzpZFcbvTJq+y2JkVR8N674AeDQUEUgk+i3lEI3jkHiqqqSUDNTJOYiMnqI57JI7KYFAQxiQKBKHOeMFmjmvXwOdvWVseJgNCYBmUArFImS2COPc5qSh4xKEZGxJxGmC/REEJKyRHVdc3AGWZjiLD6N8rlJ++8R8dKxMCUTVi5HhKB5oDt7NlXdZ5y2iMTOeaUEgJk9T4AIDLYqssfTTQ5F2KdQLMLFNWM8odMwAyN02jbbmoJQInQogKRKrgQqrpOOTCbgJhUkiNOamiQsa8je1iePRGoqQHw6g8kH7IBIfs4ACiqOOeAWFOOmaIqNtrYmvUz/u4fnDxw8CjAiU7x9Cl+yeyOn//el47e/PGTvWZ8bN1CgsJ3b/6JfysGyzd88V1Dr6aDucHgF8rWr4RUx8WpqfD21779rz/6NwvLSzc9/tA1O87hgGOEWFuI9TJEZHJM23acnZZ6ZTQA8HSgOX5UHzs1/cbfvq9fT3em7/+125t1T+K33+3iZUvjN7XW0kGs9APefjz5OKFV7SY3H9v9xC/98/v+8f0feujz37jyhTc8fXxpJnGfRroaW2W05aELOlJRIvKe67pmZoWsA2VVHbFKEN1YEEiwptVskO/8yvK7+37dcwZf2yFP1GH25Anc9c3HVmbx/N/5E3hqBma140J7ek1nvc6PT9ni8asbdHd+5gfUOu+Zry1I6zryGK855517v/2R/u473OT0oNECh2losmmNHNl72d5+5GolSBHNutZGl6RyiYcqNO6h7u++7CWTi0enTzyoncInwGgd6Gw754p6obe01Fsz027jxNu/89dfqia++Ly/WA7zL3/o5930xJGTh9sxlO3wyL79G866KnT8vu/eMrbjAjc2PbFuA0g6eqp/fPjQmrNnrBp847Y9m9aXV132DGP3+IE9J47Y5GVrBoB79q1cNLVJUxJAknwoBTR2zhFSjsJARE9IgCAZZ2dmIqaezTQlTSGETLNFPLO7zbcEFeQxqSc0M0YlQANVoCITVZghH6EB1cy7oD88LxuqZbMvIjpiYBwfH+/3hgqmWcFESKZs5JCYAG20RkMCFUBEtWzNyXcNII2ww6tz29VLBzh/oCgKYBeKApHYOVVtgYskhs5QGUwVvOMV7m/Hbb+wcN17J/77MzN3XXf8vDGYGNLytFKDyIaGKCKMqCIAhkzeMwHmMVQSaZqmqRozEwWRyJTPAYTIuc7lMCIYpYWnlLLOR4HQzMSnrDgzEyBU1dg0ZohMYpBSAlHvWFW9y2mMCmZqlNKIxOQQvWPvXfBFKIsQAntCRO9K55nyiQQhqqQkAAoC2eCHuopIAq2q2kTrQbXCXWnUJDWgCZVl8RXpgq9MPfJdt+vhdNb63lkJu1pMDNMgo9oxzx0yd5FsfHz8ie/eu3Kk2vCGybn7npr/0ld3/vQvH//P1pqVm6b85mLtcGzDKTJItGN+/Jq63DS/fkf94tc/4aeO1KOH7/6eO/KXB4T9zOmnQ/cUmdXtyTL1nec+jxv5prUBtF5eeyn3Hx7MzDxx8RtqPxuV3YKz5fX0+Dnbw+vmKBrgMDpbnVyCWY8LTk17+bh0tpyztRz0lq++4qUP3vrluphDREeOAVkBENQwiVHA7IIDQzEFZAQgdJgaYo6gdY7wEGRAJ9bkyM5Vdi9kRRJgy7sqmaqCYrssTJJzDj3lJEMzdMQjzbxJ0zTEjCpqQkj1oFbVqlZXuKoa5L8NMqlqjMlw5JMZ2Y4FDFRWI0+YOcWoqpoD1piZsR9rBjYzNQEAZgQjI2Ty3o8myWeEjWbmvR82CUbvpDMnXUTOEIzR/Yi4OhBScMSVKLEFcLUZgjIGQHYsCIYK5BGxcF7R1DBFAEVCM0gAyoxADiIQIKIJwqi9BzDVRkf8YyI2dQDGpARKBHVMOYxETGOT7zpFRs2PC2Ay8N6pqndeVWtRI4Jcz8VM8oROzQwBGQkYgDCp5nMrquUwDNVGcTQB8IXv7dl/+uZvTp21c3B07/29uacgfefEwie8/4fNW3c2bve+x1ybbnvhe46c9cw3feB6R63leFIwHZqduW4Ar1b+eoKHf/DQjguufutP/OS9j9677+EnvvrEE88YLj1r2/kToW3QG0+um2oMvhcH5URrWMcEpi2oW3zRAw86ixu2XH5oy5P9sbP84g7xKzx/UTr7P3sblMDGXc2vxeV/68bCWpV2pjccfuToH7/3T5pB99TK6de/7h379j0JRSsLApIkZmZDIpKUuhJd8GIQYwbymahhcCSqCtJEZkYGR6YEwaEpcAf625bvuIQfRIsR
N+i4Y4axx+4Pp6GanN26Zfqphx/c/6G/Tfffsu3QXLtcF6H3zFi2b/3MD7C38/p3jvfnyXs/u+HHfvbv7/vK3849dNOayalhC1vUWtPy2+97ypZq8rGTTD07XeoTEAUzXwJjd0VcOHjJa3Y8eUuVZNo59ckMmq07jq5fVxRw9tlnrawsFYVfWl75se//ytRw3def9Q/D8fk33PpeDqZtODq/NOFtXgebt11+sLhv+XMfXhvWzRXj0+uLxRtO2w3/XY13edDZuufS2RvP+d6dh5uVfpp0a6ZxeIplB5443B1eIJ5xGFPBDtQsxTbxoN8tyjYVWJOUxqlJLvgIygIxRc9UlAxqAErOGWJWGCGO9ChomW5FiOBGkBBhBGYSADYyTY653R7rDwZsmLEbAKBAQESOUSBjIkyJ2GJTu6zOEEkqRIQOARCBDYTJ5YmfCz7GRlUdlYiGqGcm4oCZOLPKyM00H8QmimoOL4FGmrFWiWhFqwBDDj6mqqihNqwbA0KDGpknsFyR0y8ur3ru0Ue/c86+j/Zu/79zr4wVNRwlqpIZmDTRcUCDHGpUhEJEYowpaZNiVVV11YCZGsYYCc1MmVFVDYmZKR9hVlPHc+KyiI5NjGsSTcqekqTV4Bd1zsUoAOBcHiFYbpSRhJnMcNWu7Nm7oijQNITgi9K74EKGIKhHRqKRWURgpNPJjNqoCGS5lc9rL7KmqU21qquBWyIpKqrLJAOlWA63DqYvWVr7yOYT35vf/4b+WYdb/YlFNBr2ISu9KeviYmoQsfTh1GLzvLe+dLgCT/3Bn2/eeb4/9/If1IPnQ/+8F9euPUQmaI8DL03AbXfsuOGuzS9/5KSdjD80IXWa7rP86cvf97bx+X2f/rkvVp3pVHSiH2uIwbRoVlhrALxj2xvJXhdin5WVnTmOo4MiPi3qxTeAZ6RsuQInv7Ky/f1LO5a2L521ee3PL57obx4f33r5VfNP34yRgvMuGaJy8IKggHU95FVZiollGZaBAbJm07YoIgGamIGIAhI5yxN9gFzGVJUNAnIURIckQt4758wAkXNhyyENquaCyxwVNGsVoWkSojH7RgUMOt6L6bCpAnvNiGJTM2OgHE6vI7P+KjI9pgTmCCaK9kq/lwoEogIZ2KmOTm+jdQgCM0cRZg7e5zY3r58lpQzLY/5hoFmu97XUjgOvvjlHAJk9gZUTB+DNVV4ckkcwj3lzQ47RUGMKoYimjAgQxGoDJbYUUxIRJVVKVT8BgiNC55A8sfBIwKimiKpYMwISgxKYc2StoqySSJN6/Z4LPop45wpypGcW2SamMTVi6oWASBg1GZk4MERQQgDw+cZVXUUVYUrC7KM0koQcM7ms+ZiYmNj7vTvK2Wnon3wiLh/opJlqgltxheUt+5/65LrZ87Zsvn3q/Nte/u7n3fSXW/bctoS6fMVLirf85I7r3/CNt73k5/cduKUVvvPQPY8+vuvnf+4XXnrlyy8867I777vjgSePLpwcXnb25i0bZ6msxXs2qua7WiiWYanX8NqLptpnPfnyly+0rptfs76hF8ze+4vK/eHE3qPX/IGvyQy6a2BqUcdfwMNPu3Yi1wEqsWz8Qw8+Ggp+cM9TG2a3XHb1844cP5YnLSqSCb4+J/aQbxKUjgEZUOvYGJOqoWLpg5CklFI0VyspWD/ylJdlmtki9xM2DbXB9FR5rpcH4kK3PVGdeGLXnk9+Sv7ry+uHS210jn3SfgmFI9zpcfqe/3rAe3nh29eBdYZVdyxd8dr33H/la4984z/Om39yw/yhHUPsLvWaonJ1KYUPKUIoWhr7UdsuKWjVGltYf97i2rNe+pVb2jMz873ejFHA+MCBxzf80VtnL31muvK1577qlQNoUdn0ZfjCXb8wWU1/7rl/vuwW3n7bn1ZLAGPQp7HlU83YlPD0psWHfnDR2ZctXrFl9zu+reeEhgthhbLb3Xnn3tkHZ5ZefqGdv3vf4tSYP2GnLemY7xqCIbh8RGU1wcTBQzIzRnbEoOaQMZN7DLz3hgBiJTqmVTElGgHm+CFkYqa8k2fiYDnN1BlCDlggp43mVJw6FKxJaTWtiFTRjNEhKhIAmYE1SRKSAB2bO8VIgIpEsRJl5KJwLlRJgtOSCxwpjxxaUoM8oGMiZDWjLBMb+WiZIImaOUAl8MSBnS88kPnARDTCRYCrOdbWgIoYOiSrrW6aSCk28FNw3fe7x2+dffRFKzuv7O88RU8VbhxTDdF5dVgOArSYyoDDYYoOKRAKJLUmC7VSFaOCjQQtQORUAVlTEk+MiKrGzCHTFbxD1qoZEFHhA6CWjk2TAyJyDSihiZilpiBywefTP5FTFWam0fOdmFkNkX27PSYSHZlJQnaGvskr2UqiGhBmZSOqsWEF4hSQaagpEFuTyPGw11fVbtVvpYqowaQVEXhemh8iTz7v+I4Ht3W/se7gNU/tnxj6bliAirFESrFGVzgMyS/pysb2tKyEdefu3LR9w6GPfq63/0F9128/eceX3AP3bb2h7yfVOaQy7R6/4Lbwo3e0X/s0nMdHpdObv7izMnn0qVwnvTbjF1y69IxLZ2/atfOef33khb86Pn+oak9aZ4wklrHLGK989COO3Z7z37R/+pkV+f+dy1YDAHAN+aCGASIrdsuVla03Tx14w4kLbm5Vh/am6o823/yW4Xsm+1dtXrdj76kNVGGrjaU4IyOCpOiDSmJYVcAX7FJKZkkAqGA0KHOIL7MYpCaamWFCcpaMiPORxzFbkpocASCNqikzIzs2IxAkNFVRASBRHPYbVUpJHXI1GBCREcVYMRLX3HiGpJ3QGlS1ApEjNPJJGp8IFJREDcCFvFdgQOe5agoKiZlbrQBkIkLo0RQIfWkAYpSVZSISzAhpRODKdnNFA2M0ZG6aKhNmRCzPqAONAWgOnURESaYC5Fy0hjKenMwHAxAwAjEuvYmamJi5IvQlAgApc2qSJQPAqKTEampKjr3rNE2TNOddYlQjdGTGaOCSoqgZYFBjQxgf70x02kcPHQVwqkJtLyAoQILJmvUbNp08NQdGBIxmUlXsTIlTrLKYI6kmNTAgBTRoRJMSOUYQFVFVRC8i7IKZWWyS9Mi7wI4FewcPrltZOUXyQMCxOFPzQqWxrFrOwU8cOvzpC555y9s+tuHwQy+98xNPP/c5+mM/e8k1rzxxZGVsvgev+cn0t3/2pnL801W/16++duuNP/rqHz9y7PBzrrq6e0Hv9ttuH0rct3j84s3nlGvP749t12dceHxsc2/ynKazHgBQ0vTJPWOLe7fsvvnYM+4+cu3DC7PD87/6+JZHX3L8otuE1CEAWavQXkSesKqSVHfNUMsWhKKD8dOf+qff2rimM7ttsLDcNMk550wTSL9ufBFakZx3qW4sSO7AWCFGRaJqOHTOkXcxqVusnRmeGISZ1mCZZlqcauE6zALQQbtmhr7kek/tefuL/e6D26w3gdgNbRRFpmQKAVOdMKULNm9dc/d/73vkrqOXXDe35pzeRDM8dbw5ePKKAw/uXD4+E9rDalgEW+G167ACqY3KGEVLGjO
NsYa2tiU8fOlrXTPY/vBtCn3vPVZL9at/b0uvV9zyj3bvzYv33fitr543c/mLZ8+/cvLc83Fqw/OO/nTrvu3/evU7PzHxnnd+5yM4dIXFYVE9/v2vH/nnv79KJu86Mf/ULy/o2ZDG+jqznNtQ329xx+TtD+3/q8me+N7e5WKmGm+HX3jbq5vlbrcRbhcWxTMKqFFdlh2zMzkt4L2vYsPMwsBmrKCqyYERoUEAFFXLB3uzDAZCRIdMSMaIiMwspgAjKJWpAJEm0SxdzrL+PBwWTWqgFtglFMeOi6I/jA7JTAFVzZIKEI+Qe2YAIy8jjMjP6JjyfDnGNBLumpoiOa//i3pvlusymigAenKOPKHDvJxUIzXO8XBqKlKbIRAwMUK3dXrHysSrj1x44zkPfWL2nv+7PDU9XNv4rhpBSUUIIp4Ni4Ia9C0sTJNE8+bBUfDmua6RUYUM8gxZk4TAZpZxPzQKGURVLYJDRBUa2VoAsiKUmAFRsx0agDm3UD/ct6GKGROSjIDb2XOZjHhlZWms00ki3nsgbGJNiRMI5pk9QhRVE4bR3H4ZEtXGUfsOY0oBqK4qUamqWjtGAN1+j4gEzIdwujd/bmqtnfen18YfTB97xrH1i34wEbnpIwDFDhRNW1MCtNmztk6s2b5eZDAHh278irN45AO/m+694+rzys5UMgeLE7Ov37i3pra3ep0cuXz4HUoynLq86Rew49r8SxcwXTcxd9Hr+vMlEq/Z8535C1/iJGozAIIg1dknbyf0Kkth6eB4ub1XrE3kYHVECaMhSTYlmwMOGHc/+90xnFrz1E8Uva29DffJwBGd+OR5f/Tbj3x0HZ+/aeM2WwBENzG9ZjjsKqA3BEgK5rwHUSJSEUIkQO89OMSMXwYywhH6Xy06ZyO1riIy5faXGUAjGRsWgODZERfOR4cQoyRNllRBQVSyFlU0phqkDEUdmyiSUpqenKwGddUbeGLPARHBIKXESMzARgYwmp2qKVrugAmYQ1lb0mo41iqDYdOoMaEg0Wg2nz18o74WDEw1aca5wUjmYFFFmiaE0Gq1EHEwGACwjsJj8g5IzNCHQOTzXY/AWSxm2bme58tJGCmJsHNN02RfOyLKqu8RYXWaDWgAkgyBAyOscvrEakSM0ZiRRg+kDBtJw15/0F0BADMhorqpAKBVlAysTPOnTntDyUoMAg9MIlXdeEMsvDliEFAyREFAEwQwFBUjAyRz5IjYQDXGpKYcQGm2M3ni1FxrSwd6S1XRvl1PlX2FgEMVHqQI1cYZPues2fdd+wsTrck//Ohr903i+f92Y3ffSjUv3ZN7ynjeWc9+5X2tP/3F1L4bF/aZe2rX43/x8Huuf/nrJ2Z2PtI7ufU1f+A3XhS2XrJ/5hxgDwCuNzc1PLJl7h47ufv799z463N7t5xeOrLz/OKl1923547DL16YOvR2w9hbf397EXrTkEo4vUHPuq08PeadqGOqhsOibA3rWsxIjcz+8aMfec/v/XlyrkikSStKgWHT2vUnTs5LqWap4KAxEbMR9qshuWBNA2aqSswG5hpx/75vyx+ee4RRl3HmaL8odHy25R5a8zPd/sYppVAN2k/tbnVa0SYW6wFHQWZBZHJNjEVwky4sPrWnKCauag5d8o1/GJQzg3LNTG9p7VTBY1PLTXFIU2vMQg1j0psvkm8UWAownpoZHl8sxze4tvSX+3t3vnD7k7cW29ev9NK6IwdPXHjl/Asv3qale8Yrjn71L6on71x74ige+fTTN31e2q0ocXxqZqycuv6Ktd/857v/7wXPfuVbNroTMKz7oX9qHaw5WuA9frF7TeWdT2ujqbplhAlft4e4HAY7j8E4XDK95ZVXvZhmj31y4Xu7Dv7g/OlzJ6Aj4lyH6xTbhYuxLyJEGZusnbFOPRgWzkcVQuDs2ee8Y8l+AzTEjHvGVaw8Z1jxqvnPMOfOIjkg54KAmCKREclIN4higqbM7CBFUEYIjiVJTFL6ggDJaX7EG+YCw2UovHdEuipWytoljBqR8yl7VJWZCQgh39YIAGAjSVQGaSMjEboRkRLyMH0ETGeFFCXGqCNxBzkkr7AY+28cXHlv/8DxyZVvTj7+o/UVNWrb+0nkbuBWDIwoLMgB1bIH2jIG20kIgXM4XEoSE5kXjYFdSqkIXjQSGpNLKSFYK3jVUaRrLsw8ikZ2BoLIoEbMYKvBwExIRjQKfwQAJg+Q8+oM0ciUfEgZ3IQsVplZSkNFgJGzYrQCABU0EFNFIOZALI0MYzMcLaGhrqsBDgBATb2hmg76fUFck6YuOzp5x8yJezcfO+fwjK/TctEAuraGNBgES12y9skjYd1F1ZiOr6OTX7gp7dk73Qybh+6eXbN57eZ5IvRFs4ZO72weqFJo+zqh7/F0LMZ6OKHj0yfiiCiejOLegW56ob7tFfD/e+uPbTkxc8n///0wggPjqiCe8gN9ANhzce2uXzEaKA2LxSu62768UiTBE2uQv7fxKz964leKMMHs261xIkJkGomNuCSUqgEA5zCqtjpt730UcQRoufaCqAKh98zMmio1ysG6MvKdKQLW2nhDZZMQWoCIkBw4M+dDtCQiuXbKqk2cGCbHp1dWVhBRRYqiWFpa8hyMLCXtDfre+xA8IqaUkigYKoIiOTNBNIKE5pQqb8GgNB81gkBiA89sgug8koApWLbYImQ3rjPEnI2Zj3jEYEqMHEIws+FwmI03eXiWL0EAzR9NqpAF1ZiNUM4MyFBREQ2YGAARg3OqidGBKBloTNnsi/9L9UkAKqqYYfJGJgCaBRAKwMAiMjIzGznvCucRNOUtdTKNEQ3a7XZKqiDsfJTUDp5UdXW3DQDeF4mAwShppUkkoZmqCURmIEMABcXMm8lZC2LGLkgSi9Bd7oXCLa/Me9C7uT49tJJUquWWLy47S3/pkpVz18g90y//5+1vfc69b1/THm56YuHA7//m2b/+4ZPHn9b12x54avcrX/mqx17yqmN33f3T217/ocmzx7ddXK698MSmC0+V4zPPARkuV8d22dFH4q7/6izudif37nl8VxdqK7laSbNs54/tWC5WWocOdHZvunY48703HW4feslw5q7B2CIAtLrgK1har+76Kn5Kx/Z1ujIIRRjWQ2NOGhlpYnpSAv/HZz/5U7/wawcPzqkjMkyajg2WU+GZrCi9NZLVpMAI5BQMaNWkqkoZzv31Q+t1eeJ/rr97hdb0UvHd4+MTZ//e8sbny5G7BRyLtamsajKIgQzAcREASJrIjmsTGDTrxiaWUjMvMDa1dQ22N8CyrJ9Z7C3Glf3k4nRYMyjbVcfRqWXRRDsuilr7pw9WJ08X3nVXDo6thP5Z24+c+5yrP/+urX//Lw/99y3Ln/jd/o/cMBdTssfXbrzs7J/5yOnDt8zf/uW4+/vjqSm7BID18MQAD194oFMeGLvxq92vfe3YK181PfVE3QmzVVzYPb3j1X/0G3/nf60paiODFsAchMrJxsTjqbDW81/efsaheOzkLQePHcBtun7dpEVA1woKFmusxcrgyhKThbJIKXlHVdWkHK
hAyEmVQPN0NAsN0MQDaK4No943Hyoxp5spKhgkxRzgY6CqhqCryUuimv2mZiZIhilKUlXErAUzD5AACZEhB+OONMwOAVf9qbjqMBu9AIPT6UQjUUclOVdccORg9T2jHCFEYiLgsikLbDsJ7IJzDghNLcWYUqqbuqljSnHUbTsmEN8r+qFeu9C+tpn+wo6Hbhob7FiEaZ3pS03ex2XwHoEwDdQFAOTVp6sBgIJGjMmENDYpNiLJRNDISCiROZZEuppeDECEKS+I0cgIDBx6AyFgXQ1YAABVs1XtCSqiGRJlEh6SU9UkSUzMjIG1VnLehzL2ExGBKjMlNRUVlQxAABglKHFZUJUaSRqY1VDMGBfSyUbr03FO1BDBOU+GnrlKFRD2sbX1qNm6+adD/7GW37oYBqASsN21Zswf1b7T4Pbd+/T5lzIC7x48+uW/A314fNtOG/ByWk5l6rNOeuhCZ9PSF1938kN+ErH0PbcpTZy9Z/Of9P7r30/f+G9nSumEDgqYgK2bmmNLu7k/6cYTQjWzFp59FfaglsZNdCK5XmvjgY3PTxiib4MBENtIvoA/HEqDVe19EQ+jFicuff9w5juz99v8Zuh1ljtx+uF4+zP7LztdHxVIxwZPD6s+AiooJp5tb40UNc8MAAO76Ykp59zi4mIi0Jy77fKaxdRMFIiocIzIMUZLKSuh0Kwknyw5oICMnhHRG2p+vGsGtmhaNapledNKb1nMiqLQqiKiTnu8qWsEdsFjdqCe4ZchgowS24kIFIQNABiQoyhCCg7MqQgKksNI7BRzfg4aICEhAiqxy2nYaqY54310BB7502OMjij39FVVeV+EokjSJB0Z3EWk8D7Poc6YGJ1zRNm2kxRIVZkIAJgxJwwKrF7nWd0PgJn8jyBggmoiWeWHRMAEoIZgoxATAgDR6IhH0SwSkSBn24BojDGEkKpUtosGUhGYmlSwj8x1jA0YiSUAJGMFh2wAnPf6AAaZWAKqiKiAWtBYpNpMAxCURdX0fclaDXc3/UOu5QZaY10YXLm+ft9zV5i0X2793NaPPnf5c2/x/+6f7XeubDjyn5888OJXVltetXDy1AMrxw88VJ142UfvekUJANtS3T+xpznx+PKur9bHHxvOPSmnTyJAINh61o5mtnxo9yMKUCSPDQ/Lzo7xJFIN6v448Mq3vm5Er/vD8rY3vaje8Fcb91Mi8xUC2Ms/3br1DTXcMajfxfJ5UzT2IUpCBCQ4OXf8x9764/uGi3ffddulVz/v9MnllvkasVhOvlWWDj27ngyccykKqkWJCuZo1LihGjp2AABoh7sBAJZo5lfufiYtbfrdF54/I67SNpeT3veaZlhgMLQKWp5i0zREziOBIpYeEHtVLBy0RK2eX8QusufFo+04NDcuYbI7qMZ8ZxCAaWpC55tTc62mf8IATNbE/uy7fvvAnY+evuZ1hnz1z/7s0dTx5GH8rKWzzuFTzRit7eLTcaracv2PXfLatx98/L6n7/yfpQfv7u7bTyhBG4DhzCPw5ue2v/yN4f/cNvea61uTT9KuDRs2P/91l2x4tgNMmCAAVAhVwNmhA2lrZTHe/+3PPfJAc82LX/esa16wZ+UhMiGi0A6pGSDEji/qQVU6VOLcxYpYLbUnSim5IiCSqUbI4UjoiYmRjDJHQgHdakUctRfAYhkCi47QwCRBkobRkXN1bEDBe4eIsa7NxJCjJAXwRUBgqZuMO0fPOXwUR4GFaKAZs6eK7JjImB2uKoeXdf7Dc38LAAgj9gcCwuozFuEMxylT+Eb3KiHlir56hjCDEbzxTNkefZ1M8AEEA0ZqHZo/7fX9zbc2pvERgxNX73jLM7szP5XRkX11PpZZFpZTV898Sn5Jo89cdd/mBF/8X+DAM3+71RJsuOrGzt9ytbcbfcIqRMBU1Hu/2hDT6sfzOeaH33H0/yJSD/MPYSSwM0SABpoFnbtx8FkH/swxaDTMNTVE7mH4Tm+50K9Ut0/WQRukvgkidBHBSMFv7rbmD8s3B9rrxqtX4BIg3meqgBS8tiYSOzhO44V99qa1BmCgUfVkPDXff/wlFBr80R9mzFnwpgNIexhxgKDa90AAc43f6ze3TRXACMEQe93PCHlDMszV10byY01GLv97auZSseB7W9PxOQ1LczWlXWqc5uzIgp74m+EvJa1PNUf+a/4TnkKexwLAO/i9M25NUiACH3woi6VhX0w5cEAYplRHZfyhQymKeEZyZCKeHTMrQPayDwYDSWai0oziTJBBEZJotjmJjZzi2TQgYIzsPTZNE0Jo6mSknjiShhAARitby6iQfPbM+g3LunYERGBwYoQkKZnnSCRETqGjrsEMW/zhcTb/ql3mcwHQ6r0/UmiZZc9JFm15789Q27KDWUSIgci32+2maQZVlbfXLsd/kYmoAIACEYkqex9jBIKsZQMAJECkzJxDACKHaKqRGZHYZHQ0MQVAHuUBGxBzphKQYxPNqrFaaiJyLlRVQ44VhDmYWWpi4djM0HMgVzVNC9lAFUQNMEfsCIojJGea8XoIiGpAhOhwmBIzsppQqlQa0Lqfpjudo8zNUtcTcWVhavw9zzrJqJvG4d2bPz1hC28f/O3eda9/Ei8+8nNXHHYXz9vZcITAthSbythbuWrrzMrH3/WMh2+/TO23lo48hFVJbdOI2jShJEHiNN9dPLbnaMu3Y2jqgBO+A4PqBZGLtOLDuEiaDEUFrnz6WtLxs3d/6+AYJQdbd9EzP5uuvdtf89HJd7/v5OBfu3Sdt99kqs0TkQIZkPGtN99y9hUXPbZ2obf7nktnz0OhernSmbEpManjUm9IoUhgKbtDDQK7JDJyqSJ6QacqAXBdJwHAMs0Q0cLC0hO7n3zp2WdLGoikYFonahwiQadpJS+IqDH5okxig34VQjCCCKENQ0xIuDwYWCdw5dcW0ZtPk62xxe6yFmPLvJzG22PVQOPKxX/8kd71r3r4N375ede8tnXujsePXLy1NH9w39YXvmbxbx5YfOFz64amknanLaivTz+9Z+7Y5s1XbLzguZsufzHq8NSJp+ulw+nI/iOHD08e3rvc67/tzYuf++RjX/7O8Mf//Zcv6F4/jt0D4fEwcGlTggSgJmcNa0JK5HskS7xpYdvvvf+9c0hPHL0HERos2+1xaJCQa0FBaHXGtOlTsZqXacqEoomYmhH9H0FRBSKZoBbGQS2X3ZwXvzqFhoyLEYMElnlzgAhgSa3RiIiihjjCgyKiKaIjFDJLKaqCpSgCtpoSC24EgUoiwkT4v8ua5VnRaiPoDBF/bOots27d6vEcDMEhn6k2GZOZB9CuDN6HEIoMoCDnAFBFG0lNbGKMIsoAlrt2QkNmGyYMAH7a+9PFwp9vuKVbuHccef4VacvAEgSRYdL8ihMBZS0YY24PRg5KjcOhmjZ1qurKEETUewaAwMxEagZiMcZkCmYKJkirpDBCMB6t3U3MRNR0VGzNjD055ywvXcxMNakSuSipruvStweDQdkp8lMun9/z0kEBR7QsAEYCIkSUGM0xAQbmKGIAhDjfnPhK9zOvG
vvxDWGzgBEhEaMZGlSspWmnGbt3+uBNFxydSPCa+89u9yZqHFARqF+5iTX26APnz9Ub3/HeR373jy/Z1LnvwMLk/d/SYrJsmggGLf/ynzhhxm/beONvLfzaM+V2aaCpSdAe/Xq/GXKl7RaEMwW4jk2kgblOi8fmTE9Xw0mmNqQ5SctXnzWzaSuvDKhoeWddnty17YaBmxQOVbnWiICcr5eLakVCa1hOO4m9zXcOZx+b3vUbe1/5chlbcsPSYOCxmLGzL5i/4g0nf3W+PvrxPb//2qm3rS83qNJcdfSrS5+hAsFziwtC1JhMkldwxKBAphxV1UTFDC3F7ODxZUmAycw5JoQYo0jUJORcW6BhUcagZEQJjcViSslUwQhAkWE1ui8T3EyN0KWozIwATGyrgY8joxG6FOsoKV8SuMriYMTs72awiGKEHRcsUykYB2Q+d5WqhoCqhARmYBZjzAoPIjLV7JQbXYHM2cuX8zSJaDgcBg6Ayt7n6M+YYr/fz8m7zjkgAlO1BjUTKwHJwGgUrjzyLJlm0YfRmQUwIKARqCIZ5SpNo91S1hKSga5GnDERucDeGYmpppSAMCVlZKIsyUAOhKCBOFbRmERVGwnk2XEMFIQlNTUp5ChuxOxfglHkuTtzTKGSCgBtoEuChfeCa6Ynv3/XnUuL3YlOqZrM83ljy9Oumij17ok3PhauZWt+ceP9ADART24ePHbpQ7elm+/b+vbXT0+OWaRBv7VtXfv7/nTrxO7FmXXvGRv79W7vZMAyOgqFi9F77EV5xllbCsPTJ+e1jpOdsaVmuIn0Cur0ZRmamkIRhzLu3bFLXlR259763sfBddCNUd2XtFJT3NQLE29rL91R6wdic424t5a2mxCxaZpWq9Xrdh+78XbP/LDoygtfecOb3lZ1/LDbWyp4XIiIQgiVRENLSVohpLpBHK17EFERXNkqEWBda1SAgw+d9lQCa5yj4aKH2DcgIhADrMyr1AkckXdVqhGxdJxSg+Rd7MbQwnXrl8/bufWaV5347AenDxxL1C16zRHE8TAJ/eWZF71q/b9++qG/+Dv56HvXaFm3tl3zgX/tA401rcNbd96woTr97j87+IFfbpeh+1t/XA8EAnk0F43bMxGauaV9eMqxX1Nge2xs0/SGiwc7eFMritjyYCDVwu/vXfjQzC9/5uf/fudDDww3nrQ2DFs1MMA8cOM1SGG+3fclY/h0sc2ta8Gm5eHhwnuqyIGPBuiMk29Dy9RHxDq4iRDKsuh2u2PtVqqGhJiTKaNpjuxNaJC1vIRA6HHUOCJiLoP5T1JNpglwtA4wNAI18M4lNQU1tXpYMWMZCkOAJoGaJ58omhgREVAjyZJkx8LIOAFZIkUgAmiskO31+VnADCJqZmto7Xq3eVVyBQo2OqcbJJUoYghoSAbelY5d4MDsmTyhByBwUEvdYKxZgCGDA9EUER0FcamD2IhLODy3uegFcXDL1ke/Fk689OBFa4tpqnvDNgNQgqpiHecAhIboiBwxGjQp1rEBauq6riw10JiZOckbL8hZcGYKGjVmMoOZAQVEdEgIypwncyammkTYFCDvwZnZATmkHKqskIwsaTLjJqU6RgbPqeUr5xhD6RGxCK38jIuQOZ7ARAwoYAriCo9qDZonZrGIltAwCoObdRu2trb3+310DtGcd2Rg7BJGj/7awfgDxic2L/SPF+c8ubbfUewP3fRGq+tzvn7/5u2Xhfb5U4NxN/OCTT+4aao/VkKhXcDtm7aMP3VZYQ8tX1j4iy+e726svUeUFPfclTYeXTPcuF4OPlXS1JkCvNTiyWYsWu3IxpOtSA8MJ1zYwmP33LovXlusX7e+XFA3xmvC8pqHv3iq3Hly5pKj5zwDkkoYp8I8D41bHaJOWm4df2Pdfdtg652waR6CpbopezAD61yreOX+t21qnePQBSxmw6Zz11zgXTvMe1gCApsoWylGTGZAqYkCiEhJFXuDBlSIqiaRGogyUkopQmLMJCkCGm0QySCYNZ5CBDKsGcgSR6pNgVFzBV6FtuQBSumDRmViQR2lehA1mjyyiLjgwUBVMx+N2YslRnRI5hFAeeTJxhqAAArjQrHWFMFcCJwEEQWSgmXcsaBlthajQ4bc4GqKRgZACkbONU2TyzMi9ldWcpIHASWDHL+NjERkIu2yZYbOswJYBjBniA4igwGaY26aJvMdgaiua+8cjBrf0RkRBBSA1AmYmBkgEBBzbtsZzTnKDTVmIk1EkUhGOZ0ppXoEvBHxzqWmJs+esUnJOZ/ENOlY0erqIBiBZ3KhU6ceMQCEBOKMCDk/TUABMjYuQZOAGZmcFdKoRyCt77rz1habc64WHaT+plYyhBbrK+JXHut9JCzvv8geuxwe6q/M92p44jub6MiR6vxy9pf+4sT9j89sHsOpYvaNr5//2ufK/srasv1bOP3uetG5NRaHzoXY6MzkxKlTpxaG3Yrl4suu3PvkrhjTy9tTHaSVGDuu7Jpxacl6xy562dYnbgtk4qhBIVLyJVEICuSc/zTKDwr51368a4C/XaZPRJ+dVKDtDWtx0CjZ7ffctv/pg29680+eu3373KlTEQouQj5pEoECeO9TSrGuDQAdO0JQJR/89PTaCeqJYQ8nHXMzSIIAwrq0YFGqokQyBDUoG3DBOUccU50sqSZGmOx0HCNNznb+42vTH/rYWb/+mydn1ndXBrVfHl628zS1dnzyq+EL9yxKf/L1r3/4i9/b/ob/Y1e8eu7bX5sJ1b2//7vTw9NHV5aaMLbmPTec1ZyYWFqpn/fawzQWcFHGgQeG7CQBo8Y0OH7w8TR/hJqFwdKx+ad3ydze3sFDC3v3NkeejCt9nYefufnDM/ObHr/qttOTJ4+NP2WFwQmgFkkZ4SjBAtsKbPt4a/nLMDh9uj5+fGOnFKmNgEk9NAwxat14HFAtWAdsmqbp9XqZBsXBj/oqRE3SaDO0VGkSERRBRGMCNVBb1SSPjqW5qVVABRMDSdZIEjExreuoqrGRmBR9IOejSQPmifNaC4xENMYcYeQYDQ2yAxURM0ERmDJVx+CHQ91VNI+emZKNNr4GKSW1ZJJyk3FmnGaGTVXHuk4pakoS62o4HPYHw/6w6Vdx2KQmZoSFmQEhOS48tNoFoqMCgDvLduwtS5eO9cNTnWNfH39qTeRF77xqiiIVsaa6rjMcZ+S2HPmOSAmNXfb7mlkZApqlplHV3Kbk8WPuAFJKI7qPqKS8BRQxjSpiFmOMURTIkJvV5AwCzns7RgS1VDfSRAbq9Wt2LqUEoMF5N7LHuIyLy3TBkbqHwHtPzORdixwzR4ekVgqqZwAA0H5vhdBgtLlHIgIzJ4X4Zm2cuOzEWKX6wLbugJpUOTVqtKV79m09emilPum6dHs7PP700WL//cogzkFspUHvkuuWj+wf/+97rwEAPHHq5ByfPLDusRvXHt09afWAnn6Kyfdh6cyftWNrdWKi1tZwvJwCKM59xjk/9/srZz9zf909O6ZDD+5ZWopubCzROMHkeqLZxQcnH/379Xu+FqrlMJw3s9p3DDE0C25s
vPBTLnaG536I+y0A3HhkbP1BCBquP/ZL58VLg5CKAmLwZUppOByyYyYsWwUROecAbWQ5T1r3Bqk/jKCDullYWZ6bPz3fXR40tRBEGM2TETHG2FS1rU6YFSEAAGqyBCmqagOKIt773G5mjmO2qyEiKRIiqjE65xw5FjAKLl85I/mB5tXsaEeSez2xpJDfBWbSjuQ5DEFO1/3MY/SiU87ZCN+t/+vqdUSu9AENUhMlJkQsnM/9bv5e3vtOp5PHzvkayxfziB4TY55L55tLkuXbGQAQ2RRVCNTQIDU1AYJaq9XyPNKKa5Iz2igzk9EDgBGZXcgGgTwbyl4DUDNRMgDVpmlSSkSOEL33ScUFj6tD8uGw70xFYkQjH0yAjb0rKk0tRGJwqiwipMaECBJy4wtGNnpZIAAKKC1rRXNDwYJKqnVqsvPAI/ecOrGnPcYrgy4nYs8nh0JmpUMEbZRfu/Sha9Mt03YqJhwk/UoVJmBaeqfG14/58896eteB3R/5a33/BzeDr8wd7A4vP//Cv5w4Nzan6iJQBLXGkV+zZfO5m7d40ZN7Dg9IL6D2j7qpk8vLlbkaa1/61sbty+2tp7defu7ubydtQ21+qXZDSENAaiORpURE/jEun9vBz7N9rKJ/S36SSuYxHwa9QZPEraTpcnxu8cQ/ffwDX/qvz8dAylw3KTYNm5Iao610u6LqiYkImYBQAKnfGw4G9cxYvRhLQ1paXCnXls8/7+I1k+Xscneae2GYJJlhq8SqTCDgI0Jwky1m9KE1rI8vnaqrJd2y0297xtyhXvehRybWMlx49aY///edX//u4Fc/WOy4qircybKlE+dv2LJ+WB298F8/dWjLdnr4O9N77zv+iufsunvX2NKxzfvvH2/MTYwfueDioq4dtEIP6mANqxRVNCtaJTg5tvjUvmMPDOMp8jwUrSn5SRhbf/Z4GCvJNWNHSXzotbpjpxtXOQ1umicHE2Nx2n2Pyj/oXP+PP3WZ/9XXvP6dV/VPlHE5hrFCQ/aJiniL4JS4SkENLRl6AEIgTdLv96uqEhABNVAGJiVK6pEQWJSSQoxJCQUhmtaZ05P9HCJISgqsBFFEaySNsXbgkIm9C50iQkoaY4yg6NHXmtRMAJMKgAEKWAoeCnKpbvpVv05S1zE1Yk3yKEoWITWxghQtCRAoqMWYUpOP6jFGRCZyKakmgAgj5gChiEaxRq2fmtqkFqmaWDVN1aQkUkvdHfYGcVDHodS11Q0m9ezKUJSFd0WJVjZlWUQXo3Td+Jpl9+YDzwf0n9vwyFN4dLqZ6DmsbFm9d+KRAlAAoKx5FomqKWoEZU1CYETgkJo6ISIQkuBIyIJQpbjcH4hxMt/IsNam1qZou6QNMxExqU8KaiEpDgZ1k5IooA+NYkqaxBq1WiwBJiVBjojBY4oREQE5zwx96dgZADtDUkFtUEDFMClLREhEioEIrVBzCCoRmgYN0qDRqElMk5EgJQ1ArVBQGLokyTdXHNlU9nX3ZP/w7Mnxfo+wbKTZ+NiTGhAPHi+jJt/uff79m5clchnne8L1+TuOlB15+OZWb3yLa4YP/0+6+8aZ++4OdbO2Ra0eEptHKEPROvMnze2WU0fH/uqD/JFvLWvx3Nf90nXv+7PzPvzByV/5FX/BlqK3sO/AvuQ8oZJKjwvHME7luoe/yAv7qVrxi4f90tFW/2gHbGKw3KPylZvDH/b+7YL+CwFt7cHyim/C//nmu17W/VkXWbxKaWgw7koXbNJzSQEACaImJQMiiqa9punWtS9bg8FgsTvodvvVyqAA4sTDfnP02NxKd9gdNoNhbIaNNNGSpLqBJKhmSWJKmBeoxJhU64jeY1KpGmdWtgtzIKSI0imCsJhFgaguGiqoeDAUwwzgrZsYoyEOmmGyJsWhRBXAlJLWQmrDuhrWdWpsgFEsmagvQi2gQqrUbzSKOKLSeW/mCNBENaXUVLESEbEklgyhIYumCpDIATsF6A/rOpoZNrFqB6eqjBaACmRvZjEhYpOiQyUUAiFCIBSJLNJSiYjGjn3IYu0MpSHnatEEmKJqMjayZADApXOkjhRStJjYyGEgIgrQOE7olMukJKNQpqSaaoak4iC/bs3ZXy32jUZVhSaRNA4b014oVaVmJJcwD+eFuUEzA8oRDeBMfQQWMCeGolWUvi7k/4ypXwULY2OP3nPvmC+rCC2DZBVEfPhEa7HB8WC3F681DpsnJDAsDbFWu/VgcVdcnuu4otZHbvrM8fe+Dn7/+vbH/3TDo/cpU8uo1SoPHXngihb/adh61gC62FWwNdPTM0DjU+sHIMfr3sXJ/T9lOYwryjIJooKduq5OnTix8xoA2PbYLR4rBB/HfXv7Oj8+TQUSDocttcbAYTNQfGfBby/01TK8sz+8uE5JXG3keOPFZy/G5eHyQrM0/51bv/HtPd/vzIaGUyUqCUWiQPKOQmQOXsBiSrnxcYDaWzm9oc0LdQkAm7Zt+KWX//hlL7jiazffsf/h/3p5e33f+Y1pWKp2ByW30TcNV/0hRAc1jW878qpr15yzZfPVr3j80x/q/tqbZ1/zq0tjk+de+/wDX7t7odCp061n/Myb9t9z/7bts91X/3a4cuPu3/q1dd+9XW/4ybO+d/vez31o81jZGp86fPENFz7xTXadNFhZvOia7mQL6qqJ2got1DoJjnU6dWxiI2BGjE1TP31o34UXrpmZXVMvYW+4Ug0XU2q4CLu3/8B5b3WkgibSbGljS/5Yt7WyvT73sp949UULL2214ZIrLr/rjW+endDh9rVNNZdwkGWNgKo8kjLkvZFzjpCZsmJQCfPp0RgEkE2FHPsimGJT1Sml4ELuezLoOJMf1fIQ3yxFFwK7ACrOOcdeVUEsNVFVHWDpstde42qMPCCxdwbYYgdo3vtYZSCiRkt5z1LFpqqNgy/IoWNhJAI0BDAlZHIGpiKJpKn6mNNeAJSI6YeBaCAqqo4AUjLATGs1NKOkhpokh3rmt9WIXJcHA84hmVMPHqyfmsUAr1iavXVhev+aE/++5uHfO7G9R4mtLdHMBVwdDzCggoCamKJhbnBzjlGKadTbhxBrQcyoz2RmzK6JNhwOnXPee0Cqo6UkzAJGKYkYGELdJERMKdEqQmvIgICSt1+GEURMGtEw0sUgAsQ6sacsFsu8fsgWcAIwXO2XOEuNTDWLw0a/YgDIVi4zZCKHSJRMqbHgfOXwpFZre3TuXOfRC/v37uye99SkBJ75+o2zjz5hrZJOHcf+gRdfcGH7i7q4Vjp1rAorO+nSa3t77ylPLcPi+LqxpWPsWERXBv0WwvhEJ/Qb59suLVaD/g9H0BPbJn/mF9uXXqYwNr9xcnZ45IM3//Vrdzz75e//wK6/vWLqn//xs0/f8/jabVfsvGClfhosJN+uq97RY/vpE7/Yu/w1dsGLwvj6qQ4/88AT39943abi4HMO37rn9NyznrfjwbfC29697vy9dfinayg2qfSJkM0AsAgT6Dr9GLUkAMTkuU31YFjXtREOBoMitPr97sLCQoXgiIFQ1BCEmBSsX1cuoflg0TM
jeee9N7Ek0RUud4e5c2XmqJJSyusBRJSYLwxW1KiRFMV7RvIKSTUiKGIQqKoKVgFb+VrKPlrvnGoyAAQFpYzRMEQ1EDPPHIiReNRnggGY5WFMEXLLbqOLSgFGmVaZTTjywjUVGkhKeSkVWi3QRtWcc2aCWd5PvG3btiNHjohIg+wAxbImUsmREmoSzgGmaJm7CUA5XaRkn1LiIiRVkxRCMIkoKobeB4Ua4ygMW9RCaEFSy0lFIwoemqGIiUSMSmieWFe1FewLYpaYCJ2KqSYAaIYNAIllmhACGNoInIZ5w2mmIIQIZlEFDEUSeq6qquUdGnfK4sSRw4cOPc3eaRJVSYrouKrl6SWmbemLxc+uq/eeHtKggUGE0xX+40NuGGPRnsIbv1B89StrrUplRyEQckLBpu+aFlg4dPzwMybG/roY+xbIbd04AJ2rV1rFxLoY3zC15gYsUQYnm1j6Ti/1PFpsYkjx4EU3rDv8yPRgRUHVloPo4FhqpkN5Yvnk1EaNPc+hagaeOaXEX/D0kJd/G8Y7Bvp7ZedjRX+lv+HsbVe94ZVL/a6tVJ3ZtUeD/OPf/dXP/NyvYKu9tNxtO3ZJkzS1sdTqkAJSrGvXaTtQmly/dtwfXJISAH7ix3903brJD33yS5/9198rtP7G+OzywokrPP9OvVQGrJpUbTibLU698zcPfuHGLc+89Lzf+/P9N321fvSe+rFdl//puw8N5qoVe/rDf+bu+M+z3vKRvd/+1uzWdfE7t9RPjRenHj/8mheev2//uAP9z4/oeGvz2Gxsh+74xvnZc1/+9T/1dY2uPPyy67vLg2mLsdXiFEPbmWEaxmRKwKICZMzY8vzAw9/dtuWcDdNbm9qDpbHZUhqs2wMDAK9cqT+01Nm4dhjGtGXLx0/sPX7r5Gcem3zLG1eOb0qLu3XsnKI90ZyY92EcKiUzQMgKUURCQMZRxFhZlq1WQQyaJMY6D3ckmTdNGoHIAMuyLIIrPJspMTtigFVIRJZioYZWCcxN04A6x+SQfVEMh3V+sgTnvfdCIy0JMmWbYsEuB/8yYKfTWVlexj7E1JCBKqSULCUkoMjsjHm13hvkaHQTMjMFI6KkChqzOiyKEgXOSTfozRpSK31RS6SsWFYzXFVmZWYds3PBrb5l+2ZIJh5TnSI6LtBb7HtcG6fevHTh+8aXb1779AvirmeunDtfNMTgRQlIQQVMQVHR1HJIaZa25EwFDkxEdTOsBgnRMXMmdKJCjJJTxiUBIRlB3QiCi4IAFg0y9DCnStR1jQhVVY8GmcQiKiCgiEaMGEhQhZkQ0TkmouAKQqeSkxYULW+gQUHBTJBcPhoAqJGaAOY6AAYmJgrCyERA3hEzkikrJgHzbQUf2s85uOmJzY8dGe+fOBcvemhx5Y7/Gdt6YRxKPTjd/Z//mbnl+xhCbMSbb9fxGS9ZEqF79m+r3aA3s2Vs8ViMiZmVcTk1ZatjhvWwctiXZ77kTAGe+t0Pdradt7jrsa1nz55cv/2/yiNf//aRC8OWubs/UH/0ny5N8pp12w9Pdg7OPzWlziZ8I9X0uulOGMO55vQjXx/f9e1Nz7lo09zSw+PPmL/87e+680NhuPfZi01ccxIAHBwZlGsLHwDAVT6VfSdIiEvLy+PtSe9cGiQzS5CGw6HEGGMc1g0izs3NMUEyJXajs6mBZRUVATskhRRjt2kAwHnfKkPpAwGmqhopKrJ+cGS0Hbl0ADGmmGNAASDGCMTeKHfeycABE6CioYmqmpGiri47UxZbIYxcSTHGbIVXABd8zuBNTURUdk4IDNASgEAIDhGLoqjrGvLaOCu00WVizEh0QCSOLIn3bjismb3GFEIwTZkKGYiYuWmakydPVk3D3lsOUMjCJQRmyrHhSMSIjMg+E1XVzCwZAas2YGogOZ6N1BgQzDVNoypGObOTkZwag0U7c+xefTOzgKQMgOrYmyWxrJJTVCdiDWl2ThKDJHEuiNnItWaQVab5SzkwzSIsQARIhIjo1WmjPnDGv09OTx584pFh1V27dq00sZY655aunShffd7y1/e5Q1dvXxcfnR+Qmd59hN9/Dy91nQP1DVgi3xkPU1sWji4W7QHEYVlMKxeuAd/gynkbD84dH2/8m9za10745bnuoZNVpzy1bXxtOTYxv7zS7S0XFAhSCA00TN6T4cGLXnbxXZ9qfM3olDCsBFhTmPJYMbkLBjX2U+qhtkQTEznnqicbe37BfyXy/qr3Ymm/s3P3V765/tEncbyF7WLct5/etTut9D/8vj9/49t+euvOC5cWB4U6xNTY0GFbJTnAdig0iTOw+vjp9ZcV0lkLAP/y//5rv3da5k5vnxifGNu4fLrbbRbvd3jkulec/f3vhNmtF/z3vV9/17tf9Oqffezex07vPuqPme45SmjS1Hs//Ln2z71tttzQufz6/rfun//EF/Hg3pOHjq5pnazmhltwcmjD9hTHem0samcyBJV+/+6rXupSvX3vHfVwQL/4e/snpsYXawikwEZSgaC4GsQxp6hEXNcVe4qxmZhsHTvx1NLK8Z0XPG/D2h3HT59s6nqmWo8IXJGM6XCqGQ52NxOMNQSxjU/0thyK5drzJjfPHAN66a+/b49id2UherBsn9CRP4UAHRISGEIRAhGoKjsOIRRFkU+4OV86Sg6/Q0fsGEXEe8yRIIw58BJHsQFgBiimjM6zIyKNCYk7HZfvpZSUiEQiOc5SjrpJIoLOZ/Q/Io51OqZKhHVsUpRB3TRNo4CeGQmQyRDzMVxGZB8gzhsxEBE0G01zVyEh+UZXE1UlREPLPS4zOecRkdij8wBgAM4574uciZv1MoiYHIgDFaAE3lHQUCAut4rrB9c8ePKpWzYd+uT0Y89J50/V2CAKMwmYAICY5ade7iZXBVMuNE1jqiICas57QNbRCZmco3wGJyIKhQAkRVLy3sfReYFFs6TMxaTMvixCjDH4thmaQDIyQzQZxVeA5VjAvJnLNpXsg1LU1XVg/kmZ4siKldH0lFMhDY0Q3KiEM7P3nhznr+CJI0AkowInhn7BpYvn1607PXHyLDuwZvGZX/zaCRUbIDaV76w79jcfHJQ0s2Z6pak0wuzZePbl/fvu3eak045Vb3rz1IndTYwBIISi11STaoVrJzkVf+Yv1vzc7515mBYJ7r3pu1rWO66+oDe17b7hHMwNv7nr/jcvh2Zl/kC3ufjK62XDBfv3PA7VickWULWS+mRFx18QZjedj9MbCvZw+LG73/Olq7/9z/A/fxE3n7dc68TZ0wDA1F9z3gtwx/b+YuXXttrDIoydEoncX3H94UClkmgm2vSF16z0+nl30OtX6HhQVaEsYiMiiZnLIoBRjGKmDES8WlQARGRYNSlpYNeZ6AwGPTJIpmVZiogvAgCcMcsSUUEcRRDBOawATExSamB0bAVEUUUCSdmcw86FM/C4bL/JA5WmaRpJuhoy7UfcaQNARRjxswBbrRYzx1SXZdmklFJyWUZAoxsQgOxMgDGSQH5cYIyRSAyiI1aJ3vvchSPi4uJiq9XKeV9ZnxyYVM2SoH
fMfrTf1VGEw2hCBiCm6Fhig0ymqaokOKfAGUJLAGKkCmqKaE1KrImMFPGMCCNX4qw2NGJw7ES8QmMo7CQJIEoy59nMCEd8PAUFwNzxE4xCPw2UwcTUwBBWZWgGHhDMqWqyBoAmy/LwU3tFGwUbDofouI6NI/qJnSvTpf3+dyfo0vbXd8u37ipPdenoAMASGLjCJVPEcRf7aW6hA65O1LYJkcLHfipXUvRhif0gNTEeanHblVG6FyhrGhv4cu74YeeoDK1CmgqGJB1BMJXFrZcNx2d2PnVb5Imy6TZqUsb+wnyH2l3lOyhC6pQM4iBzW5xzJREQ0nupf9uK/mPTv3PFv6Mzd8fRKStM0jxKa3yMp9YQps9+6iO/+H/eM7PhnFNzSyipLJ2kpIbgfNTkDJ2B7bjqmTPjN+1rJgFgMH3uxpWF427P8eq403OkSKh04fZLXvu1r331VW/fvnZqd23DOz+hh34nrN2hS7dMnFUUV1++dt3EoO5c8MZX1haPf+cJmZnobN/c/9qnynEKw7He5nM7utgbDClMxUEVi8iaODWDdpv7xYFLXrH9iW8NF+e3ffAfbpu6ID1wB0zP9ABbUX1BlcIE+C43UtcIHEJhViSVVqvTr4atdhljvP/hb27ecuE5G3cK6OWHrvvezi9O1usX4tF62gASVGniCFi/Pvs/pwatqcmJpnfg4IVXvrLeeUF314PFVIhVTrFWA2MAVCPIz1jHwXnPqxPX4L33zmXjvKoqCKdkCKaYof/OiXMjFbRfxayfuSebFAMiFgxmTB5bFlNCMlM0s6DKzgGAZXiEkuNgSCLC5DDfKgatVukc17EaDKqUUvI+xigiwXEiZUJngEYCCmpOUVDMQFJKlM6kDZqp9w5H6EpIqvlDQIgjnYh3wRM6Dt77ApAhh+WxRyZEZKT87BuoIRk5iCUUKdbt0FGixODpZ3ov+17/P/Z2Dn5x4r6fXn7F0/G0byv2kcwoaSOiJmQAhia51hER6SoSiIm994Nh7ZzzgSWZSFJV731RFJUaGBlAAgRyqqoCZuZdaSpKaiJlu/TOV1UiRCgI2fPoWWOkgpBE0CubmV/9rWV1DBEpyOjZkkXugEhI5GAEGciu4JGmhpUQ0Xtf+IIz3hDJoXPoIAVEALR+6WxQt0LnWXOb/nvznocvGlzW340Wmu2b27uPDoqJ5c78eFkMNRbR1PuLX3C4u1w+/D3lOEdg3elNZ+2703sfY2Szuqm7vcHmzsTR3uHZ515nMjhTgJtqntik3W4GvYnT+/BY/8TcU/d8pXv08ped86yX7rj1sxsf+Pxxuu7CV/6CfPgP6rn9/QsuKFJsWnVrUPL6UDvtfP/e21/7d0XdfcnXf19SU5w6jtT4+3YBwLE17vK3vqtLYyWsAA1cZ7JeHKokiak7rBU01bWqLSyvNLaAiP1+37uiP6hCCETUJC1CEJFRSqZlLCITQeAQVWSkPEJCNLNGEg4GTZNyfLuqppTYO+ec0chpGtgRETWNYjIzQE4opuoSKkJEi5owieGoCuZam1IqsipV8hhGjTgZiGa2BZfOs3Mj0zQAmjISsjOznC7nAy8sLWUtWF3XzD43poLoVptLkWiGInnDxcDJSJKBpuS4BADTPFTBLNFSVQuIBoaaiAExqbKpgvlsL1RjxhxxSETMWKk69hZNkxA59GSICUChLsqg6qwBAhOJmiqHYq5ARIcjImt+nYgoq8xXRENPBOATsWrjhM2DGTp2CKMTiVFCy9Z7BCRDzlscAOecoiWVDCeAUZqiSY67dgCWBt2VfXufDME1TeNcqKQuQ/CWfv1Zw8/v8k+dlvNDa6kXTxxjBEVHiK4x9S60yEXXr60lE+tdU9fdXvBxaCvOksSyREenBw7dclBKVW9xmZmOoVLVTNlkcAwoaNzz1oYJS92CysrSvsuu93Vv++P39mPPwAlWg/GxrR/77GPv+Y0Lx4oj3An7D0fTaDo2Pj4cDuuYMUTgnAs3lunZJJ+smpu78Ceh+PdZN05LVx3WZ/fM9/ye4L8WPvxXf/6qH3vzy1/zur17nqr7qRgrJWmdIisgmWsV5RzytrHe0/0CAJ61M+xemnLL685rtXDrpB4c1BPp0sMnT3zridk/+ENo2cJnP77eQTFu02efU9xz0/ynP+y+971jx5daj333wKGHxl76qrBtzVk3PPcHt97WZi7ceJqEzqETDa0QWyEhsnIamlbWmi5iPXDFwe3XvOD+j1/yvUfnx2Yf+X8/sGW8rFL0vjDQWlLLFYhYClK73esNLDamqAJNLSqWTAK2fOEPHtk7OHX8wvOu42bHcx976/cu/bQftgbd49IMix5SlLNvK9bf3uu1sFfRcnN8YeHgk0/+IPpW0bMADZjmsQmOOC6GSMwcioKAvC9arSKXKCTHBMSGBkkjUaNgpmgCzjkmSJnG7nBUfpGYPSKCUdnK3lzQDMNyVJCdyUA1w5GA0ExECM6EgYtzLnN/QM0F16Q6JO9cQCDqDQY2yDutqEJKbEokgpDDw6s0zDsZQGLyiBilGfV0ZmiIRN5zRsalFMk5co4cIzOxyypKIpcJvbBK+DIEBFDVMfGNNi6BOtSMi/WsFpddvNjO/ZHexV8s7/yse/JFfNm2sG45Vk3RYMyTc0nZeqtmKs65pqmy9tKxR2TVNKwr51k1gQAiB3aIbACIXFVDdgzGgE6zKwpBVK0RR5w9l4UPZtbudJwLwECOFfKSzxwYoolEjVE02qpDmph9CNmIbNl1DQo5fZ0pLxcUzEwyaiBv0RgZADhvLkkVkJkdZQeqMroklRC1Ch8Jrzmx+RtLe7rb5btvvfLHPnZPe2rsqIitzLf8WBwMeXKMOq1N25fWbR7ceP/F2ml8nVR4MD47uXwizzdU1ZFb7g/Wjk0GoPLB7y2ef/6ZAjzYd+zSl5y3ZcO64w/vX6jWxP5Faza/aWLjcw+fkFNnw563/cjE/JM/9YYbBh/6k07vOJ5z/ev/7p+/9ifv2Z6WFji1B+XOfU8vdnY+9ozr3/IvP99uaFi4w7KyFlqdZQYAvOFV/Suej6ePFePrWNLpo4+L9jTFBnwcGx+ePK710NQiFJrEOTc9ObW80puanDQA8UwEZpiBw8w8EvIDGkhBzpsm/f+oes94u67q3HuUOddau5yuakmWJduyLfeKCwabajqEXhMgCRBC6s1NcpNLbgqE9EJCICEhJIBDDcQ0U9x7t+UuS5as3k7dZa055xjj/TD3EbzHH6zfsayztfdaa8wxxvM8f0HLgamcBfB1Xedfe1/me4GRnHNEaBlYK4BqBRNwESQlw4zVM0+gKjnS0gzAiEeOWAAQEcC8VHYpWYgxppSJgYzEDkFUIKF3zjkkJylBdh8gcOFTChKkKEadtHMuJRndGobsR4snAIhiaOZcEWKfmBTEMec6P7IDqJNkgJJ/dJKExCBqJgDgiBAgJRHKZvfluTEhESUIRJhSYiMzJXLMrpbIjosmsoohgWFEAMfKFDUVZmpGRAyERMsWPtDCOSBvKJbAoSEwADeslJda3kyQ0HvOe6KMxSYAU
GNmBgZAZjZHZOaXzdYAUVWjmjr0VAaNZcHP7tq+e/fOqZXdMGwsGTKC2rvOHKzp2p/dXcYYyVfa9Du+HKahNzQzh9SNqXTxsKWWQBGxZ6k7fYKFXlX3qaokiRFwhc0wOvIdLaVQdNZp3BIGdQcFfCe4vu+xlRySq9qp1a56YeeWq0584keDLvdXrxi6iQs/9bl//dgfb7zsqqWi89yLfuaNL3jFZ3/9LYeGTadoiaaiKJqmaVdVCMERoQg9S/zaqcHv9PQPw4GX77RkOIEKaAj1Ban+mWH3T8du+N43tj1y1/s/8Bto7X1z+ybak2kYyTt06JLvHtwrra0LmzZtAoDtjx/btHbwzLHxXuhNHB5Y8NxMXFWl+b/841X/9JnhnXdMfunTMzK+93c/UD2xoyP95hN3l0vNeKubSl71klcvjrvhkzc88O9/suqOu7A7VicxIEx949IqWBpyURTkxtNwtr/YA62ffMtHlP2L/uAjcwcOPnjdDyepRWNVObQCUS0pMBj10mCi7DYgiGimAPkJqK1WJ2m0GCGVVQUhLjy7Z9ulV7zsbcNfXvXE1B2bv3tkclw0tdvV2bdtvlRfufpj9r2/+5M1DZ9x9kuf+WBRt9tcD5MCAOgovlmBnAKQITE757xzY50uZpsvELPPw2EzEEkOWMQnFREzMgCIKRHl5zSTd46YiBx5RlIA5zifiF3bq2qUqKiVL/MNNZp05j2KM0cuR0NbldefpqqOOKRIDY7uAcMs+4ixkYwNTtaQSVRkIMAoBqOmjUSsaSIzG5CBiSkBK4xCCPISC1S9L0buDnLLJ2IejatRM5diNLMyAICE2lh0ISVDAaxqFrYaGjLYw7M/N7z8tsGu58aOfmZw28fd20F7JpBMTdVUNaZkimqECJCOG5yCJrKf5MsDQD6dx2RkCuyQ0BETEkKGGkHe8xFh6T3ByDQJmJCw2+mWRUs0ZtoEM+fsAyIQKzRJiHUIIaUAoMRkpGIGo6UzGQjAMnYZcshAll5RnnAyo3OMRK7wVHgFIUNDVFVRJd9E58G4laRRHpbQ3SWnPysPrm8OvOWc/Z+76+Sb7gpTHSfgmzoVnfrYQmdq7LxLD+zb2Z69bn684JpxaXo9EJUHnpUYseCCPKrUoVkymfInzu/eNaHpeAGG4dJzDz/71R/uv3m3Lr7578l3u7I0WJgvN+IQYn/9i3ZMnzfYWV3Ka89LYZp977++e8W997fnjzYc2wvVoLvwH7+/7ZQnbjzjye/VhB1om8yHjSf3Fw4DLPRfcn7cc2CiakGnOHbXg1IlqJLUzfzhZ8dm1ieIwdTAnMJ4p53XCqtXrTCgqFL68WY4QF+klACV0Jlh5Yuy8kQwXBzIcuqYQzKzOoZoOjE1aaJLS0sppYIpIw1GUkkDNWN2qomIgMkxxCYVBoAYCAyhUFTj4CDPbDLNN6XEBGbICAZaMFnhsikgR1WAmhaECJ4oxZSD1CwPaUVijLkdJ6Ls4RGJ2a6Wd9UpIpKM3EoKABxjZOZhGJaVt2SOClE1MEQMTc3Mlqt4CCYGbJpBwMxEgAglUFI0zqcOynapPLBxztUp4mjFi/nGYoBQFTFGh44dUxQH2KIiJgmGx81+hEYEnhnRRDSnw5uJ987MwGEygDRSjeSge4HRlsobZUeWZU9yztBkigasaIqGmEaAYIumMUVVTBBXzUzfe+NjYFYUhVeuIQmIt/hbl4UvP0H7abozXqNrNaGXPKj3LnGEFEW6hJaGLnWcpziYAxy4gWuKobpYBqq4IKqwXkgd4uHSYU0AyWZWdFJTDdolhSjJpdRpau0UwoNjQxkf9OZWbTx0yuUX3P1Xg5/74Mlvfd11L77q1DVnX/HRa/VAP540ubRhw/DI7Ef+8M8//bE/mqv7DDis60ypibG/ZcuWfr+3c9eecjF2/6Dd3Omba/tAAHOYWgYEsIR4iAe/v9Te0dr55JPXX/f1Nes2n3fpxQf3Ha1c1wzJO2ehmYD5yvrb9hFsACpg2xPutFM3hCNw+MhiWt1bf2R4IhTxnu/1Lt+svd60c6FV4J33lW0J1u3w+HAcwSIB7/+r/9M/9Fw7hZkSOrR2jhYmUzlwTGNTVRjGpjesF0Ot4o+l9Vu6V1yy+sMfvvXYOWsP97e/452n/cybHxsOV01WMrTkqdHBCiVDqtla5JckShPzzFOWsT8xRnIYucnbVYBqfjB/853/WcCqLdXGV6Vrn5p/GEpY609treuGzXjembR3YubQ0r5UhWF/Ycb7OiX0hURDtJx1rgDgCJHQc2a+ppgTTYgcI1N2xhORQ58HQc45ZsxxtGajq9OI8wwyS3EIiJkAzBWegEWUiMqyJRZYXf6/IB97R1wWAyTnvWJWOSgXLCIORxT3lIKZldXIbFrXg2FoICQSCKaJ1Ct55ggQQ1AzMVUCSZp50aqmIs5x0sw/YQUlwKIsNSOAFMkAR8LLkQQmr6Byp5iHcgAUbOCAh5QwQCSvWihEZ4lrXugsrdfuzx153sfHvv+jsceunr/jZXbBHp3PVXYkT1MjRGIOdR1TEImIKKI5fD4/9I4Ph8HAOSeAdV1XXCU1ESVPBJrMyHKcooSUKu8KX3rvylbF5LwvahtN2JxzHkkzEtaoInRUkUEgSCqaJKgAoecCkRFzbBwCABmAaJBAmMnOmmswERXOISIQIRMRGuTOzBA4xRjNVUO/6AdAbR2n+PTTz39421NXvvTw6Z2DV6zv3Rj9HESUnkOJ/VaFW07d1xkPN/33TOgqMHDi+elVANA5sst7H2n05AXHixony/2nDod3HHPH6+8nn117cDEtFTSzYm0RnpUFngvoyOFSHdVmjz518sruMZn7ztv+YmHVme/98R+M3/f1edo42w2TIcQV7oZr/qLXWfne/3lrGs7OROmR777tQ/vOuXjdprPLdOGSG8IY2dj40aeegDIJTzjuN4PeDMtkYYt9K4kRwfvkmEYH0KLQZexPNdYFY62UiLLNvfKF914lQreVT2BkAGYpCaIVpctnzTzBZnYaU2YAM6AaMFJRFFEtJRFNtcQKSRygohchIGFA1CKSknhXFM5JspRSURSqqSx9v99nzwXQMJuEVJnZIYWUyrLstNp1XasCEtUxiKkzFNO8llo2EUC+QXDZbR9VMF/hBMyYQswda6fVQkQzIHXmU0hS+EJzuhvgsKnRgMCpkYAZsxKYiUNijxABAMTUFFSVCLLdllQK58QQkKMYpOQLD6IEVFEBoGbJSAYxgioSkDlDJeIsWEazbImoGogM0YGKdYw0SSAMbChYFEWM4n1JBCmmoihE1QPJT4WGjybZgOidSwYKEQ0QHVIiUISyrGjAzvkQ6wceuK8zPrm40KsQy6oVhd5+anPiuP4BvPB5V6z9/tf+B4kgNRIjV75lzswg1Sva3Sr1W1AEHE74Vs/hZCzbw6V+RRGLFrh+bCYqhkYXL7p8xUd+r8VMRX/n33+qvOXHPSjHQZ5dN+Mmz5p87L5mct36H92085N/u+vhvYp8+Yc/vPcbX1i7Y7ZoTTfP3Pb012/Y/Mo3BFpbr90k
B54py5Pe9tv/7z8/8dGlfm9ycrIehqZput3uM888oypVuyUITUoOuXkKYYvZSoMaoEEYM0CRfdp/XWw9O3bzD68XkH37XvfOd7/v2T2HkySqwYlZtxgw2Xcfn4cN8Esvv/yhbau+ev3dg2Zlu7V6Yvx5L3rRK/GmvwFud2M/jU8upjpy0W2lNkmyANGV3oSxpFLn961ctY4VFuqlIaKngsSl4RzK0aOpANbOhc+beOO721s3jZ91aaxl5/XfepBPPvuGv3/BWVsfPXVL9cMf6coVEoeUUoEUHCOqa4J5TxCBUTUbhIwpk/5oMAi+XSAiiymbs6ZZSLXtO5oaq9qnnXpmM0jgqMK6bOrHHmp2Pv3UmpOmG5KJctwclqkUs4SAhkLEwKyU1MiRVyY0RUgxIBMVPAxNIeLZEWluD0VNs3EoJRMFNGUANQIkNUhigFAwEIqpQ8bl+VGua4bIUBjnxauZCTCZEYCRgaCaJiJiYkXKDyaE5R0MQOmNgAmGYMYEVHAY1nVdMxBEiSB1aBAxJDS1JCCeuWCVFCV2Wu2y6DSDvvOECIiQ6eSqllIEAg8SYu3yntOiIRyvmtkEQqNMaAusDlCiVuwR4iAumCM1q4wHg94B6L8MNn776KYH1u38t+b2y2a3uMQxj8tUE5qZJUzJgrKIWUzSrlox9gVALTnLMmIwQCYiUFFNBsikAsyIDsGGDskTmmFUhSitgr13eW3PzI4IIbYYc8AkEQLkWg0iIKCNWSq8BIGkzliSsXeKqhpyx2w58syAiAKwR2IdRf7mgbMxASCyR1+YqkrM+Q5Ro48VxbDgB9QH4iU3N9Gqj51y/e51z8btG3XfL15R/OCLumlLPRA5eGyybEM5PPPKpWce7vZmK28SG42oi2OrAaBY2B9hyOyaQTJwhfNl1DR1zqdPuObTN88dfwp2tdWZ6G0gPSZ7aLHU3kK/qb3nqhFql4P5/mxZFZ0uHtt+39UfenLzWe/77r9dvO8rvlekqty3Ysutl7/3Bdf9wdTOO7unXrDAbC972+xLX3TTr73/0pe8pfWazt5y+xPdB1Zvn5gOLZhZWx1eem5pQQaL1XicN4jBoQ0NyJdlWZTHJ7RAyOwE8qreCHJkOnnvkyRHTi2zhDJYE0II4DyqVcZJUy0xkpklYk9IiCSASliBD6wh1CBm3lhSmSrDwVw57grGOpaLddESVQdOl4ah2ynDMCRTYxyIofnCXMK4OGy67CrkoSSP5JZZJY6oHwZRk3OuaRoCJmAA8AjSJFeymTBTkmgoRVbeZfiAChKbISjl3Y+ABQmFK0ARTJEFEUsmi5L5oIaa1d35oOuJFZICKgqYagSnpaGy57oeeHYpQQSrynGJPTZgNNVkCMzMCg4oOlMwFCiEklokFsjspkjERpIQPDgFUCE0XHChANIg4HgI6h1blBLBSidhWLmSTDSC896SIXPApGCAoxjqDEJBAA9JEKkqNQpaQE+U1BujkjnqTo4/t3dHb+5YMcapJqGy6TV1WPiNS+RuO/XI6Zd+/4/+tju9GgA4Dh1iAokRS+9rrC+hMobeou+txMljg7lqcipOT8wfni2VicH5pqqVhr35grb++9d33vnw4NmDC4tH08MPxZ95M156xcMf/b/nfPYb1abLbz0dL3/7W5fm/EmvfNsPeecKGs5df/3Fb3kvxGew0+Xatb7z1Yf/86+n/vfH58LRqpjp9Rc3blj7K7/70Ws//+9PP/Zou10xcx0Ds0dkk5SDyeI5CSNabYAALYB5A0UbM+ednRODhHKs45y79YZvP/vc9vd+8NdC8vNLQ8eElR0GgLroAsCff+mHrz9r+v+978qv37P9/ocP7X1u7osTU2de8DOn3/6V/vjKcth0KSJ1qDWTBvPJWeoyGkJA9jZ043r4EGl0aosYB+R64+v9JZdNnr5lzUtfjhe/iHY/fnDnruFNj23/9d9sHdy9cMLFzf9+37k77rBf+8Mn7t8xMVE2zSxTu+CRV9Wza3eqwVIP0YwYkVRHrL2maQDI+4KaaAAOsfAZ74OOuaqq3Xsf2bfvyVUrT1ioh22mudnD1C2Ozu7ZetKqWhoS80CNRzBxBgp5UzRKjgVUMQUFkTzNHaE3g2jQxjnPzllKvqpCkhSiR9IkIQVlhGQpJV+4oigQsa7r3BNHSDkuLidBAoAhmZnzLAAgqjhiqGTlJDCBaDIBiCNZEKJaMrSf7iCXRcvshOIyhziE4L1XUxEJEAwgiaQmQB6VAsYQVCTmrRWZoDrn1DTGiAgiGkJQACfGUXJGdd77MrOkJCnF5agv1dQQm1mIEQHIgBKa6DHfjNVyrGCw/vsPnP3UxJ4dq+FLi3e9Z3jFvC1EqCsUiQDJDbHfTtgYpKbJIhTnChEBIAXiZaGmjhKICNEISTWakQGhkuVIIIiMYgh5vm2Wde2kYKPEJBhpR4+LwFNKkhKo5VKcxPIQRH8qymqZ4zDqcpwioCkiEBooGjIYigIYiFpIRGZqIkHUUDFgo+wKBWnjgLiana9vu2Ns6ei5d+7avem0p86fvOPM8aufme8VWDkyiudctmgAd9/UiTGIKHtHAIOJdeVgnpNIlLGJjYvWd/Fo48tyYfHzb/7s19ddsGl+3/ECLNL0caynS+M4PrBGuu0V3upBjIFg0KPBfI8aN14QTbeGDy+uPPmj7/z45bdf+CvPXVsv9a57zcemZ3e/4tKZuY3/Z+1Vb3x8cVfjvT7zmNv31A+bTw5o6fZV33uyc58/0W2dv/Q1N75i/cQpu66/pT1VzVe0Kg4GLRVl7gEXVVkxMxBiBjOLqaiAocMRzwdgRNXQmPJHBqZgFmPKB1XvudfrefKOyJFrwpBAY0yeKkRyoomSJK8pCIaidiqtxE1dVVuX5q747g0H16674ZwL+v265SoA6nKlSw0RcOEAEsdY13XfsYNiHHyoa2DqVC0q/eLSkjfwQClqXnplCb2iAlCQ6JxzxKMg85TjSlEVsrIMADQvk/OORlURmNmTNzMEI+/QIFjMqbWkBKKmhGRg0IjkqDsBYe8AkBwxoDAAGIO2XCEi3jtj16Rsg1QEThoVwEyJHRAzKgMoaWKJoKhIiGxgngzQEWbohZhGi4qegR07NDIDSwYIZKBgyOSrajhsCND7MokgmUQ7vopSyc4RgByCCwUzxBiYDFRiikQFmBtj7VksJvm2b98Sh3M4Pi6pSYtLCOlX33DqKdNP/e72ld/90l92WmNUdAEgNEMwxNpLKabRQXG5ay+53ZOpG1JKM5013B3u39G2YONTqS+Hm6XYak++4PW2dGjXX/3Z+AUXLqXm+b/5ka8PHz35qjdOvuSVR8JqwzWzh3cl5/lFr4uzB8fOPXfuyNZzaO/sQ994+uYvTb76NaccemLfu16zedijS686uub0NPdkq6gEO3t2P9eG9gc+/Nv33HPL97799cXDh8a7YzHpMIYTT1y/OL+wtLDQqVrmUtSAgnAQbK3B0LJ2B4HMLKtlZ2baYeHo9V/+wlvf8/OtEh0Sb6oqANiyeesegKMJ//G/71g55S7etOYX3nR
Wb745aFNwdNFBMd72nVY1HJh2KExUS3tkMlkYDEERSRZ6kanbjK0MK1rjF1yw+jVvmtp8aoTYrNkY7n/s0Je/qNf+S/vxo9hbbO/fs4rnoT193fPf3Fk8fMbpa5dOOFfveLg9OT6ZpB+RCCSlVlm1Wq3+0sLWrVufeuoppIyNWaZROlYjExUVQHTOsXOIKGAJQcDalVe1vXu3c6uMiEXJ49NTzGwsXedFRMFUDQhkORg2t6HImVRh7DwSm1oMKUq/9K4oClADwJgSmAlAowkNQoiM2IycADlr2zIWyXvPeQu7nM98XJ1hhGbWBPPEeQodkqgqiDrmoOIy88gsw8IIEdSMR6dsGVmKDHIQhFpVlJo0hOCczw8D9ozosAZyzORyrI/3rX6/r5KZZUiAKgmZRVUQPSOIxBgNGSA/XbJTglQVeXREGFUvESzIoqJajrTMnl1ErGobqNTReql/4nDsVQdP/sqpO745/dj54YSNx8bnkCNCkcKgjEUfl1K2YzoC7fUG+Y0qilJVj/PGzcBycgdkEJICIKqYISgYj3BIBTvLzskRjkKByRFCxgbr6JvH1+2ST0XB2DAgGC/bokaaAyRSRMTMP0akpIagjhIYGroMzUsGZqiJVNDQxEKd8n5OLRg7B1j1oByf7D15T/fIUbN1L/rikz987ab9U8Wzv/yS8z500+TE6rl0ZHJseNr5/Ydun4lSJBPkDLnS/tTa9uy+kmtL7WODY9Wgjg479dFtZff2+afXTqyWQe94AW6qsSI0Acv5uJDfnyhd5Z4bH6Y5KVesmV47Uw+GTZw/umCTPVs30/rOC9+0U179gVee8tw2/vXD102++l2H77/94XphdmEwOdV55oHbd7+5v+3DfU0YPC11Fn1TPtS6dfcLH/uVO/9i7XRLjMIJa2LTjDfSwwBoYENNkRGAUMEIXU7AiKJZdMBEo/ktQKbxOcchBGRCguFwSMxV1RIDZwaWByISIYBoJGayOkiBrrYGJTnQeUY08AInzg3f8bnP1gtH1nk68cjBL1z6snl1rpnVquU0EmKKmoK1uUWekvQNfOWcUFHHUIKm0BBDu/CmI+kDIS278wgEhNAwI6Il75UyHcQhmoKajMIYgVSX7wgzMyGmHDmJqAmsMAIDMFSzCKY24oXTcYoXQPZWmKKhlagRNAFR5UItHe8YCUMYgokCY/b6M2IWJeTJDChjyLiGJGxgGc7ABEiADMiOScwMk+PKDFQhWjIzcD4bmSVGMcvz9iiBnBPVGKPPeSBGy4+gETQtL9FLzyIRAKksUwRknEdaMTn59LNP+dPXlvetBJGZk9ZsvPz8Ezed+qvxH27Y377+2ofaVUdQtXAAoCYL0Lh6obDOwNyV3c5UfWhPpAm0WT+3qr/qiOyXcrqcqpJQWgUr/u8/DC1Nrrt09l8+7c3GJlvN8OCjX/gf/c4PWy//pae+fes5P/Mzd33zP55/xaVPvuFd0+dd8eBHf/m5G04YXPD75Zd/74xnH7Fnno4/uK4qcJxS9OWB51/ZDBe6ODWEprBm0nf7QfcePHLq1vO2bDnriUceuuHH1w/nj46Pdw8dOJhSqqqyubPRlyssoJUGHYAaoAXQA02G9xovpyLOH1585WuuTsqf/Ju/fvPb3+1Mk/kaAO5+bAgb4LQNM9v7k/N9+fZd+9Y8O7v2lDNfeuTGjfd+/hgMO0d6R8UB+LB0TA45Uh4AuFPPaJ11YXfzhrETxjtbLk1nn+ahWrjh+nhk/2N3PEK3/niKl/zGLc1s/dzM2LZVqza+/30X/M+1dNdNRN1HT33ZqY99f/Lt731mz46xVlW0cThP3jdmUrbbqtI0DTAt9JaccwIWg2Szh0hyziFSvzcsSg+IitCkOGorEURhOEitVqvdbvtOKw7qpgkxqCnveWbH2c+7oN/maedDCgWAY5aozjkUVE1GBJCzZiClZGKAqAKWRMUQrKhAEZzRoKlHea2mISQRAUEEUtWYALJx1hU/7bcbbYiPByAjmCQiMlFX+CYmANCYvHOD0BTsRgrFLIUiwuyUWs52FpGkEjOUTS2E4LwHxBCCEbaqtpmVVlJgRqrK0nuff3er05EYAcA5IqKkqJChw440zxGYmYl9lqpm3dnobP9T5KU8OJeUMOlxVYghAFNqZEgioAppKM2rDm25bfWh/auGX2ge/O3ZK1V4QFqRtWIt4gRJIeaV7/HtVFYlI/6kWBohWs6iAkUCGunCmJCJDBzAstFoRJMFACADVIsa89GHYRSbkF9tIPOC+UHJynkOASqIBAaEAKLZSJo/R869jYLhaBUn5AQQALL+hQDNUBWCqIKBUmqwX6qUqUOQbn0Y1HDlxOp7j1x6z/wPXjj9xFWbzpm4aYsnLIoLXnykt8AP3V1F0bJVZXG7IxqsWD++cFBaZWNWGMrKGZtbcJZuPunkYsur+ocP9PknO+DUp6pcLFO1xDDWLjA46daeV0QoxldD7eHoUk3Oa2hVrFrp0WOLm63PNX19m5576LHOjf916+HHo3e6ZjVR6i0Np8844+k3rAZbSn6QcCguYcFjfpzL8oa1/77i6992byEjXGyGHWmlomVq0lijURUAohkiRWafVFJKOTLDMR9H8mUe7mBoRISmWSIUmxjqBGq95icOK9FI3tUxoCbPrmYTDWKAWBHEtoYj0xOvveHHzdx+aU9ZpSt/fMOWdSfed8qWEjQkLpQAtWwXAx3UgwUqSnKdJDLUxI49jGBZHlmbCG5EeFRVkUgEnjikkO9fUSEiAGSkqAkBmpRFKux4JLQm8stDF03JvDFalhOSIZSCZhaXs6kz3WF01ANABw6ImBHRM5tZQiPnUtSUUgHOQhqEwIVnIZVoaCRYFA5MHKAHqllJMOcHYO59Dcxxhi4bQmPCamyAJibaeIIoRAQFEWJSQ1NA8lyEGGOQZAkApGlSSlVVSbLcqOT7QkUMAJCSDEEMgsN8fiAz06JgqWl6bOLuv/hquvykl3zwPT7AwNLwwOHihs9tvPLIz/7ILVVVy7wDZa4AQPvzY73B+LrVwwUcDA+8rbMp9YaF6jG0Pk15Pbzit/7voSPWd/Wal15z++//3tsufct9X/1SePY/ez/476lXvPKp+7Z1HaypHp2a2TB20uTOv/zUaW5+8MOvzy88N7W0+ORvvLv93a/veOEH6Ozm8ps+L3Go3e5kOV2Hfr3Yi+c+b9fmC9rH9qXY0gJZdWEppLJB1cX5gMBnnHfpGedfdOMPvnvPHbd2CgeVNamJP270fUYbSFFhCiAaBIRxg9XmvlHkYHAAaI+N3XjLzWddeMHzrrho+9PbnHe2dowBYMdSXA0wJe51zztz7/6lZ5fg8J7DunPxzN95Fx+8qt6/B+YO+hUr3OTkxMo11aYzcWZyugjaW1zSYjCMg3seXPrPf937yG2dIawcmzkMceaULfvOPv2fD+w68OTT+z3Ujx6ywf4Pn3Dqur3NUcBmauXhlae84MF/r8983ZFb7mlNcJmcH6uGas65DBuQFKuic+ToURvJcSB7gULC3AiVZckE2RuTUjJR54p81X
K7qOsBkw2GSxW5ZBZTXZQ0+8TTN//9v218/pXtfh2cN4iAyMso8syJyu2dmQWzFnOOi5MQY4yc9zQIAmxmilA3w4nuWK/XyxbSHDBJREXpU5Sm7uWXbcs2p3xL0/JXLoohBAockgCAhEiIriqbplnG9i7HXhgIjlBfeSKaUd75HLC8meBWt5Pfk6Io+qEkxHa73Spb+XmXyWjMrCr5aZLtSeQyP2mUCok0SrzLIZSqNkKtmYmkZJr7VBBj5EhGvmBHloTzQJ5knNuNRUYwazrHJn5mx8n/esa2RyaP3LBi3/P3rO2VwsCNVU0pzqMOmQnz2GBU6gkAgNFpJp0BADIsUxySJXKUq6sjZnSEDoiSJhpxgkf9BBpoEhnFRyr/ZNphMcbEZGJJpXCeiHSUzDHqmHP7ki+JkWqMGMxolOA/arRlGVOcUqYagxioYDDpGNYJyvleM+3CM8+0n9vbWjlGdYpRz/3Wk3c//9KdM2nXK9efce2zJ56H609ufvyNGU1ATJ2x8dnZWURoUlqcOGHF7vtcpPbKqcGxw+nwbFmWB1QPbnpJyZPzuAO4dbxKFUn6AZLjbqyHgs1gGIZDF48lCL0hFKle166U3FIrHRq2ulVTzhTztmL1tJf+0oUPf/bZlzx/aaLbsY5gbQxlw9c/+48AIa4tgAdF7ceOdgerm16rV/W69/J3Xms9LVsDU9dvPEDd1GLQRB2kiHVCGxlmzAbZdQbEuUkahV0ULhcgMRKRsvTOudBEypEpioBsIwCzJhgVEhSrmBOhiZGCkLJIdLKqSRt3z5OHhGnhyqu6q0450OggIvRZbIEAgySxSo0LVzrDmGJpHC1ZBE+uH2vnHKnVAC4FGnGxiYjQDMmQjJF12aw8Cs+Jo7spO4xHrh5VZmT2IQRHHEJAg8I5Gk2MMRKagRgYoTOy7AlUSJh3H2xmvKzqyjaL0Bu2W11VtZjQcbvVUjNkl6KqaD7WqBqqqSYrSSWBGKGhmKgCAnp27HIuR84JAh0dg5Ik0BwP6oiRABBQRDPGSRFMrYmh2+7MzMzMHTsmyIjIxDm8KLvmk6qYjnfG+/1+bo41ma+4qYerp9c+cPetD+54qLXz4b2dLpXl4tHZaPH+X0w3P+du3j/RipqwGZY03R4DAC/10VZ7Wl0vHX51a/pUwd02x0yTH/uLdeef9d13vOGtH/jwvR/5nS3rZ+qFWNYymE+LzeKpk5sWXvhCmO5uvuLVzXNLcwuPTx3e++x73nzK3gMPfe2Lz7Nq7sYfrQIATCeX47dd9KYNO28P7al00jndJx8d4AIVVQdx+xXn2SCGkkoFxG5oFgflsFB2zhFYSOnI/OGiqK55zRsvueQF133z2u1PP9ltt1u+CL/fLH18noxgFsyMmGxa9UTVn0vwiSI//AO3pA533XTnJb922ep165zX8Jb194vBP1568x/B7zy858GH7uHuhrGNK/2lJ53wjne+ceXKlUfPfN46F7ngCFwt9NLBx3r3fWewf+/BZ56Th5/o7d/e1QanZ+joscs76wZvfe+fHtu/Z673T6/63U9f908LBm8++5L5zpov7vjzzqqTzlnVrffv4IIfOvNqjs0Fl6yf1XGEQae9AtIiRcfYHQ6HrvCWBAmYGSOqJjCrWh0AFNWiKkREk7RamQkFRui8z9mwmbcLTO12u6kHmjTl5a7acLBUxjDdGTuMCSqqwMWYYklWZ08nmrEu7zYAgBmJRoZ/WVb/jvpUkFz1U4wLaVFERVRAzY3aLxVTjdnqJyJq0ZZXvHmjfPxMVDiPiCgURc0sj6AlRFWFfEDOZO/RA9YQETKwVnX0h4sKmBFWVVVV1eLiYg7tanXax+aImNutioC8d0AYVTw5TBpjtIwF9Q4RmZ2qenZGSXO1EvPLvkkeIVxQM7oYSVERMUkixyoGFnNatjEmFYdxiF5MXbToi73l4JKjJ90/u++R6WPfW7l969GpIsGRogbqtpuei5gcO1fGGM3EzMqyhctOD015/zaCsOY9d9XyWV2OQJybETJkAHUOyQxNNBdOTWpi4DJJGY8vgEejMxEVAYScdukUDBUc5/KffSn5TICOEVEJ2YAVVRUBcyZRWp6QA2FOyU8qiIiAQxLngSsv42N850PSP9Ia25BWtIOkjV95ZMtHzn9kc+ehd2w952tPvvj5gwPPFbufrpjZAHq9nvccY0SC/tTaTfc/lxzjYmOuiLY4CaSunJs5s10fs6LVhonjBbhpxW5iHUJd+nWycPEqt/qEyelqasU0z6zyq2dWTPqyn442Q79jd/jKI8/c8vjBmTPXhHUnvbx8Ir3r1e1F343HzEQ0cqtbD3WIe1JvWCcDhGqxKGqKfRq26tg/EDrmdYyYVXii1XJiHBIgaKhTVkqimhgiKqiIJFN2PovpGZAYSZaVw5LMbFA3KfSKogIzkeSIETVnL4uYAaYYq6piojoGMDZjL2JUi+B8wZcdnK92PdyH2sE4H2zw0C0/Mzb+HX/JI2Oo5AdRWlpUfREIkSGA6/hOAw06hqQakwGGOlbIRVWKRkBgdIX3RCQSATKjmmIUGPmxAcGWtdD57h4ltHNmQJmQAaEjHD1XVNXAiCgDdAlUEZVQ1bIYizFnbSKYiQgYJVJD6IArO+PoXb/fL8tCJUkjjhGK0lx+SZbAECGhGYMpiqFDYMSCuCFJqmgWNUulyAExsjIkRAVpGZkDEWmagI48OWZCM8nBfzEBgSMeDAZ5l6kEpS8Q828BZgIAU42CsTIziakREe9Li0zqmfQr131zvDPFBc5pHO833enpF56weMHapZd9uRj3QAVxYz5IpBIA6jp1yHbPHjvP+D3d9tHFI9PmwML+W27Z8Oo3Trz915YOGH3v39f+w9cPltFWVcNpW3P6xe65h3rHjs387M/G3tHFQ7tXv/xN27Z959Rv/WCFXzUxOT4UHver6v68WWsOw+4NF17+3Y91FkKzd0eEXsKJzuxg9oxNvXMu7xyZawp0laubWKArKE2V47U0wxTIF20uwOzAof2d8c67fvGDzzz+xLf/+2u92WP0DHTeNxFfHupzh1wwP+nwOg7vruXjoQEoP9FmZtRhWaBpuvmm727ZssWd3JndqIYAG/wxAPj2yx7+xMPn/M/2+T3bj1x51slr16zav3tYFUZ1YWvDcx/7ff+dfy1BqhSrBIyuW5Xa7dZ+9QkbTtIrN986c8rHb/767PYHz73oqtmHf/ihwVy1e9cZKzb8v7k9g2b4she+ZsuDdw16u8fXrXnm5Jec+PQtkxdt2T/c1ZBMIg65pFa7m2K7rOq6Jl8gARpooYyUJJRVO4kGSb5wqJZCJOIEFmPMzuDs0QkpChgmVNGqqqQJZA5MGbl0Zcs7XtFtCapIcISMrk5ZXe3Y5SNt9jk4z8SIUZCodD7ByIqjquQ4hQRmddMg4uzCfFVVAOSQUtIMPosxHnfspJSSpHzOjyppBIoBInKesQTvvYrEODLCemHW/PRBAEuSJ0ACajTy5mZSioww9AbGDpEkNMGgVZRVq0REjKksSzMFhFa7lWs/A3p2BqpgK
SVXeMoyK6BMO85ncYSs7CUCZHZYMIQ8OedclgAgoXDXk4EjZ5l76JAcK2PLnE8wQA8kJEZMEPGaPac8Nrn01PiBH5zw7LufOqkpuQJVLiSpL7J9hYuilSUsyMTMiKbJMrMhP6MBgExzPIiCgeHxAgyILYej8TXmZl0FFJFIjJiJOR9ZbBkMR0kiqHpnADmUSUgRUZPocgylUdbPjYy/RHn3BRkkYKIZPqyqgIYEgkpgxEiA4J0OC2tzyW7+iSeu+vCHn7rrIb7/hsnWujU1nv7fT9/3WxcdvHDD2te3JmYWr//OagVgRDFtlnpVu0whqq+aznR7dq8OewM93JtaLTGd0AwXLz7LYjLPYzYeLR4vwEgE5rijcqj+vQ+c8rKr1x84tP/I/oOb1py87/B8UdW3PnT7ZDG1YuO6885qv/iyi+7ZfvhPHl016C3NtI6uGi71sCg7kwNmH4Z+rDu3+6lyX22IpAYIkftD7tcOMEG5KON7vbYLaLmqLDiU0CXqtSAgVO2Si6zYzzcCZ3d9jqlhh2pFUeDyaVJEwmAoKeW0Yx3FPtuwGRihxgQ5po0IIZvWzSnUTiuqsOCE0QceMz1U2dylL53e8URoVRNxX7H78NzmU86578vnFrxv66n7127Yu3bjvC/TYK6lfSdDk6hUmJiJJkllVYZURzUYRiqZ0NkyB3N0TY5yeqwoChExQkXIOZRkYBYFFJhd4QBH2QICZiqSj2dGZppHyqww0p4piKlaUjADwwToMfvN2UY+N0MYSPJirNGJOMZkwu2ilgRNyCd+SSnG6AgMkdRBSqqaEHJQtjEBGCVVFs+OGFRTrp2qEsWApECPhGQC5ABIxIQU0GWShEqSGBFRoyJwdkuiCeRbCREBwazyVa/XU4t5iiWGILJ61aobb/n+4b17T5heYTEmHSQSCOG3nze8c5+/6WkpJuoGXQTulK2SKwBIsIStzkumVrx/x0EbLAwBk8YSEW+/kY8cuObPf+/Wd//yqojHnrpneLQ564HHj7z80qW9j9cBTkSMq9fghVsmpkrbMNPtr0vC1O7zHDSOUnO01Yp1CrvOujoV7RMf/1FTDONRR521FQTV2XDly+p+RdhQGMQyrZ4sF48NK5gUjCklMAqN5ilrUbjhsNdbqtdt2vzLv/5bd99+61133j43dzT+F1bfmoQURcTA3F+V4Fz8ozoglh9vOaRo4orWtseevOiCSxwCeAIEKDkBQIX9z5z51el1v79tZ/t973n30aXIhYkPAeqyvQpPPG1VPZw84cTZ1BBwtLiETTO3tHrL6T+47NXXPnz9tu//97Sm1RtPO23rFfD0Y8WPvjTWWXnj2LrPX/tnqzad/PqZNfW1n+xOrrn7RT+7Y9Nl1zz6b2NXfnD23h+2u2NkrbHSfIQasSjLnHkNYDFGR2xmagSI7MqCKiQA0dIXRARRky+BIEqmlSAiKhh6E1FtpNUqdahqAmgizcGnd6zdckr7BF5icGIFe98qIWThVf5y7AuPkIMuXOFTFNL8vC4AIGpMQThr/xBDjMyc0/KKohh59pef7/kBnoVwCGAEJqoKpiPBgvOc62LWGOegDwYEtIyCEBFyzCoxxtQEjYJMAAACTQy5ACOShuS9r6oq3y2QtGq18pAcEMl7Acvmd01iosiAYohYlqWKHX/BkmKGDaDjUYkCZGYgMu+yhDV3MIjoRzs8Mz8awxIYqFVUDgi7KbhI6o2sLmI7jIUzwxkv3L/nupP33LHi0ZfPbVqzOHOUl8hPdMoemPPej9ItmPPSGwwFFZPmAsySBIwByVSEDZUgAQoTMDoEZ0AGQc0IctoAiJoiOOcZlIhGC2zmfI4JkkoBYFBG1YQABAyGSuiRRGSUu5mXGs6ZGakQck4PYSQyk5jyElhFTBQxpzJlLnCCZMlpWfDEkOpde8d/4d2v/suPff2N7zn83S+vg3DG/2xb86ELoq9fdtbc9gfKQwuutbxNbLfb/f6SZ+pPrweA7uw+K8Fd8u7zf/F3Wsfmdt743+Pn9pe+fs9w+oWD+sgGP3O8AB+xYTdV7S4OYFj68o5b7r7/5lusgq8e/WLZaQ8O904544zHn7npvidveeEL3txPsv4FH+aq8+xdd95zeuv9569biAvt4YSDofOuVU0e6Q023IzP9CBzXUMbUhsAcfwgE/KZ3x+r1q6kojfO7WpiRaVxvtXFRWw59JVDZFQjRsmXOgAySUxFUTBgu9USSaNdD8BYqyVqTROhcDElA0EGKnl6bGrQ6w+HQ0SKquw9GlC+PpnZkpn3TatXhK7JgfGxf3rpVRuvfPVCK9TzS65glmSXhKnHnjhp250n3nMfjHXnL77g6c1bDpZra8NFaTpNH5LWKQBTGNbRIN/tLcdZeSTJEAyR8/ykCUMzc0URh0MAEFNfeAXjUaicX1aViXOOmdvtdkopH+byRAuhiKk28mIgYkkTqCFAgcQEkuUIzqkq6nI+NiKqhRAMpCh8IlOA8U4H6uGw3/POAzFospiAEJlAlaIktJSDXXN3Qs4Fdcwj+DcTIJoIAXcc9bUxEEOOhgVxLdmL4VIQ7/3S0hIxlGU5HDQCwcyACdUEDRELdjkHVGISaJwjsWIZsmSmoT+Yv/+mH3RMjg7nk8YipIbTZSvrS9fJq6+FEzedPnHSSdt+dOtUWfU0Tra6AED18Koa3rN0dKFrtS8m5xZLLD1PdpcOLv7W+/dWM1P3Xr9qbLL/t3+Nqm0umsNHVruVra4eCMM1b/6l+dk9zZ3feu6/XrDhoTvd1Fpcmo++5VZQSyaWDi5WZbP/5Bd0Fg6sO/RElRzPdFrTU4e3P95+/webK15a79k+xqsKI8CyEWvKllE/BqemBUKr7eqQEpDHNjcBQBYXBsp2wVVXb7nw/MVDhx9/+IGnH31sAIswogYo/blj8eljDZj5P63QsHQ+Nem/v/Md9xOXRR66oo3ZkVWL9/3rX//bc4cGYW5JC99ODp2mQeD1mxeBoOmlUJfJsdeCOhoX5IStx/qDn1ux8c6rT/raLV+amO2dsmKVu/mGgMDXvPszjz0MoXnda9+2+rYfl+tO/vd1W3c1XXXFFdc877newmB2oVoxYwETIFVYqkdE16qyytdTMSJMovfe52WtIRjCyAYexIECQKGZL2veOREBS8BFKnRpbhFKZENWJCzPe9db2pVflF5FRgK8nBrnmASMGQkBJKljAq6AmaDqVMNmyI5SCnl03MRgaGAYVMB7B6pJSk9gNaCvqirLx6IKEjIyJVAEFkTNh0tMKZAhE6CB956zxBo0FzwAGEVRZnCbSAhNIhLnQqxTkJSSKTogXd6JemQEjSlAno+bRRFyRAZoIEmU1cBQIlOukqySFW1SFEWKAmYxBEbKeXsgWvgiHwicc2iI3uXnCDCrKrFDRNY0MghhPmMs4+tNyblWYQAgVqCHDnhXxHcdufrBqa8cnOAvrN/2f/dfqdhJxuDHOrFIbOjYAxmCEhKiMxJVKE1VNYnLcjNTM+MCGRzA6PLA0U8HAZcDvVU1mTrHAGCWAFgsn4OIAcHQ1EhAPGlI
qKBREZELJ3WDkgyRGfPm25Jk0dDocxFloozEQEQsXdIciYuIyOBVzBODWUmuBiAMqeykup5M8Y5f+tmVN9zz8k//y+FHfvO573+l+toPTvqfB9+25ogj+/qjK9YP+gMphcAzDVOfwOoQe1MbAaALYew3/+Xvbvvxvt/6mV94+4e7F5938MnvbubeB1/QBLfm379/8HgBvmTN6v5c/0AgH6EstDkc5mLaWK5cecHU6rHzJ8f8c3t3rX/R6jOe96IjR3YeOtD7xs72Cc3++1PU/mC+GZ9K7aYzwOhD0+r4NOgf0b5c/Jfj37l2qVi07mFUxlYfXYA1z/iLvmj1Wy9BdwtY8F4iSNUYI5VUVlyZmZIRWsF4XP3gyiqLgUMKRDA67YE6IkwjVgg71gRIBOQSijijlhMxMiKAnNZcgLWjGiF7EwyVSK1UROPQ7LcgNRWVb0QGBDhWHLnywieff/bM0cWxJ55af8+D537ve5dNrZg76YzdZ2x9cmpqvqV+wBZqiNJ21SClKfQ1DRsLRFRhiQkiAjqLGByRmfUX++i4ZBfRhhJLQ2Dy7DTF8cmJQ0eOdFotZ1SYq5tgZmiKQKhGXMRayFUpNY5IRdHAiImoZG+qEQMQeWZg1GTArGAiSdAyfizf70VRLi31FaDTrkLMM30WsIGIQ4MYoxgyAbHm0Z0ogpnnximKOSVkVjBFIjBV9cZgwJYcIqdIvrAMBiU1gLIoQpOEOAEaCDEZI4siUVF4SFpLY2jsyYhDLaUnDU1U6VrLu+q58di+7PQV+2fGwGvSwey8DsPHXnVwe2B9+RvXrpl65vPXAeCcD13VXlGuAvgHnpzEdBiWPLVxrNLktNDF+aOtzhrZ9uBEZLV2H2rXnpzQcqjzgEAzfnbv/DTJ7nddZEqrETaLaTktItDtJtNi3h0tetOd0LjJnWe9YsuTPy7CoFYXjy3tqZdO/JOPt1/9vju+9pXpmUkJyVwZUt0MC8RA5rPlMKImNfLOVKM0VKhjlkY9F3NHe+yLlRtOesfFF3//uuu++9//Oj62Ymk4HJ+aWFiYo7+ujCj9cQNI7b/o1imW7fLYkcMODBSgFkzMAOBQHNEvPS8+vHe/WqdsVQEghmimhVh37Sroroc0rFLbzBBsoVmamKnm7/7e1Tq36aJXPFat4xr67SNTgWTp8Q1rVj6wunv/d36wcs2GS2dD3Hbv5IaTdjX98RMuaC/umXntOU/84MHueIcGtXZ91XhAz/44nWN51gqABjbito7UUgqZLwRElvF2ICzkU0opCRJJghDCIISiKvuxATBirjrtg7sPnHnhBYcEG4urWlM9rHtLQ0C0qOYtOSqYC8wORkjOMmJaDDRFAjK1GOpsN2qa2lQFEhnkiFoiane6eTuLy4YcUAMm0mwvMDNzwOwLXgajIlOOdPe+Ot43//SbkE0aKTkRcQ0HCiEEETFFZMppyjmzTslERGIyACV0wDnzOYQQIKpJghFHoZEmv7sGUMfANgrSG6SQOz+PjCkyc+E8GwIzIuYMu7zuAs3m4GJ0gDueUqujw/4y03Dke85ovwkcf/PsWX8zfu+TY/sf6Bw6z87q26GuTEglyICIHggAzBEhO0DN6XhmmiSrvvOhkm3k5spvoCIkMDNric+vQY9/LTcfo2U255JsAOCYkkpeGRyXWTnnlrmukFVoeTxAAJD9xKJ5uFSWpYhIEpdbMufKsoxJsFUkAEUzs0JAqXTDY/0f3b666VH39N51//qD73x17Xt/dstr31K94a0bFx5/1cPv/PPLrr7ziLzh7++rO+PWxKYJJWJEK8Y7Cxs2girM7zr4Z+9/SXfj92d37XzkUSZPa0Jz5LbQe+zMU874+FtXHi/AJ25sv+4PdoUVJ6n3c/PpzLPPuuCqi6ar1vYnF4/VOzavOad90tldSIiG/hV/f28am7MrJg7/eKBvuvqceuFQt7QYBi1PTSLyRe/QgQ7C6h+yMUztZKcESVpDPvP7xZX/MePN6PxTobm5ZN9xZUgAlccMlXUuv5+qOlIgji4ZIsdu+b/mzQUphEz2ZFbL7SbmK2k4HGoSgrxwzyWbmbksqoRmOCKjOKLCUdJoSYFdBUxEzpDUAgIFtYb73VVzV647+sKXjh06PLl928ptD55/zw+3TkzOb9q6e+s5uycm5ro0kFQuDbunTsZdPEApKm7I0BpwxTCFFVgtNY0WjIwAEAELMW8YSpVkwxQIcO/BA2WrSmDIGGMDyFm+QTQy+scYQYXIokgUGy2LzeoYLAlXjogUgICAFLOO2IwRFUDMfFZkejIyD0jiTGpNOJr+MalCk/P7ANOyQhPUwIwKR+IMUcgIjFXZVFAaUDJvZgIKo4NOQEQFKMrKs2vqxaQSl2rLAdLIIkkAyQwElJHJoWAGobrC11JjbNpFSWZFCd/70rWdLRs2Ta6Z8i3XrqBwW2n/8+Rf/9f+Kx676/bntu8oGHynlOQFyirDLmd3DFptwMaWhtPzjp0OahzDtnXY25gYallh21mIcTAPVKtUxVwoisJcsaouxXFoeWCqQmgAKwYfWtI7NtkaNokW3NSRtVu3/ujPD288zW1ax696/7lveRXX5c3XfcmVhYjAyNDFCE6BU1RVQAJWQBAENoMEYIhNqgt2HecXQxKEJsnT23dedcVVEgbf+dZXJifHpI5MFTLETwBDkf64rh2Vn+jEGBnR5d5X1UxHfCt2BOzn5/rsIbaKoih8WZgZBi1Wrp5r0cyqdv9obUsDiL5DYzCoq3HqHer3qslds8+iDNesPmvat3RucOSFb/iHm3+YYvORN/zypm9/ad4PxU+tp9Xb11/46rX1E3c8vG92e3dl0XYt1yxyNRa8FTSKP+Qsx+dRGUYbBTvCTwUpKAK5UQFWVeHIkZkFY1SyynEAgyjknSX1VTm/tNh/7N71M1Vr/Tkk7WGZ2gFLLMVURMwZAiNmsh8AkRkwc5MiM4OM4h0ASBVALH9AGqP3rihGrWS0BAIjD4waMCqYy0kQBplTIgBkTAiMBN4zOcfeEReuAETvfDJ1PouBwTK0L+tsNVbVCPMQQhAdlZPMtLdlKLYRGqEhCIJk0pCaJTFVQcyFIUga5ch7BkMqXErJAJJKTJGNEYFSSpyKjCPMGzgCB5jfIgAgwECjwpaph2aGyyFZo18gyKicARk0VXp774obDu98bPXcv0889K/9Uw3H0aoCU7ZbOkNENEdITJIzeFVVJQfpKx3XT432wTDqfQuzZJoPPWTLnfjyF45MRaM8DVuO7cWUlrP+/eioxy6/i5koh4iy7OUgIiUj772MlHSF86paYoEIGZNsZnk5gmgCVtf1KuqecM7lO+/deaA5staOTbTWTC0NDn7y7/Z98i9DNfG+39wwNz7xsTM2nVVN7/zsw5tjHFiIBXKgBvHWpcMHqPRLh5dmd5dJt0w36wu38vDezw6r00494XWvfevv/9E/Tb3+U5ev6B8vwIObZg/5qS40BaRuAe3Cf+u7dx/Ytffi7/7mtS/rnz72onfNHLk3XDXdXXH7ruqR6Q//8gWxfaAzXNh/89O
H33BWMzvbdFvdZrAEUAnw8Oghp3DsbIkte/GHcdVTBY3Z6gMT7GOr33cXXCwnr7NH1XmHTCSo+Yb1Hj3lZYqNJL6wvExFYiYenZ+QiA2IWZeteiIiqESkSKpaKNryLub4B1ewE4IcTYqIYOqQcl6VMoKnAlkQAK1EVgRWs4Idx1azGKMdGav2vuAF7nkXrDqysPbx7esfe/CcB2++sL1ifsOZD5x9ypF1K585sCDTbrIxiMG5UpFCFC9uAZIHFAGHiL5A4iwBoKQgI9BWt2o1IULJdV175wxEc0JNTujPpEugbNvJl6YmyfrHwvs8uUUUG8k/R8wPReWkYJCQCK0UBAXzBIDsnVMwwqSC2e4LSORSEiJ0mMnHWBQeEUdDIkKzrKAgUUuizqGJqSIxonPErKpqOhg2YLUvi0KVGc0wpVSgzwGTlP9GjlA5CyWyJRqMXdHCgopO9ci9d+z43o/bN1W7iAdhkFTY8Dtvs8cn4K8/d4fFwo2NuWEyDmWV+sOFU0/Y5NAef/VLZ7753ydW44uFX3DaHg6dJYJqOH8ExZWdLrdYmwR1BHKhmC6qsr/UECuB+IqqotVvEgxmNapQqBUb8nXheP1Z3XMuefbcN4LZFX/0B5NnbMJubb3iwD9/6sD133Pv+wD0DyiyqpJqWZaqNBioKimIQ2QEshFeNgeVOCTHrtf02bFDNLGi6sz2Bhde/pK5udn77ry5KktPPIx1VbbSnzZjneml35k1Nf5jXxXlqAC3PeQHvgIi0OGZF0zPrK7r0K+HzXDoPTviirVYsTKsPNGevruADpcqaHUhdQ/KOEgEQjrbLIiVZ245fcUNn5k48+xPTpbbbr5vZuOWk8bG09wz43H86d23bHnBhx4fW9Ed3/fE7fecfdoJh5Ol1phiRNC2FkLKlGXwucYCMQERA2J24hDCyNZmDtFQ0QiyyNBZLgyiWnI17PV9VYbQz31PTMF5PuW8S8547VtuePCpsRJDrxm6gBoJSJkRuTDEpNGhIXnIxkDTmHLrI2YipghJLCd5jHfaSWKoG1V1zjO7qIoshJAkpSSUU7QYPDEgjGqeEtLyuRfNOOecjzrItJywcfw7RITo89pSUhi1m6P7CpCJzBhZQZMlIjIGIpczrbJSM/dvx/vUrEG1JAJZFDN6LQrmFCQpAaHLDwsQgkhWGOTQiZErIzt8lv+VUWTLBTjrWkf+Xcss1eVNB2hdTIz92uCaDy5+9fEV+/6nefzt/pJjoUFiR8jLBVgZAQlNjQEMnRItE3LymxPzdhtg5Js0AASPrtaEyzC70Qkmj8TFEDFLnUffoVE8DQBkKCwAaJJsy0YmWxbgJBn9UGZWMgbkikUkpZSnMscWKyKq2q12qy0ijkdONo84tWbN8OCx3vbnrvnnP9j+9l+4/f+8v3XXzYHjWFGN8czaU/tr/UPf3fy79XD3gfM3P3fJ9Ck3HUTPPhhDOdviiUF5eHqjzO3ZpjoGuGvf7ilLT+29f+9L/+7wNz/+87/y9recv+ML9/zP7Re96ngBDvMHqja5fnIJqyl32/e+cdMT+9pj686YOvk9L/ztgwee8MPPD3bclk5+x52T72wdvHf+iQOTq05vdeIX7hsszffecn776Ox8qyRJYTCoB0fmJqh64uKGImy8r2gHx8e4hJCqFlHdvvTSvgAYRGe1iiPHBmC27NCWtHwBAxIgmKnLEwgAQSEDILTcFzOPpicAGXFPAJLdsSPOAaEbucK855xJy469Y0sSY2zqYAZYMLMDwwSKBA7JATQowLqUUgsK7lZd5fH5MCA6sGLs4DWXPvKCC2b2zZ+0/+CKx+995RdvoOnVz56xdfaUcw5sWDNnxYIm9qkaDkxBfGvo/HSMS63gJIAUoaw6sahNPZKqKoImm56YbppGLKmqjcgmyEgiYslExFQQOFvdEZCZkcgAgqTMmtafBKETAZgoieTDbiuqkcWSFK0UM9OMbzIlDWogZopIMSXnHJipJuccACiId17MCgQEEMBEoIaGRsKCokkQkcgfNxwiYOELSaEe9LxnA/CuEPEEzAi5CNlyiwQATNgqy+GwsSRRNAVpTbbvvfP21WMTQ9eeGhvH3tJA6kvWwstPPfwbd2wYm7bQG0BoxLcQCxRphbpbjRnBlf/7y/te+YMn/uQDG57bt8J1BmMVROqHUCnWSMk8DiHUDUIqyRVDhXq+TCgy7ANFEujPgmuVJ50+OOEEPKGaOuOFnUteOXbiZllD1Ek//rFbN29Szx649s6l+36k37+xmN+1/v2/9vAQCBQERCIqAYvEgAImoqRmZKPhBBLm2C9D9Cmlqt1qhrVHBwgJbKjKST74y//731xx0w3XTUxMFMp1f+ichT8WP2zFPxwmjXikchmdfDx7xYDr1rp94y9FhaJw3BpnkVqiJhg2A00tbK8GgTjeng1LYyqYtAOduDC/aWrTD321FPslFOvXnuv3HG3OvGT3oV1rN6x/4wtfVdz2owO9fnXhJSve/s4jF7/fP1Hf953Pv+OSM56s54rxlX6wWHU7DJLcEK0ws9GLwuUpNObmDmQZA6IIuQDkkpIvFyJCBlVlJzHEoigkNZ12S5uhQELEdtW676ufb/Y8O/P6d9eDhkokgGR4/CoadVcCiAZopJBCMjNJKd/5QZKCKRAQEtDSoG+g7ary3kvSJEIFqyoYxJAULK9yRQSS4LKA2S1Dg80squTf4NmJJHLLEolgudU8Xi0gA9xyO0bg3KiaEhEQMpKCeWURG2EBycRk1KuiAZrlHC9EYyABA8wDVVULolEFADiLN3PMo41SLEA0rwGyzjzX4CzvKgCPHxSOX0iIKKgIo5mby2vanMme/FFvLxjb9LL5s68bv//z3bte1Jy1otXpSxIEx+wMzbLwGI1G4jjOgpvl1AIBKzQnOkAehORnOiJ2koPlncVPF2Dzo1/TTwehqPKIw5hG9uKU8oPyODGG8/bX0DTTaTBKQsSyLLMZGgBcWRBRq2qVrcrMMunBSueqsqH+kS9/+tC3f7D/G287/dd/9+03fvfZ+7bv+PpX9bZrp+udV1wdDu+H4UF/2gVn7V849NQvXXHpzd+ksfEwaJ6qFxf70iVPU+un5vevUGIKWrTn02B+1aXHNl1R3Dy75+DRVRu3bNn1H/sPbjxegHFmozNJLXLz1E7uTe96zztQa8G6eM/iwXTOhlcd7P/i868Z/69tKHv4U79wsc4fLaTeNJOeSuGhhfZb0SSlvmGboNdbxKCNuv2X12seYKrRHLfKDoemLeVw3YrqvHPSwhwAhNAEaYCdxKhmsWmihdGUIvuACXMgHwQ1BDHRUfAEgqiq2jJh2syy/dzMTJX9iEU/kkaP5hlQeCLn80egqKZQx+QKj4iVgBEQgCAoIUcTUw5SFWXLOJo0MPQleOROMphPwXcOr+wc2bJ5zZtePbj1nlO3P7PuwfvX3Xnj+bzi6AXn7Tzz9AMrZ4YrV8wvLLEBxMEsUpVcLNmBdaSJFRbiQ2NYFBICeZrvL0xOTvt2tbi4WBAhj1i5ZiYgo2svjZz0ZqI6oi0hoG
re9qBIylc+AIhIBPEREHHoEwN3DM0gkTkxRGMEZARHSiACiNgkCyEUntGxxOQ8eV+ElApuIxCCEQpCMotmgiQqo5EbqmlUIyEiYpdiQE0rV84sLS2iYxs9fjAZstho1aWj5E1PrkwwJOOqCE3qjrWPPPvcnh3PtsarJtWzszWqlaT/6/ylnQvun2+ba5pUOY4+enOmXtvdodSnnHPhLrSDe56dOOuilV+8+5n/+Ntjn/unsfnQNat8ReQrNG5qjI3D4RCl1wAjOF8sdleGtasnzrvEbbpw7NSt1fpTcP2GlSuKqrCwkBZ3zM7e8o3B7T+K2/c+9fYvnHnPfy5+7TcrcdPGM9brbT3n4Qsvj73draIlAqhmZIOlnqp6XyZNmIHjIJi3G0gOANE1aoVzOkwFMpUcRVITKlcmkfsf3PaK1715duHwtgfuqorWqaecrAw7t+9wH2dMbB9Lw/09h4SKRTJohAAg+ak7z/vntowNC1JpwNg512LCkqVpXKsMZ540+8iwo4MxqjJuuVc41aK3YnKuO3lgfl97ywkrPQ9n5+Tgk+8ZX7fqshdNllNHXrX6wk99Cs9atfvxQ/9z07H62ft/5fJLn7t/W/W8c2HQlzGqA3SxwpiwIDKwZdwbHU+uwPzcPJ7SBqNWGDnDPo0MGNRguT+z/D+HEFxG2xotLCyed82bXvjBn7vxx/dNjLsYqWoXZAbhJ42XmXnJNDmrSctM9/7pYSahSuJROhIw+5BEDIqiEENrIiIyYDLVHM2QhNCSGbsR4C//pUxRLIP0LMsmzYwRQMQILS2XNABb3lxmRbSYoo4G0cf70dxtKnhNkpLko4mS8WC0Nc/Dulx7PLGmlPM9RGTUYWSPcp5g57xGBROFkEwsaZ7fAgMoYoaPjTLocLSVBxjVTsRlsByO9N60HFAVfVFavw/F/3JX3Xzsof3js58NP/5D9/YAg+zmz9EWZgZgjKA5F0Uz7zFPwtktryFgOXDj+DManTvei//kUlmuu/aTYMsRWKIAR0TkR55sL8vpRUy0PDVd3qLlmi15IwMAfrlLblUVE/uq8FVZsIOkZhZVsSqOPvBk+OEdDvvwlc/c/bXPPvKyt2/++de8/sKdY2dU8fDEmByci7z56U+/ZN+f/cvqg0uXrt15+sTpe+P+Gbf/UJhMbt7CYHp959HvFeNlq5xujiw40KfXnL9u7cHnv/vlN91w48tf9qrzT556+sd/ffwv2776N1J3mtSSiz/Ytu979z+0awcdbtxSb75vRYganKzurMTzLpk8+OSf/P2x0zdNTMPS4Z6bXDGzf/euZ3cN152wcmEQABAlYRMSwYEr9LRv+cIxJChnihQY5hbLV17TrF1pB48aAKbUBqwtiURA0OXVo5mBgqAYjSz1kDTrCoFQj0fomCFx9m2xI9CcVvZTQyA/Cqai5UAeQMuifQBAdlygqRo70kTJIovly5OARAEgAZYh1hSEyAdwCROAJ682bMVBBGmOHdl7cJ8/cf19m9c//crnt57cs7lUd913L7zzDj82eXTLabvOOfPIzIrZzsQc1hZjp588FFyUyASinTYljSIIBih69MAh8o4cE5GNkgDARCjHz/kiSF15772PMQ5DY6iOuCh8XSczcarMCAhqSQEUDBM0CGDmghpj45KZuWBxeayFBrkME3kiauLQLZ9ZkcEQkyoROTPBqIRoSmIsaEbmyKuZjmDoZjnm3MikcARQDPo955xAznylEJIBkZixLZ+ERyfgkCITQLKQ4opu565HHhYRdYVPAA5DjFun9fVb4q/+aFINy7F2vxmYVBU4K2Rcao1Ujq/lAJ3K6AANKn/SB/6YXvOLB2786pHHbiueeIgOHQ1NFCRuj/uZTbhmA208tdq4gc++cuXZV3TWuIoHNbT786D7nzv2wy+m7U/CY3uHD9+++tQ1S6vW9RZ7B07b2rQmz7j7q2vGVixIbCc61ODBt/5CfwBjMkMcEvuoETWrUTXFUSYfjtaHCmAC5pEY0AGIJF8VZlIPhr4siqIIoSnQuxIXBoOffd+vfq1o33Xnj3v9hdnFpl1VZVHIsWpx35ytM9drnzxfrJub35EmNgPAtvP/o01lqw0WTC3GYVRicsyMCWMpYCedTpHHpnkhSukLNKtVW0pwYP+5BfzWCeduPGlt+9Bj869+xaoLXnj2qWs6qzamYuqkFfH2W554+DNf+Oev//vWP37kjS84894//aXOz77NmgYr6XQ6/V4aJJ3sViqjfjcLN2yZ6qWOIBtNEClPNQEMISOGEMABGwIy5OUcMKXQzMzMHA2HmlBLjJXZxMTE4v7n7vnHf1nx0pf0Fxcm2qUBFOjZlIk9sRJmk4OZJtOkwuZDCFx4QIx1cM4BsYiACiOqoRmycwgoQZhRTAFAAMQ0ZbyBAbFz5MFMMj9EzVSTWDLwZDEkIVUxdsRplIKZM6WPrx5zBJjl0/LoPWFC/GlhWm4dhRJBMjMg0NwJGCAQOZ9fORkgIhWouqyJcQ4Rk+lokI8FLEdVi0htofQ+mhagZsrMDrwDAMeEqNkxBKNOW0ZHJGOkEVYRkfJMGE0BnNYCrVmsN1Vr3t6/9LPdm69rPfPi/raL/RYRFVQEVAQBoJx5tNywLjdMo4oblwksPBoUABiYYixyeOfyAWCZ4pBNimCWJdD5L0iqZIAEaDwq2A4AgFSPN9zHo8FMAQEajd57RMwFJteDVqoQkYjLsvSQVaVqYKp46kuuWf/j2x/+1KfnH7l95tDOwQ3/uWn6P0OXBjNjU9VSEFKzdZtnX/O1j37uYx96qlj0b99S/b/b5xNNtMdk46rhtl1xcv3Y4jGisV6n0z8yO+PwMLmXrTv6Tjv/2/d/9h927f/83/3FQL94vAB/78ZPlhd8CFudzuqpT3zzGAhxVUgbO64b0ziNl1AfGtt8Ovb6D993AP3CN+85VpQmq1dX/Tkpq2989/pP/P77Fncd6XQmm8Uj9eEDfGJa3KRrb3MRUknYm52jokjTU6e8/rV9IFe2mKhqlaVvJZQSKiJstzqFrwD+/3dxvpgdIlqe+efzbL4gFUacBkIAgJHajtATJxvlgOb3PC8dyJWGiMSWSRiQ6bxBCWoDiNSwRYtegAQVwSQkoGhQJjSjvkfR2AXSVttFDQlb5YRi7WOvXqqXqNi78QTprozv3fjkod2rHtu24ZFt59x7u67sxFPOfGrrlsMnbT0y0VkMi+NQU6vk6IoQCKxTVk3TOHLdsZYrq8FwOLoHltci2XOo0LSKkomYAAom38rbEInR0FJSM2MokE1HTiSIphmE3iCgCg6RDAIhkTM1yCKY5aWJqlaOk4pjBzTaVXlmVRVoYAT3JTAiI08kCGrDXH3zgT//gSbKVWFJ2HlVJeQYo6owYAJQRgCDmIXXBmBBUsPkia1pGAlSfGbn04kVsmjUrK7rj74Q9yzxf20fq7q4sLBQFh0HmBgQ3QHpv/aFL2uNz7gjwkU51LlqWMjuWWu1Ou/4rRPgN7A+sNRfIKF21SnHxnB8GrsOnXjipYNNb9fTw+8+EB+4Xg4801ba+
+jj4/2eh7R25fqFN/3sJx977ND+w3sGR1dseOWqNMSrti7+663V1Io69ekjv3dk9erxYWLfREPiwgHEJhWtiiDOLwSuCjJCyFAnZMsSQQWikqEv0hiAmif2iuY4OYUoBBgaPXx08Z0/96Fket89t09MTFbVOKgNXrcEhxFWmMM6Hbj6z772tW2rTj4DBgA0Ttjrh6Jb+KnJ1Y44NFDXTRNDrwlFPzTrNpNx/+jBlGCA3pw3x0srW4MLrqpmOi/5lV91W68oTixKX/Z68MyjTw4f3LHq5BWXnX7mhZedf8naKze/+j2MduKn/l/3yvOXTjqp3HdooUXtoxFLZKJ+LxTOk2MkyroqJCjIETH81Fd+EOeT12icaGCYjSCjCQkiItPhI0fQzDmnnIpWdWx2/tJNZ2084+Sds4vjbfZcRJXkyJDIEyM5Q0NQB2IqoihWa1A0BMsLEu9cEwJBDqoxYgcAKWreUxOiR44xNinmNC5E9K4oXZFiBKXlPs5MMUf+pyAmAZmIQlmWAHF0bnVOVQGUAInIp8TMYJZ1uYCEsHxjE+XGH2CUUpmlQAqqGvNcOCs7bEQyJgQAzzmY15U5zsZY0XkmASM2gGQqYMEkmSkQqqJQfjqyMJCRARsFVLblSQXkaHrLdREyWgjzP6hoAiYOnAhjsQ+PfIhffMvck0/O9P8mXP9fcAZCNAXlfKLPojNLWdwCuNzoj+bQfFx0DaZm+QeBQ8pxh7astwKgvFd0RABqiiNqw+iLmQHN4YgNQMvB3QbAiJn07kadQ1Yk+BhjHkGLCKoxkkNGzGz5SlIyIvTF1PRUOda2Ggay/lWf/vjd3/qfpb/92svWP7Zy6tGykJIXwIDIjU/LYBEuag//6DOPHdq6accbX3/rx246a1hJHf22PeXYKq26NNifFg/KIs0Azq+7lDdfuGv29E/tLadf91nZ8fd/9g//MDf3ExoSD47K/v+u117dOgxrpjvDbkFBDMGsWtcOzY571p16cjMxsff+uyZXjiUtdDpF9V7iOCBNrHvgth1PPfbAKWddubSQ5mefluHs4YsaAFh7B0mKHmoHk6E/t/qVL5s483TZuaeqKkKkVgnOVa5sYxsBi6Koup2shyew5bgYMzNkIEDHnAcMCuSAXFGIKo0iP0HBQEXNIEmBbDFgluDldBMAVEvEMYkZmJpJijESqII1hmKpVB6SBollLQnQgCpvwCzRokGkhh23GrWqiIiBrUXGEpZQe+RdWbqQpvsyN7+rqMq5dSceO/X0PS/vrXp2V/vRR9c/9cR5t94VZjqDc87dd+YF+09Y7/s+YdMwCqKCUeHRjL0fNE0iYDMQy3iQ4+sbQCwrD2puFIJrYj85MvqiyBckAnnPCaCuawBIhIBIBiYaUYig4sJX7RhjSkFUEVkBRExVWjR6RDAROIdmJkqICb0H8IYKElHVmQN1gLUiOBqdTc1yAC0ihihVUUqMTEWQROQ8oQkkiQmMVUE05vGEQVRRoJSapIm9015/MDdXjVUgqSRQsC1r4NUnLf7Gj9tzS0tVUTpTTE0o0DGP+8neoQOnb11XF569xujHy05PekkLP0ydHbuGZRXVleVaKqp68WB/+8O02DSLu3t7HuLDC3r/7RNpOHb+C7f5sj9zemqNXXbRZfEL/zK96axbrnnpxz71ydNmZn77je/9qx99RU+4pHn21gkLAO3e4mz3537t4NUv99vuCq1uqW3yIYmiIyQHGIuyssUIZJYAwRygZ0fIYipgajaUOptX0bmgQc0gJDIEMq9lUjTS3QcOv+M9H+z3hs88/UDQbtMfyGRDLbIETvfuevBXf+WUX/jtVSecCM9AsOSTTrRa+/Y/duP19z67+9FzTnveaadvHZ8+dW17Y7lGu/1THn/160Ub6k6215+ezjlzet1WXrVy7SljCWBu33DHA49t++E2G7o7v/+lW+/9/nt/7iOdBze32sX/+t3/O1FtnH9qrL/91u5z28d+5y8P7d+GHS4ihjKntlcM0SE4ZiZis5CEySkgGHgwAkMCcKhoAOAMTFWJGVBECBGYVcUQyLFrMJphBhapEaEDGOu2H77+i9K8avKCi4Yywt14IcBcMFQVl10rAkny6heYhsNGDKt269hwCBYLdhAzgokNs5QRk2lokvMYUjTDZKNTb8GgFhMLM4DkiB+utQFyKaUoseKCRCEoGimYr8qkgnWdNRRR1TkHzHFZ5kDECAyE4DRhGsUrMhCyZV4jKuSkYkE1yRghBgMEJhJLVHiNWhRFztNoUvSuEFMAFlYCkib43BarRNMQtI0ugpEnAEBwHknFEBSYyJAFzExGSRaI+afbaLMOy2nJDpACB9+gVUnTRNX55f6LfmN47ROd/n/07vn58ryDjfOaEAeCRUFFrWqYyAgQUo60ygcsYpaRsyhLsH6yh87tFI0kfHk8goSigZGIR9Ls0cOOTHDU4GYjVjYbiKozJ2CZrwiIkrPswZwhISNQpsaNnM8eDJWpE21YmCu6U/35w4//8b9wAdDZsOl1r7/tG1+58W++fMZr3jnlb28j+paC4HCRwXPZtvaKVj/wa3og1W/s6Zf3/tHMgX/6w7hn11DcYHIFAODhp+ax1WbYc8X7jlz9ERw/cSlGmVgFF59ebTn/gU9fw/3h8QKsXZ3a8/XkZqTT7g+SSleQCle2+9tX293vefc1n6tfvDXtSA99cWHLy2ZpDAbWKWONcsThGotpqf6rv/jE57/1cocUDuxRsEOXubFdMtmvHDaxO+lXt9M+Wf3Sd87Xfd9u+5oAscOtdqtjZse8JyLf9h3HgGwI2YeaZxKIBgnNzPlKVWnZYwYA3kAoG7/QJXWSxFQcQcI2VVmvpKqS5b6oKIFUUW0YGsgCYAMMsazaSeoGtYwKTUhigN5QyZVIruMp1rGwwiX0BYHCGKprt4eLvUCEil10A40AJs7QV7WZ7w+oVy+ozG3cKFtO3dY0m/ccW7/jcf/UAyvuuOfgSSfe/ZrzqzNOWlevmB6UYQgAEKt0NPba7AxYg1UFBY2ohuSSQqca6zchNjW1OuiKMiSU1JBYUUiToktOgZUETMmaWKsAIgubgBUC5rlIgAUXqp3xznBYqwl5cuwG9VCTMCKoDhCI2CUa9gIzu9KRQwXRZtBaMSODOgVFTxhiRVWM0dCc42QaVTygFxSDxOiQ8olTQFzeA5oBileVkCIgOtYmUIsA0IIZDgNqq+iMOe31Fxb6sy3rxnakSADyvy7rHerjt/aumOjE+fn5qakp4hSWxLSqhwurNpxK1//oYPVDf9rz1EudBmOu1Q91IrGWY00tR8OlwaoTV227/p/9X/3v6Wq11UdWMjjRVZde9YNLLv3PH90kLh3atzctzF351ne97aqr5ybW/N1//Qf059/42ldWs/v6Vs1suuSkg9/ofu3GAQ7c5nPCxVfs3XbnWGt8qqxmQ2gJGUUg9gVr0GZJO52xGrTlYgJS54cSWyyQhIhFlQmSYAVtFQGCQOoT+MjROS1Tu5aG2Sfghx/5s1O23jOInzm8O7iWWwjSHsV3u97RAyde
eM7OJxoA4IlOszggTTDwX7728weeueOHrX876ZQrr3nT+07YvO7JL9/+kpe/6Ox/unamY9yhhbnaFp1IGPb5r37nL5687wdrNm256+77juy455d/56MTM9NnrL/wnR/81bnB7jdefcvhm6+bu/Z3n/ntO98wsb36zY8+s+uJiXbsG7CqNXVBnv2QVEW4aZrcxuWlKRhQTqUyNAWSTEI//rwFI0TF493wqMP0yImgSQVyZE0ERLQ4O/viX/7AyqkTds83BXmLQ+cLgALNoqSEgsgCJjkaCgyYJIFFVDADWpofMJpzZTOIhaMYo4CQW55eqqlqPwobFeh97rcYRKTOx/ckZVnWdS0hIYKkVFVVhTwcDvMOMpiQQRzU3nszUJFoqmBRzEPmloA4ZDJGyCIlW9YTgYGpwvK2WmTkj8TlJXqQ5ImBqFONpRSo8KrKTKraKkpTUCRV8VnyVvggKYfgkSqaRlIHXNehqipUizFWrVJNwHAUlf+TDyQzi/CnimL+LqgZsapwK/ZLP3EA5l7UPeOquZOvX7f7m3Dzq8OZLWirV21KdTSMkZ3m4SUAUG66M0oBRiGfOloD/9RHf/zSgJFIL5/N0cgAARDV8PgKwyCLvEwhbwfyqyVEsIxTBcj7kOOtvJpQVl+LQzTHMW+JjNAAbVxhmIp45z99pv7831Q0Vmr/6L9+9Oi6l77rE3+2ZrwbHxvX2f2IIBHMjaWpaSf7BbDnZw6u3/zDJ/c4V0xvvmbyYxfWn/uLM5b+a+H5Z/wVwG+/+NnZU/vfO3LJ9sveI750nvvK9dhaGjblik0nvenTe7+4rII2szg+Z4oHH42zM3DZuYX1rOjFV9xwdMtNNjP8x8de6fYtvfmq+dmb1hyY9bpSfYclFF1RI90f68q7hx996jc+9KH/+M+vV348QDh4Oay5q+AgjZH3fGD2yNZ3/9zkC85ePHa01e4UWhCSK3xRFADgpUCkoizLssqTCMlDkVyAVahwZgaGI6w1gYARESbNdjUzQ2cmLqkkU3SmqhJTVhhQ1sIki2gEYKolu5gSGiQ1Y04pio4MS2KmoIRC6Ia9YX6q5JtCFRWImVWgGIaas9tMl1KDItGElJgZQJXQEFDZR8W6b0O7d/2a7aedPP6SqztP7T6w58lt+PT9q5/srj7xzHr1yXOrNu8fX71QdXVCQJZcYtS+1s5IzEsApxE6uGqsCo2gKafGHDfeB2GVxpOtMLcE0vfYQlcZ1JaMUM1AhJCYHZg655iYifI9XlVVjFGBPBfkKaUUm8ZxkVISiOpEyUTV1Ilpt6xs0MQYhcFE0buhqaA6VygYiFaGSNQ4A7VKiQizIBYAEpho9uInS6oAhpSFkfVgYKYqamwmNggDN9FeWloaDocrJlfUaWgJTmjVbzu9/v1bWiECEaUYRSTUwF32rrswf3RqbPW5Wy7avbAUH3+wPbNp4OxYGk60Wm7QRNPoMIFyVQvOa0mVc9Nla6m1gerD7uIrPnfqBf/+qb951RWXvv+l7/joNz779MO9uccfpRdc9v2HHunt2X3y6WetbY89srCw6DeuIH7DQ98bf252bsPJJ37p6w8/eWhybikaD+sE5SzaKs5zMjIjM48I5gXIO0xqJlnvYphVImDU4tgs6iKyq4S9UKCIHVw13j28f6ku4/jSwvjRQ6uP7PLP7X/NlplXrRh7z6NPPn17RW9obE6diKYztqbJsWe+/tfw4j+TR+5bf+Ulexfl/CtPPf2U0/fsfHB61abLr7li64bV3/zqZ7/zrf+6/bYfXXvt9Tf96OZbfvhdrZceeuy2hdlDn/jE177wmT+sQ/8z7/u1w/sOvPJVf3zNe9/ysjS9trfw7K0397/3Tffj7weMiz//5+rLdRtX7H7u2bGt61tyaHFADoGZYkDzidmTLQPnHWepbRbi5gyI0fNRLYNxAExBcQSkHIE5bVkwiSNtEtKyqBIADt326NoXn2RWU6kxeiTzECwvkRAQQXPMwuhnQZTAzDEEEyUiRGeaiIGIokaJQjYK/TdRM4sARCich0viyBMh5/QlMuecY59iDYgppUK1ZmvIKgTOf83SB9PkzCmaKIiiqoEk1kgopmzsSD0ZGxJxNggCABllQK+oimiQpCaAI4WL5NgOQw2SUmq1SjAkYqIRmy+psIEjFkliyZBSSg6cApioI0wKdV0XpZcQEzvnXIxN5lahLdtmLSvjIAOjYLTHtWwmyZ9dH6RNhWgSB67Ggedfp1fcufQvO7rHPrnw/Y+5Nx+IgwpbpMNE6IGXObw/UeoTjER6x/EUWRqyXHWX6+9Pxsx5B0wwYurkYjr6M83U6PjYDQEABRBy6C/89BkCEQFNCDN+GRjVRqQBcIwIlsT52TK6+mhzxrs/sPKt1zT3PLLjG994cOXzr3zz+w5i8aNbHn2+W7+p09uMe8sycH+O6yEWBK412ak2n7P1mvWbDu4m5iVqr7j4V6dat3aub69li1PjMr554ufPPmp8762dnyNEAyEEKCtUcidfxidek1/ncM9DCIOIJ1xxxuIlL37qb/7uvnDRNe5PPxdXLrAu2fxF/NzLFy76zY9O/mhLvyzaJ8/RxmqIkZcWx7Esm4ndC7OHHnQ0NjHeLWZoz7MPlW0+coGc/V8ppTDt2nGhf0LRXvf6t6rEsvRVYF8WSOi9L4oCEYtUIGHhi7JsKZgp8PKVAD+d4gIgiXN2hMtTjWWZupnl2XUEZREUNbPk0igAVYVURQGTEZGRpShBNGcxqmrSCACqYAbOFWYGKsyWKZb5fUNGBUuqQaXFZTLzSoaqSSlGMysQUYFVFU0J0bOpiqjGJGVaPZsWeguI1px6YvucM95Z1xftmnss7nj4xIN3r97VOaN9er3xefvXrdtTzRwrWT3bILqwCP02FQ17Px/qSlG64k1JvSSHkNjQE0FREoWmiaoBg4A5xJKoNkEjAvQFa6NYMqokBKtrpGLQr51zDGBIYhajsCtiP6YUpqbH+mkphdRutyFJ6YpIUBRukIaucNqPZVkOYtMuKo0pMRoDJEVRRFOghOhNzLKWEnNsteT0GbP8/SwElZSHYDFZIkQxQ18szi9EUa6c6yMC/Obz6sVINy5sOXjwyaIoyqo1NzeP1PaKM2PNNK7dtOVc2Ptgvf5Nk2vXdk5a29v5XCGw2CxyqyzQOzVWESi0divXnkIJNB1KaIStJ8875/Of+8cVJ5z0inPPOfjEvb0jEECff/ppncXmsAZQPWFmxQm24m/vvXHy6t+ZGuyvb739P1evfdNHPiq8YW7/LVU3cYKGJ8dlhZVEAv8fXf8dZtdV3ovj7/uutXY5bfqMumRJtuTeOxjbdAyEGiAJkAAhjdzccJObcpOQQAK5JJSEhCTAvQmBAAESWqg2BhuMe5XcZHVpNBpNn9P23mu97/v7Y52RxX2+v/P4kY80Z/Y5e++z1ts+RaOogB1cUyscRK0zqASkeZ73+/2yqgiMD5A5a30JGgKCZXCipQ2LK8upsa7dHz96aNv0HGgfbTK7WDRVPn7u9o988+Q3rz+ECDYAjF50xcZsKj0
5DQC99/33YxP15B2/s3j1jbe89dfVzDcbm37ld//o6J75u+6+26OfGhv/yYPfePcvvUqcvOqmN49NnnXTc9+oyfBzXvb8V73hD86/7OI/Hnfj6YbH//QD+PgjM4eOtboLQ7ZenrOj2rdv382/NFEUzen9fO4lWkkPbCOtB65EVRJiiwxgVcmYSPhjFQOEuFbnre2GIjJQ0FRVRIWou6SifPoFXFRFVXpU9oEALUMIYWhiLPf+8bt/vO3mm062F1rWFogVGQCN81rWIACKcQfQ+EbsK4NAFrM0K8uSRUKQPE/FD2QRZYCzRRU1HpkCM3tHiGgRjIABNYklIl8FihaEzKjQ63QFNCEjAJ5AQBJVgwSeCaCKEv8qIkIqFklFSicBQqDgnEsMaGAWFOWILBWRCLnkykfoSvAMCqWvNNWiKM7ddU5ntd3pdJI02j9wLB+dc2VZMjMmBirRwFaAglTeVxzQkCUWMS6xp4EkqmTMGjwKADASgEEwotjij3CNYDEoXg2BBsY8C75nrVuu+jtqZ7+puOQTzb3/2Xj41b3dl9NFM9JJEBMDWDkivxZ7T1eosNZcfjbiDmA+OqiJ16rbMwJwLKN1gNU842dolBhUIhVtUCYP5sTROu50eR3VtKJTjREQAKEIASNQhUTCcr2f8UizfvnGqSNHt366MfztC65ubN71rf3tPq0krUtmzFt+1/z5Edi2PRxMal6gBGoKNsLs7AN/+RlXfn00ET61MtSYyS84aIBP1SYneXp92q6y/H53SahvhSRDAFUqCRXFsgTj6he+K37A4PbKU3+QDp89Xxy95JINt349/8WZ907bPljUYZ2450Od9T9cueBzxZz9yVvHerdtr3f7TKxiW23TDa0N+tT2s8e3XzD20pe9rl/iytMnOpcaSXjdXSYPLJmFojv8iletP++yYnE+GOdzQ54A0JBBR4hoyBCgJSJrUBUI5YwAvDbFVyUkC/a0oAqqKhswAKAsg1aHChnh4A1gJIN57w0DAjkUQlBVT9hjH0CUAQXJA6FChFogRvZx9LgPhBZJgVUZVK1xisgsBpABMAR1VFWVZQgGEjQhsSEwKthBHwhYGJHYG0+2SUYM+8L3uitgaVdVO2flmhcc4rnW8p2bj+yd2vPgunvwrIld5cbLZrdvP5JOnMxsJ9Bw6q3x7LumCrUyF8gEgyAo5RVVUhUoImyRALWQoBgxKRSzZIfEqhYJHFohsWQVA4pLjMY+g0jpq+ZQc35hyYpaMmW/ij4iEriWp957QNPu95I8AQDjjJS+nmX9bs+5NCHDiIG8smRixZhqMJWXiHBURWI1AqRRSH6wFRtjILI2CFTVkmEDZNzsyRPW2q3Nzgt3tXcMhTeeV/3zI+aRx/fZJGFFY0yjVePuqmbpavBbUN5OvvbYo+WLJ1qbzlo6NZ8PtTSw9aHbKQqVLHW5TUpxBA0OVRuCs1NVdzq96sp9tfGbLnveCzZc1LS1Tx544PCxvRdcf8MNmBycmb37vqd277rs13Zf9tWD+w8cfOqq//6KCw/c9tjujenUtnUvfdMTj+9ho/3QaFmj2q0IrCKiERBBBkRCUgVEEkek4IAEoQqBo4srS07CQCgGFcUBCxMgB6xXvjU3M3X8WNo+ikurPsmNVHXNV2x/CIb+PJvc8X7827ccthXi5puvP/LIw81jRwGgMTzkHvze8gO3zV501bm3vPqG3/zD9tarTxxZsmP+v73nL060ly6/+KLzLjjrf3/287bqk2uclTU3XHrtyQ78/sKrV/Z8/cFP//XIHd+Z7VdNcG3oD23YbNoLyR/9zfhvvf2R17zzsXZz8kefzV52QdCsWJ5tbWjkoSo8sjKRGs0gVGAUQQdQBRqAXmkAf40CYbEdorGOOcP89aeKHkSM2D+NhvJEiGitPfTUnhte89qi6rkKFNEoJEEBkYU9hDWkyBqyHxHYGSIJlYJYm/XK4IMYlysCGgIZVN5xGKmIZECiQgqRQcQoFxkdlohkze8WERNA733dmNjTiGhkYEZEZ2wvVKdxK5GjxBTLbLEEYOIolAIwGCBAkWj9Pcg/omUYIkb6A5EtyzJPsyOHj1pDzrmgEfENLCyBjVEfQgghKiiGwhuiwByPiYhF1Tc2L4oitU5VfRXyPBcBAoUzQekxH0KIMmWn78Xp+1VTA2iFqxxMaft1X58Ji29LXnjr0oGDY/jh8ttfyi7NV6myklRBY18XANai4OnO9mkg1SBennZ9OV3Iru31g9bz6U44rQXSGFL5jO+MDlDN8VCRxByZyGsRXRkgEUBFT4qIqZKoemYh6Pju1M66po3Z6c57P/noNx9bziYnh7ZvSLuhaJl1Va1kfxdccgVfcyP95GDYuqM47jmEuY5o5+AdUn96ITFkkFjLqVsE2pqNyhJNDPvZLyVvv735pr3pdS2e3+SfPgpnlVUwztokIWOSej585XPjKfD6XZXZQ8kUb9n+9MJDYeNE4zmLmzws01LjsZcknS37n/s6r0uGJsMVVf6DaceJS0fq42NQCt7/rX0/+dhvf+SFOHXRbOua//jeY/OPPd7/LbQdGHvYWsgr9a003/jzP98vi6KqXJ4ErgaXkU4vGYABN93AQKpsLQDHVYkegAxFZyAAAFFGJGYgJAKUQaUMRiLfXxHRAiFQdNuLHCVDVBRFnN8TaAheRa0xRiWImsiUEyVjnLXAEqcEIoBIwqyK1lpLpsCKBAtirEIVpDLkGEqCVLEiDQqiglXAIF40CsyBg2DFBk+ImbNGdZY4lW62TFuWh95x/LJk/OoHx6bv2Xhsb/bUU5ueSjcPbfbrLjo2tm0m2XQERLKW2l6/CparlDJyItAPlRdvkEtGsSAi6tmA6QsrsiVUg6xAwMYZRbTWqiGL5H2Ja9bgqpIYa60VCSYx/dKHIAgG0AZAD+JqrmVqPQ2FL+MW4cgV3a5YYuXEixFRK2JQDYKoeHbpoHOga4ksESkQRZKYxk2GBSKFzDi1CsaHvrP2+NFD774mvOG8E6CyqSmIcPkG+fgr8Pd+kJRqVJWsqQ2P9Ip2tzf7M7/5rmT/sad8023fzeq7J+eHJicEFNAMN5re+64vSylT5k7WLzvLGUHKStj0B6ZftPSVscXp8ar7Cbvzx3uevmZ8y6+uhqNLi5+bPt4cSs/fOtUrFr6674HWhgs5HZ18+JsLx0/d8I7f7vVl8fgeQ5ql9T53peqCqxsJYikiVg0SAQgBJ04VstRJ6b2vCB0AiYQ8SbrlaqKppK707Bg8UepMvtpfd+ro6OEjre50L2/pFdcmS7P+8BE0PUKV7vLMxh3v3PnizV94xEqWTe7c/Pj/+a9hUQDocDU1tmE8sH9i74HH7nvmAzgyNdHfdm66beeVu26i5tjS3Y8c/6/bz6s31Qc+8tC8nnr013++ufsybG1qXHHRhuJRGJmUK88++eT+S7/w5X3fuy/rTzeuumH1S1/X7RevavqaUTg1dj4/ua++fryZcVkuEiRIDkW1chZVIqlGhSh+k+R0WD1zrDjY1hFIB9h5GMi0DypXDxIqz0XJoiWFIC
Hn+syx6XNHh4rlstv03LTcRpslFXhQFZY4TVYAVVYc+O4RaAgBLSYmbffaqpKmiSqHwABgaSChB6f/NKCCFpBEoyqQpiADEiOkWQYAkcILUKbWiQRmNkQgGnlnPgRRZYOEFKFGwhgkeAIBQDRkSY2AsPceRaPrihDG0RewhBC895FkFJvpIQSX2FBWAaBer1c+YFQUItPv9ThICL0Y6Y0nY20pEqM4iFhVa4wSeu8dofc+sYlNUUQogp1Q1iYCaxcBNPJ2AMDAoOUYb2SBkqNqQM3EKbEV6fdb6cZf6V33R+XXH673/rV/zy/Z647yKQUjCRPH0esZByfUtcTr/98D15Rb4utjBF9L0PD0oQYvAIGYLKAOFKwhxobT9dmz72Uk8r+JkIClEo+IKSIynb9u87e/fu/ffnH5ECbZ5NaNl2yppaYo+31TYQEVWSLFPv4j/f4z9NWb0+9NwGItrPaW6nOPj56Yo/p6IdBVHQo8lwwvYRLYmMs7t/547KV/O/6x+O6rZnzVjBNAlqRnnm+t2Rg8ObcB5/5rfP5dBngYGg9DA2AKAABWtn+WaL9wJX4Bu6Gav2vD5rfmo6O+10ZbTr38Revf9tI9SR566TBNrS679ts+fvSGN03eG0bUrYopi+kAAQAASURBVDrcqNgN3MARAW+NsbqGvD/zyg+G84N4fLrnHFF4RoGiSOia5g3S4LYgkSLG3DfqhyOiAQRro+vO6cyVWUWgUm/ZaaVN54pSJM/7/b4icgUMbMhFcAaSAUuM4GKwFzCGgJWZ400UES9CQD74qBgVdUOcD2pQiYKIilokhsACYL2VMNzPgsk8GvAgiC1ISpYi1QD9DuNwf/TyAxufP7PtWLO/r7507+jTT6/fv3/34ebZrQ29kUvmptY/k21fctQj3za9RDkPRJVhDQHYmcxaDb5UFoMIlkitiR0FIIPGOFFllTjeibCSqFQuIgp88sQMEYZQgXBKaRkYANI0raqqlmcC0C+KVqtV9nvtslRVYiVrDaBXz4ZSskzAgAqYWmOUNJpfR7SExUjVdyIasdsskXsJAKysgkroLCSql6VPv+FcnxmcqvFoDpXgcM1eOln+j2vs++8eCBsUQp5qjaa88lfes6SN8jlf8KZWhwJb9cXZufHRUUjSIlTMXE9zz4HUoxaJy1DA6WqR1munphF6Py7d4b48euLIp179c1Of/b9lWixdtfvgw3eN1bNLzjrHG+4f3X/eOz9mudzeOfTQWHKFdI7vf/jYiWNbL76ovbQ8BKbMag203RBlmYB04D0JGs9MqqqKNn3CYABtkgQJlNdYODMAokxOfTHSLxsri8P79zU682XN5Ve+ML342pOP/9jOnqzKrqvV2pWtn3fJocuuusoNWcqboCb8+I7JtAEArhu4G6o0IJqz3vEni97Tpz84snLXyXt/XNH/YaEhkBbQcaMNVkbojG25+MZXbPy9vz6yb3Hz2eu+/+O7n/uR/2jfdH76ozk6f6Pe9fDYlgueeP1Nrfb0/lf8Wd5fTS67rmpPU4u8dmbufnTr8y9YWVoNiI4JUhTmqPwSI18kdyLiYP5AeLpeiRXtoBsYewQG14pFDiGgMACwqoDKmtZgr9+56X/+j+PHj1c9nxnns3JVPLMDiGajp4ku8ZgKAEwhyYwqIVFCNrBHDMYYQxYAiOxpwmhkLoboR4QYKaYDOQIc8CAjd2VAIY2nAMQEbA2reAQDmKW5L0pTiRIGVEZki1ypBLFIagAVUFRQSLnUgWAQEHCQEEL0G45gLFDwHGL0qKqqlmZRmUQJHRsRKVH7/T6hiZ/Ke0YFcCFItC+WaDpOiECOg2Bm2AdOAkAa2BtLKPpsQyI+w8jfRQVFUYzEnrW2ICN2QpEnQz4sCmYpiSbJXLXy6vz6r6/e89Bo8Y/++9fR1nFM2xbdoP5EgNgKRgSMwv4Ez5KDYbDnKwAQxys90KZ/NnCi4YHGJ+gaqS2SgGFQn6FGUZE11C4OyPeACoKABKqaCq1aAJA6WxToOrBEtT5mCXzsyz/6wjcnNu/asXOkFYS4r8td73KoU82A9EOItl1LZfmt9IZ/4jf+zpNfeMeDnzrg5rns+1ClOLTg+77fQ6pWC/PtLe/46rrfnnY7Luz++DUrf3OV3NmXfDWZvJNedEf+GjEpgTjuQ9luLT15yYl/j2e01Dzn6PqbZ0YuV7LWnwpJ9/Bz/ySknZDPU8gQSh3J06RsDku7v3DuNReawrEph7dPGURjcK7KTCGZTeb70Kf2qefcfeJ5vfGHGk/fLLvuTqQs8osugnXDWno0Kl44OZ14gZ6hkrPWVY4MBo2iogDAoBFJOyDDrw3aT2fWqhrvcFQiBUIThVBQo2IlIhKJqmFmyhNrKFQBAChxiHEZgo1yN4RR0DEEYUEMXgG48pgmMJCyVYFAjAEwYQ2I1jrLWqAYgMqgIjpCAkZjLDksSxHxTABQglSkZMhaKpDQKyYKQtYbRFNU/T7wcqXZKbzSTF5zuNWrlw+NLzw8dOTJTUfnti6ubrEbi/ScxYkLDo9OHbPJYhWM+CyBJCMJiSKR5VzQJSkjcmBgh4SEBoissaxMgEqVMne7ROSDOOeQrXof0XBQhuHh0ZWVdhYlBHxwNumu9PqpJ6KyLIEMiBprFWU4q/eKbqXKKqwEXlkCEAZACRZAJC5qBIM2mnyCMSGwSwwzG0WLCAoSAqgEonq9tnDixM9sWzCEm5qc2bhCYX1digpeuKX3iUfyhR6AqNieLKy8/d3vL7q6dGx5+Hmv8k8oGmrVG11nD506OdEYqdfrPsUQgmHtig71qNdZtkS+HKbQ6U4M7X/tL7/vnz/sTiz90XW/OvXZT/vR3GTjR/rGz53qWTfT1S/e85ONF165ffct7qkfnXjmqZ95689df9MLf+6d73zVz78VO8VwYhUMWdccG12dmbWkRjROoQIzi0eMzRiqwKuoJRdrK+Nsa2h4aWlOSp8qInC6vDw5f3K8vxJWV0AqamypXXxxMTGSb5miRFNu1K996erciW5tqLVpRzi6ZF0rn5+fG7rzAbjgOgAQZ4Sq0mJNi94jd+363n995+TChvnHr/3c9x593/tr3WP2vHMXPvyhzb/09iXMYWL9uicPrspy74m9riiefPBYzXardduPfOFHreUnHv7Hh5ITzxzZ99A6i3VID1/xsxvSts9Gk85CmqfthUNP/vtnL3z5h7gNXUOup5BLCKKBjDGWjLAIcFRrEhEW1SCyBrGhNQ8AsyZCGZd/bMUwc7/TDSGUwN4HYwwEVtUkz5aCHWpsXph7uj6axDKz4wQACdDE3QJUaNDiVFWkJMDAvL6W5gCUJ2meJRzN7Y0ZmAR7z8xR/YoRwTpEkxjrEBIXreZdbEQjPrvhMLMYNMwqasmEEFgkG2mGELr9wqkxAijgBBVQo0VB3NhEiQgptuOJRTRw5C6zsIiyCgcNKhBFeQOLhgrKLMs63W6SZ0HKAdpFxAsjRg08E1BDtFIRNWgrLSsOQJgwJWmiMlD99KHKbOKlTNiiifjmwZB1rXQEBTmj0hnsnmkIw
+1AtecM5Yd3pjw3XGcMeO7tzibCm9EOL82vFUdkzEatftHz+3rlzSKesTR/fcCJe+aEebj93/3fmHbn/7iz/06/wsvPH0jWe/Y9x0e73exMTE+trIUDt40CCJSTl4a/KqXjcGEJWLoghJtA+iEyUiRtkQQmTWqBkk+hA5otJ1XaM2QspzzGwyGAwmJjsQXG9U2IScL7XKCOOp00exdP31jfbUTBlrEUnJ2kopQNYgISbKAmGEuNxfv3jv+Xt37fzmt27LslaI8UzYQGptjDFWwSobgEvnNnwPU+NAGCAB1FqXDq21ZV01geREBCxKE5H01tbR6iRLY4ypUVmrHd3/T9V7hll2XWXCK+y9T7ipUudutVpqZcnKsrJzzjlgDDZDMjOYMQYTPAaGzAzRZggegzFgAzYGnKMsS7ItS1aOre5W51i5bjjn7LDW9+NUi/nq6acftbq6qu69++611rveMARiNYQNKFMACMMJqZA1vm4QERBSSuSykCIyGWNay4t1QqkPSNRYZQUrYNcvJhLSGlLLkQIFVAHVM9rHdT1VCCmE0GbRIGKWZbVvAOgZRgMRiQIRiwgxhRQtUQiJjQVIuc0m47FGbe+syWRiiBGxaZoSGAWaGD3I3I6tN2Y3LywvoxIZDr4WScwcYgKAEAJnWUptopQHJetsSqHxnq2xSCoyXB1edsllTdM89dRTKijJm0DQ6WClObJp1Jegp9NLHt31mbNPFCTdMv7gycn0ZjqP6o/+6f2/dGGnSMNzdvbvemzBho4Fl0G9eHKtTMWYKVMzrnDA8YtHOvefyFxXKVDDZqhjYKtQRyCgbn92Os0Pn/7oH5/7BzfSUFNojunGU9G8cnqBNeRZv6lGUPtOnklTxe6gn7JyU/7YL31oF8aVMAKVfv8su+f+hV94/cm/uGzHW95R3fzcwa3XbnvdlWujMNp73H/3zlOP3G1uu41PPN6ZZJA5mC4lZTkrMOlUJ1Rar1WmkzKkKrIm40pS8N+/4o23v+KX25p0y5d/76YHvohrx/n8G27nsPDEEUHYeOGzpnacu7ZauboaH3546dEnmqeemjx2fxUXb3rPL2997TuPPvkkdigL47N6Gw73M4AkJCAEJFADNqR5QAlkh7zsG6wLsp1mOpbZOC8feaQ5u2gynJnkTR21ZeNFEWQGIg2oqgqytra2DrRCIlBLKEhCqkkNGUTD6yo1g4jImNr4UFFnXR0CGxZKk8kkkOZ5DjFqkMCASY2oWIMKVkBZWTiFCACCIKqgEJsGEVlFoiBiPamSyqiaREkCCglE2yAHaj3yDK8be0pMTZJWUkW0XhFb47xWhytJFFQIGKD152kVzG3ad4zRGFM3jXMuy3gyqY0xhSt88oKICdiub68UgZmjgqgioXNmNFprbQayLBtNxhs2bLjhOS+47bbbvv7VL5999lmXPOuy7Tt2NkmJswYSWVNVYwK1zI33RadsTeVas5127GZkQiAiF1NNCiQlGKcEBAKgmHLroopzLkZhZgaFzBrC5NgaztU6Y6y1yGQzh0DGIiITUSscNckgArXQRWudpAgICsrMmbHOlpsHU5ianut8GeKrm3w1d3t8c9Wg87S6vxwHKDtzeX2Pdr5821d/ZP99L37+tbc+/wX3PfztS254/skndy8snxacPv+Cc+781p79Bw4/77qbB6l4Krfo1352//g/brnB9UtFCJKscSlElzmQpCpsrI+BgIGAs8L+Ww5vwvjzDb87P7E6euyJA1uv2P7wsTSaHsC+b1NnSrPNl1w51TX/8NC3H7Av3bizdDOr2LguW59SAlE2TECdskyNV9RkHI9q45I3GkRMJY1JedmrxydTMTvz6rdMv+mVdrBl+0RGJ/bE1Wr80APVN7+py0dHGdhOaQB0ZW35+vM3wRGYyTtHF1YHs12b5Wxx5KvltTDXLYcrY/WjAhi9p1SxQlUNp+dwMNXVcYCAU2XJHbzszs9vns4OnjymYCZXv56uf9umwQDrtW997x9++I3v7dGGDQd3Vi/1rz3x7oXhWseZ2tdFngcPzjFAij5okaZ7M1GCT7Wq5NY1vlIBbVFakXYptB52ZAmYfWw0BDJOVeu6ttaujUfWmNXxpABmIQ2qbCdJXTXetX3n9OxskNR60nashQCZddAgkxauExL6apJleN7uHQ/d/8ATjzzZmZtGotgEZMIkwdesjhC5yKu1kZo2VoVi0gTJWZeSjKpaFFv1FIGmJEi2AQ8ROIjL8mhpEnyv6NR1vbi8YoUFJTVijEOGKOCIEJMCIK17JguCJK+KDjlIaiSxQoEWVAGJMptLEJUIKiAJAEXIKwMqnhl7z5BOAYBaFz8Rh6hAJjOh8QAAIQEAAypoaC8jY0BFRCIji1pBYVBCB2QIhRWJkqQWkCiL0tdNa5KnCCmFoihCHb75tW/u3r27Pz21vLyKIBPf7NyxfXZm7pFHHiGiNvEmM1ZIUkqmvU2IQgjbN23quvzIsaObN2++5ZYbvv3t7zDzzNzcqZMnEyCFRKhZPpi41TphbE7v3DN16c76u70TO4fl3SfCT11U/vvXTl1x5Q4/zOrx4vfuXRxMzfhqaQBw4Xlzjz1xcCrjQNzxJTpo6vyLi3MpDkNv07xMTXM9lsGAwNhyUjVR8qWF+aLoLNz3raNf//j0C97rTx+8py6m/OI5U0tzbuf86jJ1nU5iY4hyrpfWNu2e2/+Fvx/ce0eRTTeerOvyeKTYn+l20tGDi7/1i1U2mGzcsHDryzoveD7fcNP2H33HXPGOuABLT+6Br3925e5vp/1Hpg7tqSg5Kp0relDE3Wc1J497UjUBu7lB2LPrZV94259ec9fftC/xSz7/m0mbxVe99tHpZy3/0//Z0OsOx6eru2//wX9/0eLiYifkbKLhnDds2vmW18694YeqtcnivqeWadWwNhUMzr94Y29J9W7rc597sFE2rkIeoM5UJZ3ekkzWKwfAsSk2Rhcm88P5ii+a2TgaznNZ5BFXk6qqkUwJ2z1GkWUpQZsuH2MkMiBCRGf0asqMhOCsQ0RDyM6mlBhBQgTFoGqJQwi5cwlUICKaEAIaJpUYIgAYVWyZiyBZlrGl8XhonFXFGCWzthXdtTQiQNSoTdMkFWQ2yDGqJRZJbVltY0haVQMhoEKEJACGgVXrNi+4zdsUUFELaJCiCEC7TwVl40WJWAidtU1dP+vyy594Yk8KsbWKC5KsyVq/eQAAAURGECZt3bU6RRlCSCkhwNRUfzKZJLTPee4LHvzBPQ89+IOF+ZO3PO8FO3burmMkJBDpdrvJh6ZpOnkx9rUSGEMtOcsgQQQgUlWLBAwZsjEmty5zBggkaYwRVBIixASFFRFW4cyRCK7fcOCMdW59wCViRFnfJgAAEhuDgG3LrrgevkvMoNq+3JnGCDZX09jmRTiVYzWbilcVitK5sK4+XVdRw1ypS0MaDasfeeOrXvemd1z4rGv2PnXyuhfaCy9/1nCx6m7jc3ddfsdtX7XVeGlxZVTBz+3YffbC6v3dwSe//flU+zpnBHLEI/G33HjrXbffqUxRkVGJGN
iSodOHT9k/KsKfTJr/ReY++ff77//VW57D1mOvPz135XDSjFZPbzj3wksvuvi+Rx548uChb6X4I5XxZUieQSWScojB6Mr8ohqgBKkaE2IdG2tMQ1owJoNYez73/M2vefPsNc+ZdHMAsSZNlec1bmAveVa+9eyV797eW5lvjh82olr0epPjo4jZu9904/cePvj9h44N67CxX4CWJw4tbn3WWVu2DLg8++SxesvWLVV7EkMxleF5O8tH98QRrhX9gd1/39rCoyOXdYuZU2/4zXrDbjM+XqfswCNPLB96uth0waP33T06mfQ9q/5Tp/IkSl2XZUkSGxAIhMqGY4zdzvTTh/dn3XV7Jstm0kysKbz3uc3bOAGXZYgYfPKhBiBmGo/Gqtrp9GKMrbMxN+gNUVFoaiSZkkxZmKVx9bGPfezGm56/sLhMjsSQcdZ6JsCMTdU0aDJB6XXKH37LG751+z3zK6OD+/aassgQoyYlRWs8akyNJm+dUUJrDTRhutcfN3WqKlPkTRI0Nkg0xqQUiUhSUsTMWsvQNE0ii0qhbjCKkIB1BJBiRMcSUwoJjIuSWoxdQxSJzDYmsdY0KbFPGRMiTlItQJk1GZmASVWNEgsysqICUSCVpAAKeCZHQZQQBbS9ZdpGX0SQz7DGFDW1zuzJZs6nSAqMbb0Wa0yUhEiSkqiGqmFsZ+l1JWUIDRm01q5Ig8JW0QgcP3DkyL6DzIzeC3HpssX5hVMnT+e9DtusrusIqYMmQCSipqnaL7Vp06YW4S+K4vjx4//62c/t378/y7KqqhQgaiTWrMzIYKFoakXGsBxe9NCmE1vr4SNlA50nTlvsyFuvn5bvd7Lx4R9/887f/OgR1x0wH5yfX6XSTsrp1IxWUyh09btLu9IiD5zMo//y9mv+y+Ev2+SHpqaqQ1lBySeQEPOzTOfIR35r9srn7dWdp6M5/9gXfux97/nlX/uH573q1U89fWzKzIz9JOu6MnO1P7HyN39yacyWrQZcyupilFFg6IzHrsjLqWIKSl04vfzpv5BPfrjZsvP47su6V13evfntm6/dnf/Pn5+FX145uuoeeHR0+HjY8+DSA98La4tmIO7ASq4RQ5IxLW296p/e/pGzn7ztFY9/bH1ou/yyr1+/bekNb+q99X9tT5NGZ/sXPNvuusJuKrbk0zNbL5c8zzcPaNN2zIrq+PGn/ulfn/OuHz/w9JPqbARIo+XzHjuPrjDdSXdsKrAJygl4g4sDBbRfu6ZXhVGyHms7f6g3M3AAl5+3JivBFXkVErKvkygAS0DEZlK3rqWTyaTT6TC7GGNICQFBVBAFSFEVCUEBiNlgm2xIKCBRgAAIDaHWzWhsgiJoFElJAI1rRxtUVQYUgETgkJAhRs/MGhMAOTYpRQBRw5AgSmQwQEiEItrKc0lFBFUSIoqqiCAZCTGCt8TOOU0CIoiALTIkkqgFeBEBFIEQQNYTkb337GwbzNA0ycbULTsPPfigYeesDTHGkNCRpnXPJlVlsqBqkJAlBmYlS2wsJkpsTAih1+vVIUKSW5//gk6n+N7d3/nKl77w3Oe/+Nrrbhg21XA4ZmZFYpdVvnHGaorJGCJqRcyx8aqaYhQRzW2pbMmqRbVkkRQVEa0xQcWIJoPGGK0DFc4oZmTRMWW2ZagZZQNUuDzi+k5QFRSAIwMiu3YCpjZgFFv3FQTRJESdaevr4TRu/3xYpM5EQh/9JMfTfzA497Nal46nc53dddHUnqeX5sd//dd/ASwg+tCB7z3y4J4XXH39n/7tX/78z//8I/u+MT9qpjb2ji0fjWdd7PDwPxx98mtPHO0W3aoJM71pBBhOhnfccUenLCZNowiUko910e0bK6unFzZ9cdvpXzkIP1vN/Le5I08d5uB37Lzsrnsfvfki4O65vPVSqVZuvuzs7z32QLFSF296xZPf+O6GxVOaO8jQ+hAcuKCSkVQNF5lNEl0WUuxGBJfVIbgmVqybXv/m2Re/oo5eKpSavWMM2m0mVMTBTc/uXnl5hGb4nTvM6cWVtX41V1Bcy5qYvejySy7bse0bDzxxbLnOHGbd/OTSwtzUlpTGVb3amwbBJBTJZLWsdTdy51QvytpkMjp/zw9mhVydzd/wI8fZFQe/m9vNVdc+eeJJhfCNv//dRot3/fIH/jz/mcd6D+1ONxoDgI01RdNEgoRMMaDL1ZjcGseAQVKKCSFmuZEk7Ultf6+qquXptcll1XhyxeVXnDh5cmlpxVrLSCklNUkQoBpPqtV8sClBCqEyrKp6+PCh/sxcJ8+aumbShCqgPuHAuaoadYvsxKn5j/7dp7pFt6kar6mNSAMAAiRElFSYLDFDSMomqqjhUV2hM6y5IBhjmiRMhCCGmKxrmsCITYpRoSgLS+wnlSVOjpzNRnWVscvYBBVC7VibRBOgEnrvLWH7pTJjk2ppXN00HtUZZGVLJorMT9bs+mKM0RgABdAWgJP1ARiBEAQEgVRFJBFoS45EDDEWnQIRJ5OJUex0utbapZVlJYQEeZ5LCk2KlDli01FXVRUaTsk74CBijAkpWWvH42FrnOtTnIrU8icjYzCAyOOqNrlVwCQpgWzcvCmkuLY2YkQQVFACZEtFd6au625Rbt++fd/evTNbtrg8O3Tk0OpkuH3nWYuLi6cXTlmbMWUoKWtgKVSFIjEE0yky4FOjyzP/Fczmsvi9/amTBieWRscPwnlbmu5MlEKNxT5mKToQ9JNFaDz0yo7fUvih5AVOzPTy5J7i7LdOb7LLB2auv2X8/XtjM1bMcjIuihSwfRxO/OZ7Hvr5O7e4yb7vfopC+O1fe305+OpVN7/45MFVZ9x4ZdTZvn3+ri8NnnhA+rPg18R0Q6qoMd08w8zJiFK21mRq3aBnp5BnNsTR8h1fyB753vCTf7NSV/6mVxYXXTF7y9XF5Vf1XngDdt+E4Oslx3uHS4t7dO8D9dEjo9D9xIU/vUFG737p1BcGr3pGhvTAhrX58p7njp6MOvGU73juW3e88IUro2HOxcTFzBdD12Qnj+WDTSf/44vT939z5WW3Nilm7MZxldJMOT9z6/de9s2bvojLU1qcxKVZmnRUkv3s8+jY1mUShtUuFbaP4kwV/cLSrkFnKSwltjZhYWSRFBrRlhQTQlhbjXmep5TasTK13giYRDFpa3vDhgmAGBgQJUSbZ3VdiwgAe++hCdaYgJpAEYABIxMANYgoSAIBNaWAiFIakQTrOUjYGrwDABoWxnZSk6i9Xq+JwalWTb1eKVLKnG3NYlNUlzsVGIYmQGBngVohL4qkTC1Cm/qJgJhQIyiokgARJh8cGwIybFWViI1jESmKos15a4MQAgiiimibyV0W/RACsSBC4xMBssHMuhBClGSsFYTSOhFxbG59/ov6M7Pfuu1rX/rCv4dq8qxrru8U5bipiQgIy7IsnQPRkW+e2W2DMUyELrPEURM4i4gkyoDsLBBbEU6KmEp2LRDVmZkJpCYBZxYALFNbzpltlERsbWwZ7+2C68wdQ0zExEwIKbWxpLieDllRrCvocLapP7sycbVM2XTezM6tM
XxmVDcatpEFgLt+cG9Y7pdZSXlunF1bWd2/98Duiy/89D/9XT7Vufqqa3fPXTZaOdztbC7yas+WDW/71qe/MakddVJKXXLLy8udqf6WzVvWxqNAYNkEjSSJ2HT7/STeGJsm2vvr/tovreR/0GuWw3f+43OXvPXHVusdqpItP2EJ1sLJ5936so9+9o7Dx+87cHj3jh2bzzpxuOr2yqJc9UsUklqbam/yDADAsA/J2VITZDNbxssLtDTfv/qiqQuv0jpmFXAm3LFeXHLVWMhImfpUDBAVypfMmST6kc9+5errT+0o8q6lU8Nhb6p47YtuePqJA997as+qj6OjaXVt0u0Otm3p9wunSfJSG63qBvuFveKs9O20ofPo93uTowS0tOPcPbsv4dUV193WNcq2dHWj2sztPPdlr/yZ4dJ8Xncf33XPJfddbWI+cjyXwJo0Ie5JR6n2sSlMVljbpFUhIjRGLGKqwFNMo6ayRRl8JKUUBIgBSEm8Xx70TZZvPn7sWMdtACNidTQBPz69c8uGc87f+fjeA6bsro2afnf6F/77+//hM1/CCMvVKBeTYkyUQMEqo9ecC4ha5P3T8yvHdEkIu1wkL2qgDsHl1jnDmGtUAogEhpkEG/BMnJoGEbUJ4ExmEIICQMNKGgCikDEEJZvogyFTgY7BG4QYxDKTgoI6skG1RgWWGERULHNQTSJEQIiI6Bx2XTkeThSNiEeLMSZjjKTY7ZVVVUUMKqpez6zTAAFDklqTjcqOGxQ0hptIYBhJRTM2EMU6F9gA2rXRqmN2TCGpkJ00PiPDjNRa/aBiTpKEyCRVyriNjiEFBEYlADJo1CkDRJ8IMEoCw87lMXofU8e5GlI9nlSTCTKV7DxxG1PnvZ+Znu5mnVOnTj289igi7tu3N4RgrcudYyH1YjlnZJUQU4plf4ooJBARkmrofYH+nPHCHO1cFs8VrAz9x7+tb7T9OFn7lT9ZFFcOx2uDjgX0riWbuIFpUt0xjXZ5+UA16LrghmqeLi46d3K8OrK3RunuvLA6djRQjUnHoZ7tb9xnz5r3/Bp36kN3fj0rp7um+eSf/2K/GGy56CJ/ctTNypBGp/78dy60bimszaRimniEda1Znk+lrBjrcqFcyxqFLo5MrQ8WMzv6P/xfvrVxql7yF7rpjU9/z3z6j0/8pdiZ2XpA/pzds896jj3/snL3jqnzdsMN1wjLH31BskbetuXAntPzT9vTzxTgxy88tQE2ja+7OPv2o6fycmrLOWvH9qEpa5mwKxtLOYSIrszzyWR+y86zTt/19albrh/GKgL2Bv3J2F987Jrsazv+/qwvxa0nyWe87wLz1Zvgqeu0453h6IxLnWgmSGDUDvFEHRksAtRQNxGSIKQoyKllGiCAeiEiBooqKME6B9akJrIgqaqmSahLto001rTZxgRArd84qI2ATYyZZfIJBAUws6XUNSeBzGoSEjAmE0gESKApJFHg3KIkIcDMgpcCEJitySqo67rO8zw0MUcnqEoIIAGSzWxqYlKRmIC0m2e9Xm9ldRVSQgBTFBgjMYtgO3cbYxxim0OQlBKCoiCiEXRsjDPrJiGEgJq1mQrEwpiZLFXV9dfc+Oijj4mkvEDkpECInDM45xhNSqlwhTE2pRTPkECDpKB4/Y03Oee++pUvffWbX1sbrV5303NzY6OvyNlaZKrsYjX2zhljYuOVoez0VRUlOWOb4FvChyAgmbahMWRDLiVYC6alcSBpxsy2FX/adQNdZAAwZJCxQSKCgMEl0LQekZZIWgMlTUJM4CiEIElVIKBfaKpSe00cv9n2t0RYtLQ/VJ9Zmp/ubCgS9no1AJysOpzZqV5x6vTyyDemVx5/ZM+5V1zcmZn52F9+5G+ZRePbzbte+LJbHn34kQ3G/M3yJN8ww2vDGlMlSgia4rhuACImAEQbyVJex3qwYWrpxGmumjofDT41u/bfVld/Yt6+f+77B596TQaL6rMN5QB2NvVk8cQJ7C3t2rzhgaOHcX5yatcFftMWXT45HCVhZ601ZVds8lBlEWrBbqfw3nur9vBTuXP62lcMXvIaHkylEJMDVRT0SGA1Y8TGIielxoe6VgMmiQwKPH70+OIWferI8fPO2TkZV1jNn7t756Ztg7u+t/f4qUULGXg/PR1cibGCTmCxGUZKosXG/vmTLO5RhabCYnju843W01mnTrrkvV87dNENL7nhuS8+/9zzlxZOZIQXHLn6ye131/f/bOmCS7aCCYF1Pks8wixgUMpLQFNPki1sTPWb3vDGr339G0cPnD5r+86BMadOnTLWKgiwqkaISTnvz83ccfe9TnHD1NQkNiiKEPuY77jwkqXlU6RkBKT2m2bnXv3+9y2O11wvn4QmSEDGlLRKPonUIBONAMCKqjgo+2hwOB6JJsrMcDg879xdDvH48eNYsBjS4AElxHVyY0trUtVi0BPQuq6Ns0nFEokIGi7ASIpV9CIyCWNBYFFVJSQmkiQtd8kYkhiZUYhMG8liLSMZZ0NoiHBtdZJndnq6t7y8bG0xGQdrWTUQMeg6zRKUWjZESqkNUm3pV+sccoKWoqIIwJhlebt28r4W0CIkzjJPAoQZGEoIDAKxTfBlptbDL2JrFSQhxRajI2NQlKwhoqgiSbpFiRhDCEgokDQJkckANCaXmdWVlSzLTOa8j1yWDClFcd0ixliWJZK24u8YxGYuRbHOKYqAOmuMMSlhEhmPxyE227Ztq5tmdXU1z3OVjjF1P699pDJ6KNOjR9KLtgx6tsIO9ghTbleWJsO6p/mYA652eeD9fzydP1FvKsLRoh55e6LJd96fn3OduePAoZUSdHT4UI3JYA+yaKRXr9UHXvcbGx//+rXdez/897d94Kff8c+f/8pv/94fH35679aLLp3asOW0xb1/87Nz+x+LpZsKvTWIJkrP9XpQnq5WtapK4tSQSTTVpWBL85y3fmvj5i/c/dj4u3efPHYSi/KsXRdufv5r3nnl5eXPvW9wEuXxh+NXvjAJfhkhzs1pb/vn3/xXSxt3P/eRP7z/rC2z/blbNz7rmQJ87PP/sf3q8qkfu3Lm2/fvvuLF+bYN9fL8XN6tMfooq057ZdG1naN7Ht5wbG+xfVe68IrViVah6UHW6WSLK8soaceps+yd74rP+3bx8R+i71/jY6jTKZt3aQKchyE1uRSxHkXHh57Am69zx9fqiWEnitGDChoLRN57ay0BRxFS8CG0EqMm+PVloQAigkhmXd4pU0jWOSIKyYfk2VmKFIKYzImvQ0iGjTEUJJFBk1tGm1RCCJwUEZENGI4xuU7BSGSQg7RQdpFlbHmdGGyKqIKqxiEyobGtRCCmwMyYZSSYkqYog8FgbsOG1bW1jRs3TiYTALDW5kXWTpbtQ2jfU6pKQAlBQA2xAbBIyGzAorajQqtvakPWEA0L0alTp4qiQISUgnPOZUUIyXLR4skti1hiAkAACCkgmxBCjHFc1Vded13RKb/0xc9/9/ZvrS6vXH/L8wcbNtaTqiyy5dVFdq5fFgBAzrWWO4jISCnE9suqamtsh4ioQNhu
ccmyY2YCBEKilkVuEFEJSaHF2AEUCPMkUbmIuRAoekZSQOOddigSKCIwqmpE9aQBZCTJLy7XNk4H8xfDeSED4wiy8t7urBv7P2LpWwCAUd1dqesQ1sAwte07wvD4fHfQjz5Ya6vl1ZOHn7zlhdc87wXP/o0P/WziEGvyXjM13MkDJqmjMQZd+yqLYDPxjYBs2LJ1/xN7IuiOzZtPHDvJH6b0fs//K99zaPFr3/jawrD5zreOPfn4gWMrCxlJjbEYbCmtffiJB+8/fcCuVC/ONp7c0nVHjuVZtx6PZTjMi75KnRsNtUcAO5Smu3HqTW+efflLspkZ7z0CapsRtc5KU0XseJykJiJgbr33pl/m1XJjNukw0adv+86lTxy64borp6fnqtGqNcVN11957733jzwdW1y6dMuUAUmo6ogTR1SjouDP2jW1cGeN4psdl5466+w4rGsqeIZZoGP6vcJ2ynxh1AzKTq8szj9+62fO/W2xa6tep2TOWqVSA3twydpuEjZuSgmz3GSZGXn9xN996qprLl+p1zq9nAAxRmJaretOp8NAKBAh1CEZdowsoEiJmaHhjdNTG+bmnnjqsVOLy4PuNCqEED79r595x7vfORqt9W1uMjMaT5BN7StmFklAuC5CUPDegwdKWKu3BoyhVNcvfeUrP/uZf11rasgzIrDWeu/z3CXL3nsACCFW0Rsk53IgSiFUoxGAuCIn5hSiJ7XKNgS0jhArTqoQY7LEklJuHSOwhMzYEUqMkZEy6+pUtzkO0ScmmtRVE+osK4KPzjlC9Y03qEEmkJJBRARq852UFFq1gyRNJAACZJkRtKV6EiJTm5XIapghGGGiDpjQeJehEvgkyGRpfWcsIgZJlJhZCakd00VYUQFYgAhjSsS0bmVAqioqQsQaU57n3ntG4jzPs8xkGWdcSXQmU6MoWpbl6dOnN23aFGIMIVhnmLkoioX5xeXl5SyzTV2vrE6mBjPWMTPPzk3Pz8+nlDZv3jyZTHwDpeCWvNq31kGw1oRs2o0h72I1wnJ3P6w2fpRiZmVFJEl2Lvgvz29/cn5qY2hW2QSL1ahUN3p4+9Unj30myxelzgxoNykkX2fQQdpz3UsXN13wpk/87CP7vr3jD//2k1/ft7RY/+lf/MVvv//n/voj7//w//3y7EVXPrGwaAFT6k0w9gYWgccjyxj6RY+U4spa76JLH772ii/ff7/ZfeF3DhzXR59451tec/jo2mf/+ZMAK3sevuMlL7pu8Lf/1yBrJ/Mp11z70MPJWrYw/LvX/tKpHZe/BO6afs6Nk9UVvzIqp+eeKcBnLU0tfvfx7IfPf/Kybddc/jyMwWS91abBHBntrqne/s9/3vYG4/0P431fO+v5v3twY2dluDqFJnVczAhIc2sqpWx1ugKgmaEaMECbe70RQLSQm7wG6uSZcrY6TivN3NieiKA5GTApgYOkyY8DBGZOKqJCSinGJgYyDKApRCJICVoaLSSwbFaGQ8emZSe0kSGIBABkCUCdc22pVgVmAkZDxhqOKTETKxhCMhwxqWTqk2Xy3g8G/RhjlOTQRE6tt3xWthVUW92RBhEENpSSZebcOkJumiYGpwBHjx7N8zzG2Ov16rouy1JBAKCFl9q5sAWuGVlIE6glNkAEoKStO6uup40BKLUJb0iUrPXe9/t9ZorRIyJbk5ISqHMuqTDbVqxviFNKGWaVb4waMgweq7o5/6KL8073a1/49/seuHcS/Y3PedHWzTtM8GisEBMgAGTW2cwlFSJyxqYQU+u2HZPCOkTcOrg7Y4mI2baapba9IGrjWTGBgiIBtpstRNTcakioFIwkUYSEoOwkJz5jdw8hRQrRxtjEUIwqqX2vX+L07GXN8K02u3M8udRsWpTh35hhzxVnlQkATq8u9U0pIGidbYLEJCLV6eWpjXPHjxx1MabM3vb9h/7P//5DxfTxv/1kYQ02Sy5z6CO0lC8A2yur1IxXVnpAgkSGUbQ36Nejsc3yk8dPKCL+dYE/P8b3rlW/5D7yyX9RaETB9jf2ZqdJlBgyLUwBw1MLhe0Oz9ltdl1ZfO8uqGUIq+76Kwso157ca5ux2ixDHVGIW+d6Nz1v9s2vgW4/jUeC1PqToyhRG/ahCUCSoGFEgCTWZWZ2evCe977u7B88EYLxaeqBg6NDp75z+ZVnXbn73Gh8M/JXX73r9ruf8JG3bJ2hioVDYzjUCRg4UVNVdqbnLtg0ddmbD8GNdZ2EJslkKU46kYbg8ykS6gCMpT44XtDLwkWffo48fPa9rxi9w3Q7VT3CKP0OAZmUciB0BW3auO2JPac6nQKhKjvu1OmTKdLevU/lWSaQpqenr7ngwttvv6PX6QIgGelT6X30KILRGWQlLfPjK6cPfP/AYHZGiVSNAozqyYFDBzNn/aTacvZ2HmSPnXqy2xtAkjzPwEfidQsrAKhjVFWb2b4rmqbpluWJEyf+6V/+Zb3tFbKZQ6QQQqvfb9+NiJgQQDBJjKBN07zg1ufWdX3n3Xf5ARdJDZFLEA3XnPqChZIkIqKqqmzmyNnheC3P7ERTCLElHNa+EdDRZNxmoiWNpFR0plJKlR8NeiYFsZhFCjEFOgNVA2BKQVWB13lWSArMqmqQDBJASiKgUE8qRGYhy0aTAESfpNbYHfRKl68tLWfWouPopZUTgAHnspQqREaErG20ERSA2SUVYRRkR9ZHD60rGBIAOCRRrNWbwoiqddYYQ6KGoe9cUHFs1p8Ka5nZOIM1tM74IlJ2CpfZajyZnZ254dnP3vf00+3gjoidTqfT6YxGoxACOJrKO2c3p+9e2RmZYzAksBb7hLpR/PxqDtXsKE3GAbtdt8HFfaOt3zo963BlhaIDrU+OYdNctjY5ZHt7Nl573fFv74e1DhQjQ3mCnFPy5YMv++D2o/eYna4czez93f+2+7d3Zudf9WNvfOmjj96j0PzCz7/143975y2/8clHzzln5Q9/e4vthnJ75puEx0zocQZ1PdFdO9fe+eY/+7t/XNh77zsv2PW+nduXdrziofvu/MLnvpAKM9h48W984B3X/Ms/Dh9+RPudJvhMO7xSTeIpt2Prv/7w3z9+1suuKR6zayeGq4k8uOli9PiRZwpwfwCPvG7TuBhf+qMv3ri6eUWSB8mdwQRZr7e4/yl/YF9A7heuuPEFeOOVo6cOWguBTWHYSj5ZHUJB2ICqxbVePbXUMSbL8sW12lGXMhOxSiZfwspitpXGr79qtp4cVjFZsm6qOHRiDKpkEETImBBC+5oiosszHwMzKSI700wqa9fjwUSETBvAlVJSRG31wUkCaQqSGBiVkggAMAEBWGvRsDEGMmuJDTFZrL2nxGCBramqKi+LJtTO5JnJmmrinBNQY0yL5RrjQggQtU06UqPGECITIDjb7ZQAQIN+G2FECDPTUwBgLQOAgK5/JoCeYai0SXEEas16fhq0fOkzLw2uF2BFpswO1iObAIwpAaQoCufyajIy1kZJbeToeqUPsQm+3+mOJlVd1y1Du/Fh67btr3zDW+/4xhcfvO8HKysrL3rRq84/7zy
JkUOyJRtjCJiIrHXrxVSBCFBBWUQjIgKtz8GEBgCQGc/oiKhtMNqNACogKCAgkaggaEysSTI1UTmysAOAOlaVEQAFJjGcUJNKk3AsqdCotfr50cLy8cv69Z+sDH91+uyvLB35cgq92W3Ncj1wdUiwGLM6hTLLJUTL3DQeAA7ue/qiW58NgBEAMZhu/o9/9/Gl5VOdTt5QByYJTGPKPKQIIsAwHK40kkCtR6vaNCls3rKlaZq1xeVep1eNxzYrdMGVH4fJu4f2DwZuNJPZEJM2FAEssuW6MdKs2aYg9McXDu4+Z8OPvP7Y0okpe2n+gquzi6/Kyi29U8dNClD5ev5Ef7ZXbt+RbC5IMB7FLIMmEREpKMT2eEgrCWMgBBQFUWONWVlZ2j85/NazNts63OJn79t7YkXpzvv3HT++cs3FZ5+zbaDQefYVl3WfeLRPUntNGZKIMuVoVCXjMsW44zk3L9zxvQNHDuOuXZuonKSiU3u3YXORlu1dt9H+e7MTB5q0OhE1BBueB4/v/bVrPv5kecs1Mze/ND97hz85b0axyocdEqUNU9NziLy2NiLDsWmOHzuRjJsq58iaCicRcHVxwVEKcUyGQyPGGABiYpHGi+S5E4DObL+n3dy64bgCYwAAkxnkPQKxTIcOHKiddPJODIGRRsNh4pRSIGcRsfVERKYYY2gaEkw+mixfaibOOUPOJKzrmohEQARaiLe18uHMhabJjDWW68lkbWW1qipDlkIkoAYEGYU4YUxRjbFeAQhdnqHhqh4DUSNx0O2xj1VVqWpSFdXWl0BiiipAZjyZpJSyIq+ahpUwoVFCBsMmSAJVZiJso2kNyDrUBIZRBBGjCp7pG5IKA+B6i6ZiGAU7NmsmjYRIuWM2pOAlEpESWuK27AEAMztrUkpIAgCoaNGkJG1kcpZlrUEmW44+MnGRZZyaFgm0bIg4t46tIcDpXreqqjaO1DknkoiYiLZs2ry6uuq9d8ZaNqFqpvqD3bt3n1pYWFxYKIo8BD89PdXpdMbDYa/TqUMYy2TnlJ09mpZENTfkcZI6ANDvLh2IJYQmstV8sNsde2Rt28ee3mjchKBXLx2sbIdjQ6eO5XMblyv4+ty1l538nusPZLRaSAlajSOevOHlq1sufOH9f372b//Z/a+5+dLl8f73veaGf//+VTc8/9EffNu67Nf/x++sdm33xPKNv/Jbj5x95cEP/sTuow+j21pKL3WsXzk8eN+v/ZXrffk3fyuMF9727vf+5MbN9y+d/q2P/09ZXuxu2/GTr3/r+UcOzf7h708WF6ScMcMJ9DlPzaKDje/7nS9c8qaHFrddLvdtW3ikMWq4yHLArm1GK88U4Oy228q3vuzwpFm+embxfrGVZH2HySPmVIXxkVOb3/pyF008cXjDua85XQe1nPXt+MjyzGUzmHwTU0fNKEUMiivTOL0WieKoIucKNxm5Dorr1BxttTLOLtgAB8dLO6BwVEc/qlez6blNcAqb4E3GZZ6P2/hLa1NKGpNl0x4bY0xkJiJr7DqNPmONCUQRIUVhRsNUe4+OC+NCSDGGLLPGEBIwo9HWXAKcsW3JYGYGowmMsyKa51nSmHPeuuKY9QjO9ZCGXFERLHHWdS2krBLN+vANrVVke0oNsYgMBoPWKs44xjOufO1epi3ArUA/SgBRPgMvqSoBn+mJgVpJHygxaCRrbasbbsswM4fQWOcAwBgnCESEou0eyhhT1zWBWuKUFFVSkKYJOzdtf/Ur32zRPfjgPV/44j/f9NwXXn3Vs0vjqqbucocsszXsLCJiAjKgCIYQRDUZIFUEJCUiFAIAoDPMzPWRgto3e/uI205aKCEAGhtiIoWucCKD7BCoR72MTEu8ggQaJCVlQfLSOHPUNGPkLRlwY5jCPc0qF+V71D06v/CDabdz1i01nqTMIdT12JGxRZH5RomRaegrVTXOVpX54LnnvWb7ee/+we2PLK9mk0XOOUygYc0zA0YTRgRTRATSgBWTSUG73W6om4ytqjpjU/S9PB/+nuh/UfjJmH0kE18TFSBVTCQShDVqzACLmcGJYyc//5XP/eR73nvRL34wrg2LXbuCSq6xOG+3V8+Ud/0lZF0lklWh16TKQBxHZjSKAJDOVF9BACaXQBRcljGgApl6XN3+/W/8yFV7pgv7xlc+58blldu/89Bj+0+cWKy/cfcDt1519bPOnyl5dPklF0Q+7rJ+B42g9hgU6jUUIuSkYsrezTc/a2/ad7AODNaurXHR//I/nrX33+ZAJs7WW6/ddPWV5exUHOmlT/7z/S/ft/LTnzj93b9e+bMtnTf/+NS7fzafmfULR01wWkdEjNE75RABgU3GqKKota8osydOnzhx5HCv1wOmoNLPstXx2GQ5R7XWQIZCyKIhhNI4ilKyTYZUlV3GQj4KOoPG2AxESEVCisTUekihJPCJAQUFAqiIGiSBzGWJQCm1wRo969pQFxX0TWyphu0VEEMEQrImxjgou3v37m2RKxOiMicAZ9gpWMQam8gqqpAiimIdiMGxoaR+rYoE69Y8qkRkiJqqBgDj8iZWyMCGUK1liqHqdouJT0oqLVOUSVDRMpN1WYYe2+Z3HdlTSCqICkyoQswhiUGKEo010SE3iHXqGBcJA6Zup+sUJ80inLEhExFUJVJrWS0qQUxBVVWDsy6GZBgJwDEpuXaiQYuWGEByYiXKnDN51gpIuMiqyRhaD1siFcnzPKXY9v4xxlbK0soip6amJpPJV77yFZdnZVkYYwCgzTQtisIYwy5P4jfnxRSvnIpbGYYoaWwyAGDwHDeabm+yPDrw+Im5a3f97d5B7pw0TTkmKjZVw0PUnRutzft9T/XP23UfXnFk5qpzVu86AgDgByBBO/e/6Fe2Pfy5reduPfnIiSO+2rZp98zxR+97+4t+6ssPLQtevnkq2zzzky8//9zNl7/xJ3/hpW99w6Hrb7zn1//73Bc/sw0kg+lxb8faJ//9XVdc+Ir3/Oj+UXnfU3f/n2F1fO14wWHNTr3wlptvWF2O//4v49I5dblM3DVXjx64+2RJ43e868CVr7nr+NaLwmOXyEnIO9MxNlo0Op5Td2J48pkCPPfUycu/Nz72vE1PD4/s6y3ujrucSGVkUGs0advVV3GvN1lcQfNELKZPn7i/zjlvNGX5bHe2AUlI1mYdSiFjWp1Kg2UKNAGcKuw0Z5PKaldM5mtjTdR3vvCCMBrj2qE1o0KZaTDSCIkK2zNIo7Vhm20gmgghJo+CZAwjSYjGGFUEQgFd98YCNIzGWDA2QQIA51xATSmRgiU0SABgkBjZWadnkNW2PlhjncmEJCigD4bYEDOSI+tj5DKPMRhjrCEFaK1V0ZFKYHLCEiNnLksadT1Fu+VKaAvAIGKrghVIRAbOuMsBACogYmwDGGQ9m7adLFUVlQERCYgIVUSkZXJDIERoY8naSbdliSMyIgIhiqACtkwOSSCaUmoRb0jRh+icwxBS8o3wK9/0zk6/c9cdX/3W1740Wh4+/7kv7Ez1FCmkSMIQpb2dLDuRCISMoESKICi0PtgiAL
XfmnC9yXjGdKyd4nFdhwKKGoksFxz88WZhOa0cHx+qUrVvZf9qt1HVGH1r7NU+CUMdQjXvlGbAj308f2rjOfHUE3FyWNMe8SfQXbR9+5apQ0s1NzTOwfbVNAICSkS9Xr87NUBEm2Wc9B27d7ymszUceeJPp3b9LK09PdiACrlk46U94+XTHEdCoFmX1SEmsYkSAEA56Bdoow95nreg+9iPZ3nryj8eCT8zsf/sZLW7sjy2FgAjSjQdK02aLvvjlWquM1hcOv2ZT/3Vn33kbx4/xb0QRWMVfeZECCWuoFhTBSCBjFcFI6TcV0kd8Hr0FiACEzGhYU4QYwCAmDRyMr1B/7pn3+iX79i34P/i8b/auvH8t7/0VU/uP/SNe+5dqoqv3n3f2uo5V110DuHqSl3UaWKDa0zMk43kQUgVTWb86iR24OzzihX0xxeKbiF8z3dunjnun3fz07h19qqXlJfd6rbOddg6m66bufBb296C//zJjV956ti//sXa3//P5Ts+d8kvfqS84aZwctFgyLKCqWyCJKgLy6FJiEWDNTGBpH63Cx3yPooXMs6nhIZVk3MZgMSo1pJKyk1u2WiKnTIfVhPDtmoaMIaIkAyx8aF2CbKiw2QkQAKFdnkJaNmklFRTVuSq6n1DgC6343FlnDPGVL5ht74Kag8oMYCgpliUnTYzABUQAJ1hypg4YDJK3QisLCjKBMZi0BxIAWamp4bDYZ2aCCoo1lr00TKbIgfC0WiESI4NAChgZm3bsqOKEnTyoqonmS0AIKkSEgH5pgFkdk5EAQEVjIKH1hCBkFAJVATOeOVEEFAxxFkTI2DsuCYlk8SAri0tAvF6qAOujxpKSUFFImuGbZuP4oyRJA7ZIINBUCBEY2yU1FJdgm8sUt4tIUrb8qOI983s7OzOrTsOHjy4trYGiEmk7HZ6ne7S0lJrmNDtdrGhTqeTZdnT+/a3r0trMdbr9VS1LEtEHI1GULiwhkWezttMe/bGHC1RtgRTALAV6xOMaTy0g/DwgeKxDVubfMVNhlq7RVNZ1lRPpdFCr7uxTo3f80Rn57P/4dxX/tpDPzCdAoeLK3Zw6LpXrW0698q/fdv0H35kwhte/fnv06bpR97+I+f94Cvffd0F/+V3Pzv9wmf/7i/8t9W1xRPFU5//t/9934Pf+q//7Vde8kf/dPJH7nzsL/9k+rYvbfATrk7QnUc33Qk7Nm189i/80lt/+UMzAMnTWbu298/ZVU+wKacHSp2dsw+dd8GTk9H1L3ozvf31+2XrXcd37OJjt+Z7VGk8kVhibDwpD0ONT+x/pgAzmNnPP8U3dJehuX3LoWue3LaUa85ZZSDaRrzJEo/2PNzPbOPiUFP0mU1KAZlmYGkJEWPjAyimCKtTML3cytUmsUarZWYoUdaQZiXQ8HOPnhpEuOGsEutVYsqEopekwoCCpEACkCR0ikKTNME751q9UOs5JSKta0yUYNkaY1C0TaWMQSKIYUbVJOJMGxKKUSOTMcRlWbSGU2wIAJitMcYQBw0AYl2WmgCZoZQym3GWASKZFudez/pud0ZtoWUwbA0zE5oWMxYBQ9z2f957NiYzJqXk0AHhM892CzUTIJh16LaFuIGwfYBtVjetZ2Uz0XrKOCAgtpmdgIbavpYJgyRmhijrntKECdRLEh+Qqa6r9o3TYR7XVUppKflENJwMb37+izudzu233Xb3HXf40eRFr37VdH+AQHXVZCW1zy2QGiaDRIQtCSu1q2oFAGmfitZPA1qepighICorisiZvwFVyBP6UB1cPHj3qQeW4lLC1XEc37P00PTyoZb+2Rp6tw+8bupZN9PoknNz43zyiWq0jLFJ8Wxf3Gz80SLc/tgeOsefXBWLpVZroUgERQihCaHswsmTJ8/ZNENEm13vJ7qDUxqfuvjW5fOuvXLDBW5qLovUpbS4vHbs3rvyhQP+1J7Fpx+hamiJvCchVZFG09z0DBBOJpMLLji/qeqn9+/PCKY/3Dv1zoXhGxa7f1VOdcrQ1A2LxzSVT3GHT5xadGgtIRWD+x54sgnBUT4uOB9qMN2AjSgmYQumYRAIJqpTw4agQKkinIEVkYmJCAkFIjMiJiJjGCSZsje49fxrdz++/ejILn99T1pb+T8f3feOd7z+DS+49rsPPfbogfEj+49fsPtcg57rcXImUZSUlkUKkSyZymGcjJyzpiLv6svOm5Xx6SNVb+vxe2M+f99NH+CzLt+yfc6GpbWjS8b0EsbdC5fYC7InrjzytnM/uOOd7zn8qb9b+/vf/sFP33zRf/9w/10/M1kYOQJmbZIXpCaAISMqgg4Fk28iBs4MOsSopM1EtHQm+tAkb9BYNlJ718kcuyjBWSOEpAApCjEUOYkyEgDleW49iQ9AgIgGKYVICmxANREBGZcQVJPLMwbydZMDMlDTeHaZagIg0YjAKhGQW15J9F4JlQmjqOq4jc5WzbMsJjEm8ykiohMoXJGqMCEhhcloTND6RCKQiVEyYE3QTCpgYqTM2BSipCAoBEaEcyLAiBRTBAbrU3TOkWpSIcMkDIIppaaqVBRVreGEogRMDASGMKVEbFJKxlhVVSRUCCiIBCGxqmUQAhFlayBEAWjtuRmpjewOkuqmbjt3y5j3OitLy9aZhGLJOjIpJAYkkwmkEKNxdudZO+ZHQ6kaRQXUrRs3jUNzwe5zkQwiGGNclhHRpBob4m63W/uGgSdU+QxEAAEAAElEQVRNnVLi4MmavCyKopAYAKCl0qhq+27v9Xq5aDNjs56bPKhICdAhND5NAIBpdRwwJ2ZsTm3pzixPisaOFfou0mhSIVlHpjNYHZ+gwbQJWO/5+qPnX/fFrc9/3dFvP26hK/Dwyz648+Evbzv06J4P/tfdH/iDo1+9r/rSXxWr48VbXtG742tP/uQLynf/znt/+bfT2uTowYdvvPDaj//LR77+tX/51N/c3bvk2td+7R8PP1Ad/dRfDr/yD9NP7i9FNv7wu373s5931dE1mL30uTf/+gvetPhnf+zmj6LNsm0bf+/ssy8X++Zbn7t4y8v3nF69u7l0g1l9jnlYtRRObkabiRjGCJRY7NrwmZJQZN1076MXnbjg/k2DJ8y+k1NXd33RgHOAZay0V8RTR7LDD3Tf8kPHTh1G6zqKDU3yol6ePzn0pzFKA2Jszgi8NPBnPd2EwEqYsoVhMyhjYAqUB4+bpuD7j9ZvvXVQuroZc5rqNLiSVWSAaiOtr3KbgtXyhJ1zikiAjCTr9zm311MCpZQUpHDZOlcYFAAqHxMCEcYYY4yucC0oqkTGcQhtkA+0REJVjZIoRWuMRSYLHhSsrWPU3GJK6+QXQlRgJmNMUmkNKoiQ2RCRILQV2iBKiFnhNElRZO2yxmaGpA04+U9bgha5gTNvDV43uFJCQgSi1LbAREyAIgoAiAbaCFNQZoqiTdMgovdN3QadhWCMIUIfAxoGRCCu6hqYal+lKK1LHRE1CQr2kKJIcf2tryw6c9/8+ufuvv+O1ca//CUvPeecc7z3GhOxEUkMyITt6rolaxpYH3SRUdcH3pYZv
f4XZ2w326xvUARSUMKmGj+0+MQja0/VstQr3GIwIFrazLoMACS0JC4GAAUgy5pK19jjtDY9yk/h6i6FN+Z9BbkzmEcrgV62pRNWo6tlNet1TYCgIQO2Ga5ORrmS1N44+8YXvfzei178jc3njPK50zYS2G4TGplQ3ss3T21/2Ws6pph/8Pv0g6+a44+vLRxMqcY4Ll2edzv79jzF1hLA04cOaky9Ij9y9KA7WdJniuqnVvIPh6R2jCZPDGAna9GR7+bZmEQhddg8/tgDD3//4bOvuXpteSJMFhuF3AU1TOT9KCNiGxvPpjYRKjXOGWASERBYz/ATlZQoRO7mE4l5UpMSHTl68jPfuo80bNx87i/+6ofP2f2sJx6551/+/d+bxXjLTddcvvXcsLbw8IGDYwsT04UYJYhENAJBcIJBQkJEYZFSeqk0Nlx9w/TFW/K++kO9a+LchYWNTT1Mk2DBRcehOzBm7oKlax/ofnX5xGpKdudPv+/if3kie+5b7vnjnzv24V/tb+wBF6KWtO5HIpAiGEbqIku9esXl511y6fmxDoUpHeei6NiRyV3ZyazNC2ccGscMSKlBpcaLjyFZ8siQZECsyTix7CxLTBxcji2rgpkYNXMGQooxgjNKCpNJMw6kWV1JAAnWTYSBHKtXxclkFDUoBGszRBZAzl1uO+ohBQiIwaBxDJqoXfOIAkbGZIgaEVNk0LHEoEyTGKuU2JIxSBIcYcOhoZQMKRCxzbollxkWrtPrWufQOrAcJA2mZ2wnx5xdVhAZRGY0IQS2Fi0pCoNFVXQuOsughjAZgyohiSIRtaQSMYbIcFIBYCKDBsFwAFChjIwN0ZG1aFqWuI/Bx6RIg8F0rAMpsZJEXVsZZuSskFM2SoAETJhZtogIO7dvf+kLX3zJpZfbCBLFKhUmW1hdrSb1A48+/tBDDzeNVwCX2Q0b51S1rpuUBARQ0ZElwWZcD5dWcpdND6bKvGNtVpZlt9/LywwZELUsstgxSjzTy7ZPh3o0hOV5Ha6u+T4AdC3lFozVPvULKhIlDRX6ZjRcbhqEGJSbWuuyKKipiym0c+faxx/4+NQ1j2aXnmuzJ298y2jDuc/64gcz7GSP37vnR15S/p9f4SN7N/z4L+76wucOX3rt9vG4++H3Pf2ut3/gPe9927t/+tGTp3af+7yffPfb77r9w+96/lX/6x2/uHq4uuXXfu55375n62e+c/p1r37ksadegPqK5zxn92Xn/PDNL1r70E9vWlvoos2Gp09s2rLlsaeec83NR2593dPHjt7ubywpvLLzUOmILVhirg0lAIiEKYzWJisnn/nV+DjItr740LNM1cwX4e5ybz9m4MbMEyuOG1ePFu0VlyVrT9SBCaYwF8kBZHrDlB0Lq2RaWO8hVLw6C9MrKoY4EUqe5xpTXUuydR1GN+yYfdZGWl0YjYmRHE5qko4QK6KEmkUIFUDaifAZbi2zFeWkLAkskhFxioXLJCoAVT6oqoSYQpSYBFRTgpDa8IaqqROBkIbgl1bXPECjGhFFQGKKMYmCZxNCqpq6CR5DgpAUNQN0yBaIgBEZ2ShwElUBQUI2AkjIKmCJWYgFELFVADqTGbJE1nAGZNkaZBJGsAyWyTCxYTbGWGsdZxYMgVmPHjKOyeZAzGxbz2cyxJYVIhgGpDbyREIQSXVdhyjifYg+SowSJ5MJCKQ6pDq0+6Om9oSsMTWTygB2stxpAmVTDESgrlavu+6a17z2jf3ehr0P3v3Zf/n4/v2P5d0sokRIQNAa3kVUYRTAdh3eCpGiqCLEmEDX3UsYgQkF2Gvw4JEIEkiMAdIoyl2LD9w//2jVLBXWJLAqhohBIySCRku0GZiU0jrhAwk1uE1TvZqKGX0J8w2uc9yHfbJ0N0cuuaz9bEfK2Z2ldC2CqmaqE26cOmM4SLW04i/7qd9bfNv/+qtNl+xLdn60Ui5Xxcow+Cop1nFCk7EbLiwvHR8GGZz/XLjhrf1LXjBVntXvbaBgZqY3Hl08jY0HRojo8l4is3njdkTsfaSv21L1Q5KcKVNQikVpHKbg68lo7OdXZOwNYajjo3vvnhlQbcCCiYIikTg0mhpDNgGHaJxNNg/WIiZlJkGHxhIjshCrMUrsIVbjCYcURSYiRkHvuueBpetP7F+YfdIcffnrf/iKy67+t3/9m69+79OXD2+46Yadi2Hh6LET58wNnCxWtrAUERCI1lsoaVVqUEaadGI57th87Zyrd9z/lbMLQbQpr3xl+vlcWXJkaSSgzbqXLLzw387//WSGY+hkRxbMdHnNn/39vX984cGP/pZt7IW/8RtsqipSRWg0wzxK1WhWcp4vr45C3Rh2KWkCpYwdOsMGQACFmQG4zQqrNSKRCHAjCMCGK5W1aowcAcWwY0ZI7WpUEFQNp6hVU5MxigQx1tX48ksvLSl/6PE9mBkRYBANkQw14n2dLrv80sOHDyIyCIqIqDZNY8iygCVOMQCIYUwRMHi1SEQCam3Weni1egBNQshkCYBjqmNc3y3lxnkfCYAQreXQeE2toRvF6C3bGMPs7Ozq6mpLadF1ZItadEkAnDGqShHXOY2i0ZAqso/C6kxLM4EWOAMgkcRkAQXXtRNyxjkXVJOoJgmtRXN7k0pM8/PzBllaQhYgMCkSgCZQTalNsNi2bdvGTRvuueeeNlVwOBqHGNsN03gyaQnPEmVS171eL8SYUjp06BAAAIGIMFNrGVgU65AjgI7H43bqbW1qyeZncEXNAQ05TGHnVOfs82RxtaNQV94AQBmGJFxFtYzgGxpFSCGPFFAbEBCQxjvImSBSLUy11m7z2bB45M/LCz8Exx97+Qe23vdvmw4eGlHl8pm+6/m1E9M/9f6zf/O/fefdPyn7769/74+WvvTVwZ1ffPwlX7vwR3/0ore+oX/Zy5584LGfecdVimFp7awH7v3kR/7kOy94/ksuuOFFL/jHz6ztPTG582sX7zv2qrpa++s/HlXNsDpqZzqDl75NL7r6RdsfOHB4CA/ce+fGNybFN/UfmsqM98LMw/GErKEUvfebNu6s5vefXj71nxC04eO97gvg2m1Le/cUi7cPDrw8XQ2jjpgk1lS0VtNC99wrT506xblxLl8aTrIsWzu5snJgf+zkRj2YYLvWZjEs9rQ3KrJJCqgokxjzbr4zdE76FZKi6FbXbS9PHzsd6iYFgyxsLEcEUEOsAkBI2roVqoAyEiEGSRpqoyyIdQyucLmham3EeV77hgA7eUFEQdZ3wJokNl4RyBgiJAXDtihdPak0qYBqE3KXGbYCEJtGHYMqILb2eYgICiklQ+vEqDMj3zrxMKXUCmLPmGchE7WG6Ejcbl4RkYlbRLEdzZ/hYZ3ZkZ7hOdJ/8hmf+Zt1/BnPuLYSte8jJFCFEEITfJvCWVWVaJt7qO16K8akAMwsCK0ImIiYOaVUN01VVQLibOZ9ned5G116+ZVXTU9P/8dnP7Vn78FP/N0n3/L2N1977bW+RiASCaS5YQsAShpCaGmeRBQ0UUoqQBRTTMTMSAqQCTZqEklMEQLYojjdHLvv6bv3
LjzWHQws2tgoGzSGADRfx7I1IaqKQY5REDDElAU5vfdghVispV1uYz1eEwhv4M3fHZ2e7xmnOJ3JFx/ZF7nr61HGgwBYNNGXjas0cLHlBT+28/pXf+/pvaBsEZkxOkAElxBjUkiNsc4VnbFfq4aRXO+6m5erpcm+B/tFt4GxAJZld2V9JMWmaRzAWjPxKL1HMPti4d/f2M8UnBsT0trKIrKZ2bJx57btO3fuuvvO7y2fmk/E37/vBz+05p3gxKgAUZNiYShKy2UhphbLaV/6IAmRW9vwqAmSKDApEFHd1I6K9t42gLx956bC6oHj4e8e+9aFWx941S23vOfnPvSP//BnjQ/lnr+/bvPMPcfPrlYnoVtG5MH6fkDaw3Xm7FFjEqeODNJwVYtUn3fD5U8fStl4flzkRTYmKUKdS2EdMI/iNadu/eeLfv3J6R9cvnCzK8u4NJ6fpOs/+Ot3r9YHPvE/M+cHuy9YHceB9T6CME46pqvEmB06cnTQm3IuTykpaLsiUk1ERLxOHEDE9XAx0Vg1QQIi2k5HEQJqYvWYCIQYtSUUEAFRqOqzNp4/NRg8+PijLisKNIby6ONiWE7gC2YQqMe1c6WiAFGe2z1P7rXWxtgYssYYlaRJJ1i1KSiCAKAeRAiITIEsoklVJLY/ZIyxCb6tHACIDEkYEcjaFNWqsAgjAwgk9D4QAQNW40mWZyEEZ7lpqsxYYBMlJUiI2PI6CE1M3lpGRUUAhBZSIiJNoIJouF2gtimnLdjFZI0xoGE9+nT90kBJIooxRkBFxE6nMxwOnXOt/kpUCEAQEpNRAIRI0FJF2Flj+NTC/PzyQqffO+vcXY888kiovKoOpqaqqjpn587xeHzkyBEAcNY2TZMkjsfj9mi26Z5FUTDxyspKuwCuqsoabtuXNgijvc6stW3uWx9dxdyou/pZ5qMPHW+qQWHtiBMADIo1TZ4BS0dBAERTrMeV9Ps2Dj2KIpmqaYwxmXW41nQM+LSYT286Ner+4SXv609tf+0d71gB6ANNZNmPqgi5uf0r6b2p+MePXfD7fzX9th9bGO2Ws5/Vu/sLo49/fOHj/zddePmlb/ulb33l9nsPTs654NIP/OjNRw/tv/++r/zsT/zCJ//s9OXPu/miS66/5JUXOFqr3vsz2LPF4ulTK5Px9MzTf/Bbm1f1mH96//N+eiWWr535wRQHItPyZrtlMZyM29MuEJZPnaIU/rMWJO7PbsLpbc9trtq7+oWjgxMPLO+9ga5egKGzGeL8oCyD6602R3xslMFkxhmbHV+oRo/oFVeAtZRkUHR6djRa2wAAsbeE8xvEuhKindhh17qJjdbe+Zh938s3NouH0jjmrhO4Sr49z8rI6AyqogoAYLsCQ0SALMuamBSRiRICACXVqBBDyKxlpElTt3Nzaw7TcWXkCIbZGBBVL7XUbRxvFeq2SFvUka9bXxsTFUXb1rbdxaJgSx5ERFoPIIztLdmecAUlRVFp0ePWQQIU15MSWnZSa+kFbb4YmjPSIkERRCVEPbNIRQDAM/aMQEz/b8HWMx9E1DRNWwJ9iFVVtfncTQzOOZR1F/Qk0hbdOoUWRWj/SUt6mEwmmhKgFkWuAnnuEDH4eMmll8/NbfzMv/7TY488+vGPfmK8Uj3n+TerCGiWUjKG23SKqGKJAaCuayVUVUPoo6iqM9RmwHiqmAuNypl6q08v7H/oxD2Hxo9R1hFFxZTlxkk9jpUACJVIEThFTW2NiCLRUODU2LVk/QBsrfgX9bEpizslWb/yodltt+fxc3FpQ6knJy4KFGQ1RQMhacfVXvtzZ7/ug/3nvGjfoWUvqbROWAnQ+CgCNSoyGI8mmJrSpB6mwwcDo18brz7yCJw+FLdud3l3eWHV2dyHUJZ5Nayz0lYSiwZnss5aip0/6S1//XT/VRL/MTZsr3vhizadv1NE/GiSZflVL73ZW9x370Nf/dJX6/85yooBJRyH2jhjGkBnUJICIEBKKUgiBWYrpAqQQNvWChGJkkFKoOtWJzEBokGQY8eW3FkeTI6SHj24dPDEF2686rwffecvfePBh/718a2vvnTw4IrJyjXhvJ84mf88Q2faPgSAlGVThWnm56mYa5YWz3rOs8unq70H4whNGGtwTafLqQlQUG3SxuVd0/XGU5feufWJW4+eHnachRQXnzpw8fv/x0OP37nvY7/XvOcX7ew5NsRyqru2Oup3RDSJQuaK8aRGZNXkLFtGEEBUJsB1yRoAQEop+qBgYooGBBTaJRIRoAeT0IIyqqCCgBFCRWS2Zbk8HpJhZg4glNk9T+9zaLdv2rK4spyIzKAXQtQQHQjYDAmaJqgqGanrWlEEKYhHpBgjEzAoCjIajeBTfCYkWDQKQBRRREZC5JQSKhlaVyyIpKhxfZmEEGMgBaK23xBNEVGt5eRDGwUqHhVbugSpCiIysDEGQWMUUBBQj2qjCCBadqiq6Ix9pgtGbBWAxMoCEhEBSBHabZwikKUQkkFshbmqCogxRkXIhdVxAjVJkmoCLNpcMgU06xYlRZnv378fAFqO6Gg0GlUTe/JkSmk8Gp1//vndbnfv3r2dXifG2F5M7Q8mIjG0vYLUdc3MgG1C4rrimZEISZO0wWe1QWIX/Oq526Zu2Fz+896q12t0ZCvJSq2tdwbUEVpHUe2w0iI347UhCtpEASJlxnvPmiZTWniyaRtPVsz0IH/xm5cf/8HSwcVLy+p0lVi7GWQBl80TB5ce+/PiTa/b+FM/du/vfWSKm3jFzat+XK3Ctb/xa098+457f/OtxYbt17ztJxamuh/4/T+647ZHr7n2hrtv/+vbv/LP3/zSR3/7D/9433e/+W//9um3vuNdhxbnTy4d5pS2fPmzr33X//jW3h8cec3PHYkzr+k8ckkfVkcqIs651pIpM3aqOzheHZ+fLK3tP0qm88zhj5htvfryNVq9YuXcOdOb70y+Uz56fX1RX+3S/Gmztm/jpTcdWT2eHM6V/RW/2s+yyLyh1t07uvvZGd9YVwJJz8mJpWkASDNDHO9AX+W2O4wTu1z1ZorJytIvv+HSpw8tZLSW2TwF9eoNFzEInLGIQkSD/MwUyNiaKiFkmY9RRB2xhthEL4SUkke0zCmFtgNrz57ElotEZ1AWFoBJaFhARICw9g0AEGM7D2QxPTN9GmM0gjGmxVXb+vdMaVRVFcFWG4TrXKq2+wSAdX4UALRmISm1vZ6QEq5HdK9/HVBFZUNt2QZoHdefISy1nOb//Kbr2/EQ22/kU2xiiCoppclkEgBCSKa1lI+h/WTLZK2VmNo/Iq7nZyuhRSdJvfdZlmVZRrheUOe2bHrHO979H//2mbu+ffs/ferjK0sLr37t6zkTQKMxtVkRiFiFOtSNtXYyaQwSOAMghliTBEkIZHudjdO9wpRLw+r7+++599DtJ+rTJpvpsAMyJrOQEkTtUMaATainpdOgrPqKLRFikbkQgqQ0xsygOV2f6k/3pz1UIT5M9knU//CLN2tnA4wzA6er6MRQ1s3QLodxN/nV6C569a9kz7rh6SOnxqt134Wmzxk
aQ6QGQdAqCkhAbeLY2qz2o9Hayuzu7c3iQRwdMYWZTHxmsuHqaOX0aQAkhbLIQvAO0WMKxEWl4W4x385X/uvS9jsuuvytr1l8cv/X/+4zOhzbzK3WQ2Ye9HpnvfoFsKH38J7HLjjv2eMmDopcfFCygiJIqgl0fRRGYkZiUk2SJLV4xpl2DRAxyzKN6/+fFGDL9HRpQiUmCc8Oepyb2+596mP/8KmLN21bsHNfPZTNDGqwc6UUlUNQAiWVZ2ZfVU0isaxIwljyzIioMyun1/JZtBu6moQRrEPnLCKmpGiy7uYdV09e9nW4rR41fVNaU/Rnu9mGneWuYvvP/fkIp+fUxE45wbQ2WbSFNVVhUnKETQyUcUJhRg21tHG5610qtML2lFKMUZsQQhIgYUwEggCiDCiMygSGE2NETQQJQQEKa/cfevrIieNFlrMCEqE1U71+1u+Pfd0ri5Jzp8YSK1N0NmlUQrJGEGrfhOSbGEIIRqBrM0esqlFBGCUzIw3t9rQFkYJPhqgFUUXAWj4zB0OKGoO0iltgUkJFQCY0rIRRwVqLiJl1iNjKIVJrNqskCC2BBVqCMa4LdlW11fC0YFpbzFSTYCtOQGOIqJWLxDP3JrXmz+0fWxP5Vt0YYwRYdx5Y/+IMSRVEA0hKqcVkYggxBAZ0bIwxddUcPXLMN6GVhDrnNmzY4L1fWFiwzrVVudfv+yaMx+MWi2tfxxZmjjHWdT2ZTKqqCiFMJpN0xnC/9X9XRGQyzpI1iqljNlW+uPpCYll1PqsNjLXMYSEWq4mlwe6Qp4/ND0tFM6wxScp0bEOEqFL3O5kjcdLV1FMburumexdcYIvi9FNHf/uiH/mHqWumsm4nNLX1hktnJv0sW/nCbd9955s3nr81uc7oS3/RP/yD6de++clTae7Hf+nSLz2+8d3v+86ffmjtJ14x8/GP/fjzn3vhFTftvuTam175pr/8i88cffrw7/zOz8xs6c1uKx3Nv+m8i174hc+8972//9U77ly8+S17Zy+7lR/eli+v1ZMW+R8PR01VV+NJCmltbS2l1JEiVBPO9ZlfC3FxjB2BsDWbu7U+T+t4f37k6eJEb6bsZWG6v3F1YeXE8X1CWBo3XfZ9ig0EZwg6dhUCOsmKsgN+1wz75QIAwvQiN7URqbUqyoxn4NjJ+I7nbvuH2/ccavzMwIUQ0bBxedsvtmdGRAjAELXZdahgkByyxpS7LHeOUA0CSgohtDl9bdYZM7eLiZZR/IyUpeVhESAzGyTwEWJSH1PtY92kOoCPmKSpvW9CU/sYUgwpxhhkvSS3s8gzF6KIhODbubPt9tpTLSKS1v+7rYsxxiRRVRD/f+ojAnyG6Ls+TBPqGdXv+uCL6yTjFjp+BvduW8y6rifjqqqquq4r37QUaCJyzjnnWiytKIr2yXmmV1ZV733TNIoAQM7lrNSCRsTY6ZZoSZJOT0//0Dve8dKXvXg0Gn7xi5//5Kc+MTc7fe655/b7fRRtmqbxdV3XRVFs2bJl+5at3W43+uDrxjnX6XT63d7UYDA3mO50ZkcSHzrx4H1H7lrUxU6vm7MzeT6RsFpPliaTEVoxDhHBYENZpNxw0bXdLuVGgERJJYvGZh3MOj4ZRZflxbSYBmBn5PO4mNk2BwCLVR8ZIabxZCGTYuyrbW/42bkXvoZszNHmllBsM6qrSTMOYRTjWFKTUghJfGRNICkf1Q24zqbz7NwWHdUCnehD7ZvJ8lB8mp6eDY0nBEJFppBiM6nQYEUxfijoFXrWhy7+/ic+9b3PfjZoLR3jWfpTM0Wn1/j06N9/ae8d991993enNruhiSPWhgEQW6BFVQHQGOuMNcSqSglbJKZ93ckwGVZDradbC8kAgAHQtWqcUYxoCmuG9SQr3MbO7IHx6J+//a2rL75oBujk0mOu2yw2blagYQYQAtB1K9F1dKWherKKXU618Zis5FyP/Ja5vAllM66bcd6YxelBZzCzFRbmT3/1rzfOPPmNX3vki7/44m0n+pBn2iko72646JoLf/Hnj/zQzyysrEztlCEZl3fqWEMmfbB13bg8b5omz3NUyUy2Y+vWQ6dOAUBrKyFnfm87kYgqqEGlffO3B5eTN6CUUCJSYEI0qqhikedmZkQkjirn8nYgNQiaEEgrEMiNFQIvmTNJpWmaGH1rltrtd4crq612sA3ljSqkBCAUgDXZOmlOMaWyKOqmsdbGGAkgqbaVktZNfqgNB2SDqiigZEyM4sjEGAUSI6WkNrMgur4oyvLGeyVFZEkJIIkogBCapmmib0QTIJRlOUWdtWoVYuxQTnkWQmgN8Fq2CACAJERYJ2ITCQKAGEAARsTae2MMiDBjCAggSdE6iyF6AidigIJlh5QJBsbSOQCoJhNjrTHGIGWZJQVRaSEKq9Y51+12x+Px4SNHnj5wIM9zY0y/N6jqSYzRsE3Rr41HFk2n01lbW1PVTqczHo97vV4btkMtZsBEAK3UUgMwg6fh0LuLNs5M0VAUe0CK8Nbp7zzXP3G67n755LlfXz3njc+//uTeg98+YFMIvq6KvI8GY4yTyaiwqanmM0JM/cmkOfvWa4cHDlRLh1yn+4Wzf+bpjQ/91MFPl6t7c5oWP01udOVgw6n/+PdT9z3KF1wUs8HxrRef++zrlw/vG37us7LrosnS/l1Zf26cHf7q1xe/8jmz64KrX/ma1//y7+DUuRsv3Pqm939gds3vfeyJKZ4d//7vP+vqF/7lf3y6uvD5D53/4osnd589N8/D7tDUZadomiakmBflaDiMybenPWQWVtbykX9mIKst2nIGEWtduRmv/PLCg6vbw+3uyctwx5ou9gY7933vm3zx+aNqzdYSQyXdUpZXRX10BatI0Pk4mSplxyxnezoVAA9WNKpYIcrCxNoApXML835bMX32IFZrk02DTbWX4IVb8gDieqCQthHzQERM67WHc2eADLuQkU9RFBmZkUTEGWMMtXyr1mMuqbQeuowIClGTBq9JAcAwSYwtoAeIIcYkxMJtiXqmykaljDhShCRERMiCsl5lRQBVfRARtP9ZNdsEzzbft934EiOiPbPchRZvXF/utPRgRIltqMP6DlgQnpmSn6nZ601ASClJUqmrxnvvmybUTR18CzX75Ht5yUQqgogCGpsGZd26mYgkRFBFxKQiMTLlqpplGSYgBSZoJuMsy0LSSRgaSz/+U/919/kX/+VfffhLX/78ZFL/6gc/tGXzptF4uLq20j7PO7fv2Lx5cwxpPB5PqjERTU1NreuSgawxx1ZWb3v4m4+e+K4UE2xMPaqn+jiK4IC2d2ZL24vkTg+PgVKqcTE/QULGshiy5GJQY0pN0qNs6+zs0XgIrPGUyGBdj03ZfZhHz6/dxmwFAJZGkZNOyJT5rA7ns6tede6L31XVyz456uEGQrSuEZ8YVZWCqkgwiARWUQQj8MSZcstGO7WhXlgbi+1qg5gJgq99ZrILLjz/sUcfHI2Gpo1VdaaMWKXg6jA3edbw8Mnv7P5scbIzN7Wh9k2nKDWJcZaL7NTy4mx3ZnVl4f/+zcd+6O0/Ot
BSmiSOK++dJGJqTQYlJllHPYCF251gUkkiKC0VgEAJzvittrsMtSAZxWE0iVJustCkxtR52Z+vxk8+vn/LpukNM1MjbwparTVHSqgg1G4XYV06hhhCSsRDtqau12DSSyCNJLs86GxdFRGb5bPn1H5t9S9/137pU7Zae/aGqU/9D5i/cP6Cu1fX8tVUG4Z8393/evizn7j+f/zBD5ZPDlfGpkxZU5ErxqGqU2RnfeMRASSxMU0IxxYX5cyJf+YCQlRq59r1ID+1CK05nA+hSslrYlQBFVKDGJIIaJWa00uLqml2MKPITJQgiWOrFilSSgZcEik7eaNxPGoMKSMmBSAKTYWk0UdEBoJJ4wGR+Bk4KqSMOEZrbVmWKSXD3NYzx6yCUZKACrRujlbVP4Pxt49OEJhZUmJnAcT7aIwRAct58EkhQZt8qMqmrW6aUKXlOiVV1fGoGvNYGBixCXVKsc1OBkQE0jbDBRmAgAnatl3kzKJBALgoiqZpGDHGUBRFVVVtV56xURUATbB+x0RAEVDVxvsNGzZkWTY/P98OCt57QWjNfVaWlweDQZusHlJ0NqtDpBTJGhWYm91w8ODBs3bsMIU7cvjouiYSaDyuiCBIckCaNLaRZ7Z10UMBgDzlCYeUQCyVC43pTs9t/LD+cFdHFCYmnNpWuGdfduB9nV0/8407Hnz4qXjF7/CIrjr3gice34e5Y0TgfOyXLzhr+/EjCyGuzGy/lrPi6CPfM1P9NMnm7NrTZ1//G9mmHz3+hUuO3p4gNDhzdFIX5ZZdx/eNt22vLr26u/WsU08f0cas1MMrrr/4q5/6q+KaW/c98J1OEKZucezAqQ//6f4P/0HZmcte/sZj0Bm60/7goV0HT/Quvvib93y9vuktD9303t3NkStX9jcb87EsG1esjseZsYo4aWownFIMmtRQiMOiFfaf+ejrTH/jrAAtYH2enn1lOO+7zdMP9I+dOLBnuj978uBpmXMxd2WmUpEQMtu1kyfPyjop6ztRzjo0lih8Vsf3DNVrPZxeMYkD96zUyI1irzc9PjBcuemsjdXqMANaWF5i4wznoAEEVCHLrXFOU0ohrnfqTKgIqs65FCK0mzBUkMSIBNjmFGgCXicfRUQmMmgBQqSkiKhEoklFGUGATOZAUlTRFCybqNI0wbaEjHZiVrFgU0rgAQnIZmjwGRC4razrE3aSFlMxxgioqqYzXA0GbB2vBHRdmPP//2BABE3cll9FbKW06zD1unOyqoioQIotX1NrHwDAGtPUtWEuMBulpAjPWIMBgGVqgleRjE0I/hnc0hhDTCkIIor4utYYTZZlqlp2im63n1IYlGWRb0yQpjZM/+z7/uuuc3b/+od+7Ztf//Lq6uqvfvCDF1xwQa/XWVxcFpFO2fVNEIS8U2ZloapAFJIAU+P9XQe//8C+e55e2tdg1HEMYbUoipUgW2zvuZfcdO3Z1+Y0GEW/d+WRu+/89+fsuk58MaxWT66dnl9e9LACEAtDfjgEmUlFSgY6JneC03XtS8I4OdWUIxdekRMALDVRqcgg4nhtNLPxpre/33WKuNiwlWEdJ05yE4KSjcoJBAlQswQRkjfikikTUGcq27IZOBbTfbtpSzr0FDurQBJV2Ty+52kf1OU5M6/Wkyl1IdY++sHszDUveu6X3vdR+IzYF5fNnSBol4ejLLN+ec2OmFOqZTiY3Xho7/5DTz16/k0vGJ+uXKKxEw4kiICsKilFSIIG2RoABTKIADGCyPrRQUiSEBGtA4iqagAQpAKAIJ1Ux1Aaz5A5zZtqW8aLy2sZyK6zXIoatTQ68YDMTAwoLRatqm39QwnjemIzFjQ0hhg0QhM72fTCih2j8/ffJp/4s97RvdOb5mK/9KdOb3/YPfaO/JatH9i+9Zxyy5ynbqH541/5ywc+/teXf+R3P//5b22AbsjHwUPfdCv2sYVXNSmboEEAR0201Pa52v4c7QI1pUSMioJAjtiAMqiAJgIiC0qSUpRaohI7BEakpmlecuP1+/fvX1kbdktHQZB0HUYDMiiIqBknFAhpKstDrJsgKgEAQpLcWQKMATAqobCzkhKgWjYQQVWTkZj8qVMn2oV8t9ttl6lBNYTAqADUwryICKAtriUxAjIKlmXR1HVmbJMiQGtcZ6OPQEjGRPUaE0EylpA0hMhAoJTnOdSMEQGg1pRFVRKxrmddVQcRIWuMAqgaw0T/Dy1FBZJoTK3OUSEJs2UmAkQjIlmWAZkQAjB3PU5AA0EeJCLUJEUgj+qcu/GWm+fn50+cPkUKEgOKtnnJqfFzc3Orq6uj8dh1CiWsvW+/9fLycpnlqkqI4/G4Xls2xvT7/aWlJRHodDrtYwdNMUYkEiRzBoYhgpTKKlaZ6UAMnZkdb+v+31sPfvT8TSeMjVM59ktQCMtVnBw+cFNth2e96pHRaCovVocrAcQ0HgQTGHVmfnUYShubTu/SK5b2PkknxmCCL4bA3X51pN7Y/TP3Xy6evv6tR/7trOXHxwnT3NkH6+ycN75x89XP2f+1L2267jIzt/O+55x/+v4v75jedPb7P/DYbT9Y1LoP2fw/fWzL8uq5v/a/H/v2Fzecc+7Jf/7T83/il5vvfHfQLb9z/+3+xrc98ro/3mnG1z74H3TFhpw6IS0xTLNpggoCEEKU5GMKSYg4q5qlU8dymDxTD+jim81MEYZ1aftNii+0l901fPjk1Km7usfesLbliXSsc/7uMBw764jKCnDANmoSiUuFXUtVbyylLaoQtkzpltni9PIg27xmB3nVVLmxwqGql6dJX7R701CzHGvWMmMIKQYvhSPnHI4hhhhNYgBkQpGQkgCQsZm1oMTWeIwpJcsGrTTBe02OrWWjmloU2hgHhL6J3U5Rp4nGlBAiphbxZUVGFtFWsK6qCaGtZ2c4Chg1EqzLxGOM1nKMUUT1DM6sIEkghtB+jj7TxRCrKhK1Cfbt/NuSFlv6TKukfQayRmxH4pYcI//PvIsALWrdutW2xAlsuSA2hipGTUmTtHRQVS3KUlCN4V7eGY5HdVOVnW637KwsLMYYB90eM6+srFRVRda0Moo8d7OzG5xzwUdV7XW6c3Nz/X6P0JJBm2chxdOLo1e95qWzM1MfeP8H7r33+7/6K7/0Cx/4xefcemu/PwUATAZgPSscEKRVBRuqQ3Pi5MlqdXzzBbfelJ59ev4UEAfR/4+9/4yy7KruvtE551prp5Mqd3d17lZ3q5UjyoAAGXCAx8YYE5wTvsY2tsHYPNgGY2w/9uOEczYYg8EgMpggjEACgSSUU2d1d3V3deWT9t4rzPl+WKdK4obxjjvGO+57P3hrCFrVders2mfvNdec8z9//1JsZav9s/tvuvA6I+nAhcKodtJKVHrlruv2tPYjheW1c4fPHn5q/si58vxatbo0XD1w4a7OZHhQHh7K8Mq0eJltHZXFUKRU1R1bhQkEgCXnK+0KI70h7b/pJ5tbd53rrSa5JcmaqvYI1qkUmFk8iAIhpACBQHLPTmNVr2jdWsub6tyZ8cnZfmtHrWfQOBZCpgBgkoJQuv1VJ
2Esb6yurhw8ePDIocOX33bLybvur7/m6FHdf+Oy+a+WQgJCQTJ5oy6r8da4mWrWC6XSerm/nOfQr52gThRpTT6wZybUZBSQFxIiZBGhyMHXKEAChMQcCzQQjcJARJOgZgCAynpEwkDtjDLUfXJGF3kB0KpLSQmGJMSSGvQSgIFEBMXhaAhPSKOw9t7awOARBYS9L+ukXW4uWghr5//l9y86fz7MbO2tnYSknbzpjTe0T33ukn+f2fzTuiJVCYN27G9+6x/c975/OfWZr3c2Ff1KFw68rUpjCpChtddef/X8/Pzpk3Mt0wBFoM2wKnlQNdqtsqpSITQqMGoxNYAR1AqRjCdkMgop1MwiGBhRWNRkM9OKFlYFRHKVnjo1BwSBgifGBIwyBkEpFkFKc2ZAYW+ptGiMAWAINYZAQgLoKocAWaKGDFlqNIivQVAJaIcOlBinSUNZd6+69OKdO3d+/o4vZUWDHOsgBFpQPAdCEbaIhKgRPAgrZq1QgnMlM+CAndGkalAEJTjKDFiPAR2ojIJ1dqIxU9d1OVwG5Sk1vbpE9gCYqqTQuZUAJEbAC2glKtFpmlprkb13LssyZobAFlgQiSAxBlhc8KiQAnMAYUgoddapxKAgoAYGa1ADSghsSLxrpimwOOsCyN1f/9pabwDaBOdJOC6uxOCDEOp2a8w510iy2tV1EjQpV9VT4xOrve6Z+XOUpJUP45OT7MOgLE2Wlv2B99YGb6xJDSllFGGmFbqgjUFA71mTRa1sY+gHjQmmFzx/541PBKWnVT0XWJaGoaF8i6pd27e+qtX/y7+6ffKKG8/rLQtPr1AKDKqB1nkn3qwtdk2Sz1x8QKfZ0lOPh3EVnISKxQ5KcrmxCroPbdr72Phbn3fu3u88+/G955+eUemRX//Z6oJrdv+PH8CyPvlXf3AZbqofuGuzk2Of++jM2NTyzIya2TG51B0mg5OP3dk50z2vHt7VN8Vbf2F4wXMeXlnSr3rT3Te9bZrspQ+8b/nIXbuuf91wrTKUA9eJ9/1aciKTakUNIcdUjxUTfHbJP30spWQjADc3bUmKrW7YBa+Hhi/ILtp9JDt+xeoD470Xd3sqMb1erRnY8RDLlJVnlzG2M+oGaWKqU67R85D9eGfX5NrDq23fWcOqNiofgC28SdrtYV3++f3ldXvCd41XPT+sWSskpT0oE5wHAG1ISQARAlSKunY+ydO+tUppAC0uFszZhyBIjtjWzhilWSlC8FzoLLgQy72LPQYRz0FphUjBOqVUlmX9QQWANtiYLwbnEm186bUmDEhaAyF7TihJbCIijRBlE+icS7K0rmsgCsETo0kN14FIpXna0q0sy/rdfn9tzfsQR4fSNCUirQ0RmiSNi6jW2mhNSkXjvgAhRuXg2AXv2UtMshlIq+A9IIKMmn/OudLVzOyDraHywRd5vmn75OTkZK9fElGWZf2FXr02nN463mxndTNtQTbWGQMAs8TD4VAnxhgjCK1GK0mSJDGeAwdGkh6tDFzXxTmRoRKRwOHYkkxfNPabf/m23/v9dz74xN1v+703vfrp1936kluHNTvPKKx0ZBaxAvEg7CXTpirqfXt2EhECTk9PxhGpWMPXKj289KTEajjCqd7xypcne0cCOgIirWa2bpme3cxBnPe9XveCfVs/fvLfh8Xa1NSW/6q7d6Rd8CmoYIpqq05uHXf3WeDpfHtjcm7+bGP/rsbzLp2vHvRWey9ANTgnYIRDrbRiRhZH6KPuKXAKWOugBF3AXJe97gK5gdtey0LFCljKAIBIrMAQcQpAXoSTdmMRzm67ZV897h/94tfN9hb9WVn/Xim3OHlUaRInpdIEhR+YnrEVTwOTfPquz+y4+pJT3TJzOvRDgiYwx34BEooAIhhtIHCAqItniTN5iLkudjS3M7NSGEIAQh0QZjYlANCtEwvs3RAoZ1QtlbFJV5a6m1vNXDf6ZZlpCgLiHJEmJEYMLMgsjAzArnLOiaCAgGgfggKzacvm1VWXtirP7XRsp1s6YXxtk3zbH/0DPO+Gi5c/86E9f3Fo9a6d/X0oqqDUBjv/FF7wwhcf+tIXir71WA8Ar7/6MluGR556rNlsnjrydO2sMcZKyE0y6PW2zk7Nbtp83wMPNoqGr60NjrTRxhQm6/ZWE5NC1CQrJGHvLDGvhnqvmTS9GsaNQ6HIisvSs2fPplp10hzqEISpaTwE611iMvFBKWWdJ4LUYFn2sjhyH723gBlJCANiygC1cySsFSoKAkSkQVQzx+BZm5XB4Dnbt2tSCtQwuBQVg8Sy1mhPDQIAmcmtrQCVcJSiiE4UGTOWJQMYhhASZvGVAyCkXAAANJn5+Xki0jphBiRQqIFgREaPrDscsW2YMQQZbfFJKUQgJQJeWFGc3aSAIgQoBCyWAxICKYZAqXLexo4URswPAoqgiNa6qqxRemxsbFCV586dS5LMe5+ahINHRJFQOyFFp86dGWs1t+/Y0RsOBrYkoi1btpw5dXptbY1BmDlYN+x1y3IQd4szk1NrdsULF0VRlmWmW/HR11pLLLsTKqWCoEef9RKj7MogfPhfP3hg79pUVtdKFmulGuNsTO6XK6ntoD870Ro+/XezF77iTOPStOzXZB01ghuoTJNKJYGpiy9ZPvpENeiBGJNkeUEDqjIWu1LWqytJssrFxB0Tl39l9porFx567qm7bl470jp876nfv29p0ziz63Gts5kyS8aq4dpaP+PgjhyT1oS2Gf/ze8el2vqg1FNbP3L56z6XXvb7PzT7r1tfoVleuPzAve/9rct+7qeXRBEPtYgdQpGlE610ue727SD1xlvbaBam037yr9+zpR4u0TM94FRjEINGkKlCuxm3XKMvOn76jiMXLDw8szbNxXl0qWmUoUeoEiJnh/X5pWSs4QlK9sSePTiVrGH33DnClel6YjHNmKwtnMKksGEVqWFAjTmwEGs2gCCIIQTe4AcXJqucVVot+4V/G74bh1GdBTJqVUj8Z5SQAkA1oiAiIvZxo9fK6/NC60s9igh2kdcTUIpcKpY4p0sAGE1oNpqvMjLdYxmZiMSXYoQ7jbpXokilWUqkYjYavbZi2hpFVuvoyoh4ihOe8aUb/wMbXxAZvTh+E7OMJgQB4JmKNAfmWI3XlaauwqPwDF8r5ssrKCLrfEiMf7UBspKNN4ZndeHWrx0SxktMipgZAZUi/i6XXqqf6j3wO0888jcLm6ZnpmJtHCgia0e/DK4PRoOsXzAY/YbPLsLLaBYVKl+e7p34w3vflupsvUk+msjC6GF6SK3Vy/B9blnPTaFZCc5gAJAhgyV/Z8PdSzB8Ya9Sw2CtHV89Vv+uPRdQGDn6CMGGtyM+c73X7yMAJcgogggqwCQt+eB29HlqNdbzNnrwhKAU+WB7gZFoMfh+MTxLx+r/4Ul1vQh8AOzz+7gDsyxj7633pFTJVQmAhBL4ffUH7v/UQy5sKAH+n1sSsD519owL1rNO96++44NbGtvjnc4iWiutAwIAtiaQTTNv5KbRHa6u1PVU6N166YV790wsdY9kiQ7EwdfoAhNw8AFE
a2Jm75lIY61QgEGcr8kIENZu0O2JA2ovpqvbp+HSa8Ohr4mrTGcy37xz+dDi3uyi3Lce2fJfe45dlECjzhTWmtEn1Ghu3nH25OPjzfHVgdWNpNUq6sc5FWgUxY6pHfc//GBR5P1+P9NGLK+udlOVZVlzgBadYx8GvgaAqYnJsiyFndYGSayrEcUG3/HkQcqm8YsrBGBjCzmERqsZfyMRlICuttFOoB5UCjAAa1K97uoLXnDr4cOHHzl2slEUta8VIAARQJZkVVW5XLGzyJIggGcCNEjEUoeQEDU7Y+eXl//un/6pmebDuoIk8TYE8bjeSpeNI0SeAUTCrefaO4chLNUVelZKYZp455Rn9t6hKCalVEAtiIIEIuw4M0YFhABAQBoNKaWUSAjCAMQszoX1OxtdkBBEoWghCYyEATEgK0JxgTSJQJaYqKZOjXbOoUCq00hdH/mHMxulvfcqMeN5trq6KoE1KeccCSulCp0Mh0NlTJEYDmFpcVGEC5U4hcvLy7EzZ0gFASHav2/f+YWFsVY7z/Nz586jNlmWGmMYpK5rrXXkGAhA7IohAbthkU0AdUuT5UTf/fLNk8vUVEkyDNPjWd9AWFiuQ73UrQiUiKrd4lX85OknPw4XvjHnvD9caVLb8UCBnth3CZmk98SDGbD3gIHEi23kCrW02zw2EexiMqxMf+D7/p7mnvsuvfpDUN7Q/erNp754wcrT5LXHZGiTau9E93SvkXdY05bJlIjEep/y8Wb7G7j7ruTSM3ZHOyx/aNNPDBhfXj/46G//yN69l6Uze4dhJeGOzRZYGgmLOIcutIM44UqBsnLu6NxHTxz5HwlOtHZtPPybv+M7g+8Zh+Cw2TKhN7wlvfT2c19Y3nr6Tnnyxzo3gmMUqKRs5g3f7VkECW4w1qjYN0SxQQkVBdvsZ1fvHn5zuS17jzsLOs1tsCgLU7TFuzrp5PvHz7PzOtUiorWqynJmZoZMkC6nJhGRIs201otDj4Avab5qwmyKy6AiRQLeBhecUloReOdEkUTfEa1HqlIARDCkNjyy6nok0F1ZWQFFzrm6rlNjrHVeOEkSQpwaG+sPhoPBwCQmSdIkSaIRnFIaUCRwlmV1XXsOWhlhLq3ttFpjnXaSpdPT0zpNxHFizNBVwhycr6vaOR9RMHmeW+9SkxijY1WZRvMXsqFrJSJQI7azMButouYZSWujAYBiQXsduIFEiTGglQ8+hEAMRBS8FwCVGBEJ3iPGBroQIBJFTwciRK3QR6I2j6aWEePIhSLyIGmSICKzmMSwBGEu0kaQwV/++T986pMfd0249MU3vfaHfihr5L26ykA5Dqg0ekYi1qAi6mSkBUFEJKUgVtSj3AhHts2n1o79wTfe+qZr3znb2Q1BNsS5seRug988Zb76iU/9yp/8Vj629Rb0j/RXksbkrW71LyBMcfFrt6xih3/iM2NVWarW5v2vfXNj8mA3hKQO1XCtG+pEa6MNQ0rgCUERAAsLr98niGyQXCA2QDAQpdWwnjv79X+DXjdJ0HnQWiNwpGvbetVJBV6qQXf/cy5Z6q6dfvJQkjVq8loH/1uV+t5UnlYgJA4bzab33oUAKEWa91ZXv++3X/Hi733FmYUySxSMpDIjFsVobJNotHsREGYg1FrPdU/80Td/c1D3pYjhOPY3hYErAEiKsVa9hVGXtn/Jnj1XXrjzgpmWRn2uO+fXrBJlrUsw8czBBR8EFDHHq4whBFTRB090mgRhDqxV4SxOtvn4Z/5908um0qtuHn7qb2aLZG3u6MKnbh/7qZ9fObt66dLNX5r+cJ8c03Bbd991y9/bHGa1hfbUtnD8XuHJsYn2Y08drod1p9MR5pXV7srqamYSOywFpD0+1ltdPXlufrw9VQ+cR0FQqSKnlLXWZ1m0CmTvvVfRoUgFyZp5o91aPdvTmcFE5zbDLrACy6GuPbAAgCYCwaFz1tcio9F+o8khfvmrX63rOiMDLqTaWGu9MClVlwPPoVEnLBKQPQqLxCEctj7TJhCXrm6YvJEUdfBaaXDBQwjCEoSiyWi8p0EFZiF07DmADTbRI3MDVjpXmpnLwIJac0BQotH5OggnpAgwOK9MYiWUYUStAkIWnygTx7Gcc0atu7BFe3AQ8p6ZFeGoHq40AHjHQASk0PvEmHowBIBojJii+o7bvuORxx87evRo3mz4CLkVYOHMJKfPzN14/Q1a67On55IkyZNkWFfeu6mpKSfcHw6UUpkmW1ZaUZ7n9aDMkzTRqQ+WiEIImzfNvPi2W79x34NPPfVUq9WamJjo9XpKKSBs5AV51tpEEBYCgBplJzopUIXgTToUb7qzL/1uc8cXBmdWyDJWg5z7IOAYBmxO9PCpc2uv/N4fO7myMjl/uAp/19/+6nZhrFkuelN1GycvvnLl8JN2UIESRV4AA4MZOpsKChZsOJn14wmKqJU1vXwqpW53svOJra/4QHrN9vMP31w+ta+a21edyU4s5DVCAgOjn2BaUPi0n3ykuPRU+8K1NM3bO6eLIr30hrPQ/PH22c/97Guec801jZf80CA5m5QJqmEWxpm4Fk1BjC4q5YY+kEvSJD+2drSv8vdPb7uqKDYC8H41wbkWSyG3VsYG9ak9ofHy/MZPdh97eGJ+Ydg7v1xPNHUTEyIyY03rKugPrUm9iLjQlrRPpif1RA6d8Um90vLjy0YS3+03W9nmma2HF89lenJLs5o2y1USQk1x21oUxeLi4jLMk1LRa4gRqtLGz2VCz2zSW7TWeZIOBgPUWhdpWdfW10QkKqSiIhMGAEiNDIK894oIDGitA3ONFhG5lJm8WZUlKjQNnaVpCKFoNGa3b5uYmGgm6dHjxxYXl4pGI8uyiYkJpWk4HDabzaIo6rpOtKmqutvrDYfDfr/fGuuMtdozm6ZFZGQw7IUiW+DZOQ1LTFujrUjsKwNANGhiZiGhdWuTOIkU9RwxMMcW8sarmLmZFyN3MsQotARSRBScFRGS0SgXI0TIDGgjISAKwTNcEVjvZ8efOZpgBtBILCHigBTp+J1+pMoEk+t3v+svLtx04K//7E8//g8fVivJm9/6pv3bZwbdgSApk4ALNniTaq5d3EyMNkQjFwqKWwEZFScEESWEVGXbW7v3jF24EXo3roZzbqzRnm0eMv3MGTnvssuluH2t+4P5psnhycNJ9hyEY33EAagVbjUu6C7NlD2bbJ9FqLTKU6VTSFMFAUkpVZBKUUIIwxC8iBLUgEoxWalcRRq5u7y2ttraclFr7ytWv/J+STkJAgx5YlDQkKoq1wsBguAajetNK+fXcFmrDElpeA/Sr5H6eYQfxcQkCaS266KVhQbSuYHl9M4Pf/H1r/v5pqQahGh05WMrRERCJDHgKCGp65qjI6cPIMIgDLLhEa1DAAoWAI6fHZxePX/xwdmXv/CGa/fuTp0+0lv76n1H15ZO7to9bqhSNZQWQgi180anqJRzdSx4es8eENVG/UTiLWudWx4UY1dfXq90iyuuXb3qxomvfrrRnj3zgT8wB68sX7B7Pnv6VPNJLUYBPdL5yh3bPvDDh37
zov5zJ6ZnW0VrWJWTnSai8g7ssDTGeJQiTUMIxAhG9at6MBxcesUVq8vL588tsCYlXJalR07y9uLyciPPXRBESQQ5SO08cuiGOh8OG0lWQl31+4lGAMhEKQEnKFrFhBg9c2UhTeJNp5SxzlsXrAtaGxRh8bZyqJUCDOKV0QrIshihAgiDBAYkroGdloYEFEx0OqpECEJgcF4Z7Z1wCBHuQ0RIqEj5wKgIUAkBAWFCxOyFC50wgrOenE/TdOBdCEE5lSB54YqdMcYzK3GJSYN1sUASbwWlWEZyYtjYsAty9G0SEURAhqDEoyhhZCAWIPAgCekQROlEITnrjdZ1Xd9//wPRu0lrzdaKoDYGEdn5PG88/viTZTnITJLnea/XU4kxxsyfmssaxXjenJqZXl1dJc9FXlTDYZZl3jpEMMbkRRHX4uXlbsytz507Nzs722o1Tp2ZG28UdV230lxEfAhJVI0BhyBEpEj7QS9JxmzqQ7Dj5c1/89XOS+REQjRZQJ5Q6WTNmeD9+57svPaHf/jxQ488cu+96aZN4+HYlcXhry5fCOPZWJq2LjpAWi8+8oAE0nF2ABEUi8hEZoKV1coLG+gOMTGh1aFmW0wYLC/gmSfbjcbirhd+0F0h1bCztJLSArlhnSAX00zNribsTCuY1MpNkEpcP7/2us7ui/bMP/XwO7//4isu2f7qXz938pvDToerSqfJELgIQpXXKgdQFpwGchp8ioOTR7vczyE9mo1vhIq3//3ffM/zLr/isluLZgc9DP3qIMkOptd/7PiXViYnv+KPviC/pMtdQLWyvDI21rRr/QlTFJObWcBrZaFfqCLYoQX1ma+fl6tnpNn3blm7JNdsMMES6kRNSD8xduDGmjn6YENwItJoNMpQwBDYczbesrYS75VWIFKkSTtvloMhs1VBOPjgASSgC55rCFwaIyKJQkXoA4sIBR+sA6OSJBkMe3meX3zhwXa73V1dCyGkzYIE0jRt5IVSCkmlRR5CEOf2X3jggiBx2M9kaSRtxBmhZqNlrc2LxqbNm621iAiKvPcKSWmy0W4b0YfA66kNcMTUKCLSpCvnYi06PlNeRuODiAiESkYRMUTnX8JokRSDcQy3cczXcQAERoCIulRGPDvx0bGYmRWQ1hTbnHFljy6KLKMnOu5TR+H5WUf8/pH3MIJWhkG899FDDISdU244eNNb3rh109bfe+dvffwT/35u6ew73vGOvQf29lYrDVASG1IagPNEmEEwxhURAaDRCr8uPQMgGf3VyMaBRlsHiWeCAgrQD2Fsog3Meyamn5w7dZsyl1BR+N6PmaKqoFFQd1kGflgEmtpzUDcaq0+fTGZndNHGQYCQSJpQAuQtcwAAAWAfgP06AkIqJYlCo/JV6Ot0cuHEN7nbLZJWpSb6S+dyDYFdnYhJUpWYWqGqRRRqTNbqYVlXRdEIzieUVHVd/GVz8Ntd85jYgyU0RI4BfbYhX0cMajAo22Njd95558ljh7fuuXppuZ+gip/pxucePx1B8BxQIEmS0WIbuQVxFhwkUhY0EjQzBoAquJ94zfW3XX9NQyXHT5//0r0Pnji74GqzZUKCcxCcl+BRWDgu8WKD1iMygyHjvBARSAjsGo1GotWw30XgyYnGmePzx+//zPNuuGXqJ35t/pFv7uwPZrVaeNsPv/ehnWAQEOaKIwpU4dttO/HP+3/zlx96z4F093Ovf9EXvvblZtooe32xkJmECUFB6a0gKKW8SO2sydsPP/x4q2gAgC/rALxvz55ev392eVXrpKxdHLQ1xtTOE9FQi7LinBt6O2bMzMzMwwtPIeHAlWMEqYdQeq3Jx65nYhpp4pxzLnCoQxCjEu89ewnEIQRtiDkQUSNJIkDDE4qEAIIsohFEwPmGVk44EUVEdfBKqUIp733aaZZ1pUOQEZM2NmCImbXRXhiAvedEa+ccGU1Czg4NqRREUpXnJtRDJ0FGvmcCRB5EFDGzr6vUZFEJorXWohUSS0BCIpK4JUcZtbVg1H6DIIIgiC4EDdGBFQEgxOJ8CIoANXmCJM/Onp8H4FarVbmY6CAAJEnSLytQNHRDYxIiKorCey8izjnTyPddeAC1euihhxqNBhrdHQ6yLGPn0zRtt1r9waDf76vEBPbfevCBs/Pn0iSLVcc8zzudTiMvAnujEm1MzO8ZQaFSmhARnE+aTT8YstZK6d7C/NPq+//1oRM/fpV3WV4LiWZS+OXystYVB7/xja8dO3JyeuuW7sqAJi543U/v+351//CzJ7Z15K8v+syWc58d4OlzNI2csQekoLTUnub7XjCg8kbrOpDy3tQuJUKduem9dnroAqMbjvF4Z8v27uZ6QYtGSUoNRgPWHaZQlpx202EiSWr2XTS25+LlB+/rf/J/XnZg18wv/unZe+7mphtD1lQM2bXZewDvnBBWDEZDi7FMCMp+9bm7Lj9/cuHgweXlwcb6OwxzX/6GffzEmRuuvHzn5l3VoLemg9x7ctuF03PQezw5eWW93Q5Zt5KUtDi/cPbcxFizaraoN9TOC6QWg5J2K9HbZpOHVlIAGO7ERm/Tgqdz88OZsS0nF+fVUqUOmM2FrgOWw77WWlic9YE5ftDWVoPBYLwz1tnUKPqNyy+/Ynu+69ixY2dXl0AlwpKkaWHM2tpaXUp7rN1otQElTdNmXjjnCBXF6qVzzloR2b59++bNmxBxZmoaADxGbb4CAOGR13WcrNPaoGJBJKNFpI7UJx55S8e4FefIvbDWKiEK1o0y0cinUwoVxsxGJVrhKNrZ4CMNNF7qGCkRERUxM3MIADF5VUQYm0HrGJBRNrgRvGPnEhFx3fxAKUOmZhvN68QLAGlNG7FNax3zzhAEWUDYkPLRhUmpDfV1LBcTSBAgROudMSZJklGmHlSSOVurxYX+q173fTt3bn/jG3/hrru+8iu/8MZff/tvXHfNc/q9ARIFDhr0oCpzkxASESHQenssvktAil7IACMZCeI6GjauRwiKotsjQiowOdnMm2P9QVnnyTv7PWeUqaRBsMlUP5/VJ8q0ULnOyBy4dmzf3uXTp1afeKReXVh57OtVv0tbds/e+tJ02wWpq4NwBcDCWlQc/mJkG3wDYU9RaNENNeeuu2RNplqqPpX/wMLZI/Xasi97KytnMfhGkvVq58RYV6skzVoNIgjOizD6kGeF+5SFt4N/Q63nE8cOt2J1Sx8/ruGP20miBIKAfvef/Olf/sO/LsG3HdHqNd4aIbhYIY43j8j6lVr/mCLTRQtAf3keAN7y8z+5dc++lZXyI1978HP3fauEdGejk4prNnwjV6FMKh9I+UBIpCT4uPkiYO8ZCLKEo9MngnjrbOVsyVmSdavh1IVXzmzPl449Pb57pnj7+0+97ZXT1hy7cens6rl6ahwEPTnNqm9WAoapavPdOz/WefJ1Jz7/5daOYtX2W9rMTBQLq+cJyTmvlDJFVte1okS8J0obCQ16wzQ1rSztdrvzi112XnzQifbMiYpCX0FhROQAJftxlkbQlOhFNyzZCsvO7VvdORcgYYXWe4GgkYipVw211p
Qqz4wKRQKgsLC2ITcmMLNSQBQEKTXWWgOaog+MiNaZaKq8S7LMCEBgJK1Ia0LwzME5R3H6kIQYFKwPIwUQ55wQGmM0kSbFzOzYCSQGE5bgfUVYhwFXTillCS0ReJcwKi9aa0EYsqvZBh5tVDUpCEwCwQcGASCIJQscdXeARUA8MgWIKhFBjUjAYhiseKUUkyBK0ShQwForhJlJN5LpIB5Y93q9LE1r76KSRYw6dWZubGysHgwj8evM3NxgMMhVavvV9MxMWQ36/f6WrVvKspw/f15rneRZCCGAdPs9nSZJYuq6jjysVrPJ3unEGG2SJBFCL8yOASBih7XKB3WllUbvWbCRm8NHn7jhpe96591fzM99but4OtBTh8Kec/P9Y8c+qFPYPDWxsDQnlM9OTjeHd77C3lVdjH/PbxXUb6h/O7t05S1HXnlf/6AopUmBpoIGtS8YjYSe768qnXJKtYEQ2tQLQn0vHALrBNYCrdgugU5qqmAgKm0NVemrOgimzpRQ+aW0XUxfdd3Kg/c9fe89F17z41f8xBXHHjq8PH9q+sIL0kG9wGWr3yrTWilkrZUPcaWXoTWNsbnh6uf9ubEGXrK4qq9+wcYq8PTKiUrDQ4cfOXfmXGMy2TYxXTab7a/e/xxufGRb/9DU4onlpy/l/UphCVIHb62tlBw5c9YKNvLC6ip4EmhW9VpStKnexACtdKE80Roz6NNOCa4w7efeuOU5V7bPnO8J+uaBCxAxgrsfn3/YHDb79+3ZO35hWZYTExNl0UsOm7zIxycmLm4Uu+o6S1Id2fVKVVVdVZVCajYbMSalaVo7G5f76L1NRMYYF3xNQIpqZ5VSGUNgtj7EQBiroMygFEZ/TK2TWKCldc+fmCyaJGXnbHB5koPjqqoSbQDAsxAqUBjtEB1zzHoRRz8QEdezmVFEJBqlPoIRXg2jFiAiEkVne4FRiXijjQqB2fnIYweASGOOno0BRGttrTUqUSp6uccMmqJsK4RRDi2IgaNX2EjnJRK9Dkfl0IiGGPUnw2gTYIwJ7MWnxH1S2eJSee3N173nfR/8xV94/f333//rb/71N7/5zd/9PS9ZXBsSKcusVcrWo4JRQT5yvUAASGJv61ntYUCIOFvSSmEsQeMIC6/Umh5Mj2/f3B5fIwuUPrdhtq2Wx1r2O8d2HTl1eFuBRaWG9arOtvlmxwTYcclz5h56cOXpU56qdlunKmlBbr0LwYU4HSkCQgDA4iF41qJBzzZ4Buuk+7Ww47qvD5trTs9ccuPMwasi3qs/WHLlcM+WmbXF0+/7o58fb6bLvhqsdquy1DpB8jagt7X8moUFlFlxjRoApWY8p+V/hPqRKv18RsSCyeOPPuXLPqiNaxBvCQXrTQFjTKzVR1QWaYU0ch8AiOUDQiTta3frNQcA7ty5ffbz3zx9+5e+sbi0Ol5MbyqUUOK8z9CQwkp8BnntvABZ73SSJJA45zRSkiSRQZEpLSgW0dsKiLJcs7fdQVlMTN35iQ9MbL35YPNlZt8B87t/O/cbP3n6ugbgoOvnQaTg9vRw20J2slQ9xk1PZ/duav/KqXaWJK204XvLLviKQYA510kAqbwLIMboBNF520iTqelNJ0+e3Lp9J6CfXz6fqdSkytU2TVNblybJgre2rAjEBMlJD8taCJWCmby5kOWadWesdebsqkqUALk6aKWDUCTFalK+tkWaOq5NkhCCtRbyZgguODcqA1hPwaRgUAFojREem2kC5folgeVEgdF18Cnp4J1HTvLMDocMOnhh5hACrkN0RUSRRo3IgkS+jo8rCAipJGgdwKOwswxJgVqHuk6JSJvSVaTIIQhzO2/Vw1JrtSGdDCEQoBce6U9HFTYEIUJiYGTxGth7HRUExIEAPStAo5C9M0QgHOq61WqBD6CViNTeRXyBJkShSPFNtaE8r6rKe9fpdK6//vqzp+fOnjmzNuh3lAohJCZz0f4F0ORZVVX9fr8oClDxBjPR+klr7YWVpi2bZ8uyLKths9nUWoOgEEYTuBACsoAgKaygboRWBT0lUIX+7Nj+lq1+/11v2Dy+qaqm6bQHGPaH91IiRashzvWH89t2XjQ/P982525dORcMKkm/NPGT39f/64N0+ByNvXP37T/45BsG0hAmbxNnOqy9Ip/mOfrU1b1q0B9PJy0teeRAiQZToIi0Br7XVIX3AwXUBJWbLoR+7sDWtR1QkiBt27bjpS/rPv3U8rcegt6SjJeL89u+8E9vv/lHXuWHydABQhhmZSftDGy/UZtAHDLlgF1KRiXLx5bOLw+qsVaYNvjUQxsB+JL9F65U3ebEzkphc3wymZ5xA4a0ftHe2+449tHVy7p3bz5/ydyBM4PlPZOzCdDRpbXG1mm9aztUrpmmnCVopeTVC7ZednjuJNxdA0AJ72g9TbyAXm3HbVeVp2zjVd936SXfU8xXzcQ457RJnHPGGN+pGmcaF1ywd+/YXiJyzj21tMQiQaAOgRIzkTfFO60IEWv2SbMoxtohiJIAIQSQij1rAiRErAV1noqI816TQkRb2QSRAJhFkRbkmH0CKhBIEu1crbWOHNYYijY4zFpr7zfadaZ2lgiNMhyYNmA+AkQkHDQpYWFm0khaAQBzqF2NG5BniB4N8d+R1FYpFY1PAjOzAHgCtZExI6IClOjcFfiZ/vGobQyCG01h5EhKMjrKHkebDEKWEesjvjV4FhEOAtGOheIQQ3QSU56DVnqjKeu91yYB54xpo+KAtLxW7dm3+z3/8oFff/Ob7rzjS7/xm795dmnhx3/kh1b7dUBSARFD7H7LuvPxaFMCgPxMBzp4LyIhsOdAXryw1jrKcmNyj56KsXYx1hwu97TSL7H9TyH8uB7/yNLyf1L4h1wuGCZXDcKha144uXVfLziYwPGZKXukgzxW9U8H06uhcmHoA2sGLShAMa/SIlqY61QlUDv7pA05XdrpT6qVChEhXfTsKochzdTY5vFtmXX1d/3Aa2Txvvf/3V83N29JUVEQL4IuQGLUJeQOMNTxAwPoATSAtwY6pvj7a/lcKiE0G2MP3P/Qvfd85cBzvrPqDUdlGBGJ0nFQRBiCi7cEqSiaG13AuPLiepzW27fNPP/gXngQ3vFXn/vyQyc6Y8XM1DQpZcNwU5MWa6fQsPg6EVMHwNRwpVTqbJAAGtBoShQUzaRf16w1AIJTygMiMkBAbKsxX/VvfsF3z+OB4WCQnnly5rJbzG/8U2/pewkVYAAANXDKaEQCFAbxiodh/jwtN9fK8XxbMF3TsyFYC+A56EDKKFCKfTWeNxZrC+KttbESnhYNXVvDJC6ElFbK7lTeERuoqVWqxdnAbLncMbZ5eWWtXLaXXHfD+UNzfDw88OBj25p7ONQGCVkI0IsHVIAGUOk8dcyzO3cqpLm5s632pAMvVtksBQDjsZKaEmOQStfXJrM1gyhbs4DDzFQiaEMuiiR4xaAAmGpAqxP2oojAh8SY0vkQPHEolGJwGFAESSfKaGbWCjAEZg7eqjhHocSxc9Vw746dp84v1nWdGIUoEhwAdcue0gZEgYAGJYow0
ZwypBQSOPPISAho0xRVmKQp7n1trhcBhvcpIk41ZSCMEAexsWBu2/fNP533nTz8LsqUW/JzRtfZA0dm6CavCM6jVDztWPslwO7dBzfrzUky2b2sW+4aKWR32YqgzkyiazRb/wPpjElCsnb714OuuPOF1ZPuwLITQTuToFDLphbktodEABIM8z76updILRqOqocosrgyzLtPQhKAB5cW5UWMqYWTJ1tGStJcPMFAJ3zGxibAjBVQNmHox6zNxqtViQQMFZQOmVJ4gtc+X8khpmm0zm+cTU2UwXVPNnZul3zzz5b2etGRVHK3fw4PwdpT9x375P4vLFKUx1Jjd2J9eldiKxXUBGUsDKWsozqwr9/vDHN/3wxu9/57nPedZpp576nGdd9ft//Ofbd5188PCxxKbQwCGx7jTXOAwiDCAqQGRQarqEqsbjEiKynnSc+NbZXmSVaB3bxnVibDNRbW5PSKhBAigTxZoUAYXAiRDVktHS2LA3jSkdb15LVHP5AUBr+0FYxUYBqJLUEtPEJF6FAAgVkaLdnEqIMD7QZsmhkzo/MIIh/mxkVysYRDSoGkoUoyygopCqBlQJ3hhTlDiqNJlJf744PPrY/JVhtBLuv+Eb1x7srDfJtIyKQpKOkEtLKqotz/rDV/Xfi4h7Zl7wk69++kc3fbebWpeJW9KTtrJ6feR+nWrj3GzwI+kPBj6d7So9u4e7pTLI3hRDnjK6MsU01IJSsj4TsK2JsreEUMLo5FNL08q8b7U65d7bWwM2CXqXGwiqhTNk1EiogFssy0q5y8RWUz6s8IhLW9x9xW/vOfvq53/gBdnCQbKp9b6ijgV5OJk6dNbTkyw9trSv38NWq4MtTpentn/13Xtf9lv7nvm2Td/93XJu75bb3jh56ZPLxcUTJ346aI/M6IHZkLfPv+rXXnrlhi1z9zw62mjMQIFaaEYKaIBBVeOBKRgPMiGK2sKAa6ds0ZsZVt2ymQgBgkTwEkktvkS0buO60lVkWENtPzkcDPM8zdI0mu4lSVJ6lyRGgAigqqrUJiGEiYkJIirLEhHzFpFxw0EpEgrf37K59ZMfPvz1T41+/sMvtGf9K3/lWb/1psNfvPbBG+RRANgll2prabSuUGlt3XXahZc+9cEH75+bPd1IGUBEG89RBQkBARK2EHwAVS8BQyztE+bUWB9CxAsa4noBKngvapCJSIjYuKoKqvf84hfD3lKSJOWoIKJutzXsD7I8Fak7UfXOjPTipiMU2Qh1MQoRZQmqioYkhMKX1EjKhRCsMX6sq9Uw9sZt4TqBjd7GWGfERBSVNWvFdtDI6qmHw4oRfFtXC4wc7Q4hnjs14rGuxkIz9KKoPtUk9VBrQY9nVEQEilHFlJmxFraNLOEmhRibsiEYY1y9sLSmcTT3RwSCSARVRfGiWkUrADW68/ENg4I2siQAEPUOvNZGTvDLKBgljKdnxl3VIhh2qGhs8DKXwI0P7B9IHkIJgVNDKt4JKzrPrtXlP+l8+MvLT7xvevu/fODinRs2/ubL7d9+7ObvPuRncnPLXYMUOy3LCwN35tTi0570+OPLSwYbk4lmXK0AIp6ItNHpbLVaKsE5lySJjlW7x2xRIldV2rVH3eC8cy/8tSc/9KHb5qc3JK2ROGtU1TtvicvSqSqhUYTSplistB49adQZfOXw/uv/wC0Olwu/6Eft9jQXJoFiEFpZl0wXyLqqO5EdXfCdvE28Hrwb9IaGJ6enZ5aXl355MhWbkGg40QAIODGRE5EhQwkVRREXobUpsxGRUVkQcuUcIibWGsNGosAMewlAmOe5c240Gnkf8jxJTepCRWQkOBVAkCAozlWVY5OAjBpXGDfdPX1u8ixGwxP3t/f/8NRd10xTWhXLS6OHDs3frYqIaZZ2s3SCKVtaWtn/6GP79x+84+d3bt269fnPf/7DD+991zv/79/8zTuvvuape/ceS5JEar462AZxibgqfM4KPm6KNTCCcfERdaRX+9JNch8rWgDAJjA3u1XjqaqICCh1wEZSpFqLREBX7XwjCyCG1cbBsBm+UlTRq5EZ2NCHap/jCB1rilQRYSLmqGgHaBgDRg0RRqrtpwBUlWuuAcq4xQUAEBA4gCKAhmATIyIomhAqYUC1Ck5xUGIGTAAPP3zw2aefvP2hhx/D9Xfnc0QmFMJk2gmVpXNQmtOvzua2vvD4R2+Vy2ZPe/zBr/6wmj+wbvs2gbLnVzZusl++s7ztZ/yS57exPDoc4SCsn5HhNSfgbFxeAewoEc/MC27QLLGZE0ukGY96YdTulkf3JskfvKN91cupXM46neH9D/ff/vpWuT9AZlMLoaWu3/U0MiXkWbvMRon4IeWlTVMLLvMwXNpw2o0vfteF3/vAmXf+IJAJFYGxzpczLD+65NLl03d3jgyOVrquk010sqWllTS1kwvnbrzurw89989FHQDke85JkiRMd6d2blvpHUq87J9/YBNd1t610/c4N3kPylQR+0bsLxHVxl4LY/3juK6CrLb3oAHBxD6rNPjBZioPqmqyPM3b7L1TBWMTQ9mwGHW77aIYOUJj2Vg+euSYEq7fuKG3tIIKk5OTEfRRuUBEEddflYhuSMjWti1NHDuy+IUvfvo33/img8eW7r3nZ+/8q8+94FWndjd1kx3GLeJ3v/jJU864dMfObQ/c+8Dy8olHH/3Zug0vRxgVI+GEVZXG45a6rsdoURJqNGktdFG3fBkkiPNefWAiZk6IgdgHQcSyLL0Pvd7g6U+/6l8/8E83/+QnL3zJS47ML6NCnueuLJgp3iSpLYHjELKelUa/cUJS0IAaQAUEEMCJ+qCqaZoaw6oaKldVlU2SmrEz7kRFicXGPkW1rlbqHBmQaqF5BAWN8T9qckXTFfl/UMQNM2p8iABAWZbRIwVXmQkAAEGac6HmEdV/uihGnNgYTuI8GxHZsAZQDRi9biASwVUFuOEjIWJAVYmFOijUlg7xpIsbHmJpvmqopYiEoIKA0T6zkbRVUGjo6dL0aAQjwAFVtXJqrENgqJywuIqslLfs6wW7DtUbzKzVkfMqKUJfDL7Yf2kLHfqVo3/2xy8/H4riFw8c3rQuv2CLve7OEW9t54hVUaYpuwPHXvnqnZNdObhYJXlLNIRQ50PRI5mbQ7w+kUUVkIiITQjBWDs+ryMcodVqOfVB3fG+vOX1F37+J9cpnIXtyhdiyZCgsdaLRHEGD8qKeWbLQzsBQLbvfWK2ecfWU006msZ8aIcLC9XxAys/P1o9eqTf66vtJi4YtZawandbVd+fvGPzwPulhYPbd+xYWe67Ju2LK4GIsiytiirLWgLeGFNVDgBarVa7vX1lZaXXWw4qbJiZyVhmjgow8TSJphchhCRJmLnf7wNh4SqPrYitZdSqKKPUatrKRQSVVIcAMCx7gEqGRuUwTd2wqgq3pKDGTG2aOw8EDXNVDZXcqFwqy5FzbjQql5f6IdidO0/fvevM5ZX5u+66a+OmDf9x7UcmJyf37DvRaneq0tnG8R4iOmHtCYBKRFRryzQF7hoZjXEqrM1r/HBrgHQcB2kdww
EhesihgGCzl6KYpcYDV5uDIr4/4C+///hP1HMiUakHQPVrTBlaezEUuQOx/xQNyFVVQA2ACKgGWE27IZ4ykaegWpP3sN7vxAa9BA0JsiEqURSAgKVMfc9VCWw/85Lfvvue9Yf2Ym94S2fb/aZrtLDWiviiGKSZGa6EnS/402v6/zkZjjy69d8+/elPP3jfTVOT3B/MY+hMzWTrN/s7v51AYu570H3/IVq5Qk/T8nmLo13iKhy0NEuS7qHhUhu8RaveBUNUVkcA2+3trcm9/WQzv+CVgwMr+oubC0tHPvHR9vG9TnNMRAcnhgYQQJJ1vliyBQxoJa8IEvUueG1xGI04/drrPzd15KHHffEPweQ+SJIWGpKCBisTJx096cogEHx/W2uCWtxbHCZkoAMrw3Lj3ufRTfsOPOHfuD9ryw1oMMta3XPPShenj3zvJ3LwOOd683Lrir29qTlBzRTJJ5JgQz0XH0UOYuUTaTLj3N0ASdObVBCUIE3JUZ+9Xrz3dWGGYIzl0WBEGTGbclQFosRYkVAURcRb5SHtTnQE0DmXZRlBLaEcQrDG1lhqY5YWTrRaE61W4opiZkvn3z74xSRJjh078ePvfW1ibhOl6Zc+sW/79s7kS3K3yC987esfPbTPKCYIOWZ333znFz7x2de/8Y9CAYBVlKOgmlwXG0feGCONAmLkBYH3AGAMuRAifdAYk1gbg42GIM4DYp7noN4Y02q1VPWUU05xPpBCPDetSaIZw3ijelUDNLYWCHEEHUe2DTU2Hk9xyznnbIQWWxuNGaRxSKzDT423Wq2x1kZTbcpWrF2emsS8wU+C4QRqSh8xetHVy22uGaLGugJi0/omJDYGCXEVD6lraiVmJoV4waRgjAFQ7z2RUUFSRSYmUiJfi5rS+D7IGj/EqI0XtQjqA6WJxPHimk9ac4KjjPC4LK5rFsRYGYtIaOzZm25hGA1LA4aMk1JYrXfulj2jvN0CP0JJQpAATpFM0MT63578r6/2r7yve8mvvfuO9cWeKy489cYH9n7nXphblxYnitJU3Xa7X7n1ndE1T9l1eNHUjtFN40hVGwmRenAeG/6xCUFsxv2G1Xqda+Be35VTaWt5ZeXk00/63Ws2/s03l6c3Zh4kpQQEyijkhOw1UELGEyRJeWIdjFqy7aHfe9bvFIsycglwj8Is7e6ay0pAV5wY3nDPsYeO30fM1jIb0x/2Ttqx+ee33Pq+97+/1xu96EUveNnLXrG0uDJ+slmWRaPrvN3yTohrPa8kSay1Ubo8TdPBYECGQTG2uFqtloo455jrVV2WZafTGZVFWJFuq6sFOhdGo5IoypUG0OCcK8SjICO50gHAYrEYQiirIpu2qoGsADsAES0q1w9Bg6+SJGHgdnu22yUistaec9bFxEJI3gsAJAl5D4uLy8eP99rtdlW6cWyz1rqqYqYgHoEiZR2poeuIKiGvWV24piD+f79ARK9N7StNLBRQijYoEGJSPM6AdQxwXE3OmjNq1ZB77b9G3oTWI2GENX5r4+Jp9aoQGwoCAICgICFxhIMERIzEB21abqqKwojIYwof1MEbmULpmImJQnTAFCxD6K0UpGYoVefyJ/e+8m8TD977SHfu+2CIjLiyAkEFSmj52PLZz/odmdj4mqUPH0zO+9C3H7jr5q8m3Cuwa6Acjo5vmkzvvs/tPWpz7lT9opOxBPe8wfFJNQPAqc0XFNXK8vFHQmd280VXLiz79uMfh1/7b77qyunLrwqzxtx5xeLSZPnxT7bPPSs790LcsJm+/6P1z34JnH3OsQ/+U7apPXv5s1duu3X05b+fPfPK8mnXmLvvLm78ZPrit8jZFy69950d53/06//Sm93+K+95csrtQpYTMy2ykoLmjs2uK09smYUT8zQ1rbmUQ+j5UafTSfomlarqVFN3/vahiz8eWou45WF3+NQiwVx4dnYbXKIHe8vl0WOPnn7mZ/cef9XMusy4wUgM+1DVQmzYqKUCgCIURRXhuyGEqqqIDJnoEAxIxLEPCqoIRKvpYO1/Q2gIcVSMxIv4yhKKqHOVMnTa7bXgZyYmwADBUhJd3EMIriojOaeqqompyZRb4qs0SRD1uq9/dzhaFvSn7j778Py+DdOTu3dt9yFZ2HbrTH/buVdesutIe3F5eO5FZx85fthVeHj+OFAIbmRsMg4wiIiGY1BxrkKsxVSRiQGYLSL6YsTMnGXI5CtXeAcAJrGhcquJsOG8nR2bP7GwsJBkWfzwzjlLTBGYqkDMq5QGBGgMi8ZYDCLCNXoUpMDWImJZluPwD0zadIT+x1bUX978q/+0ppDVSLRVVYSEWOOjQojy9EEFRE3DRxrzDeL7RxZWzWtiWhMvASIyj2pHVW34kfFoQERQraqKrAGEOHMXBW0sE2ydrGnj/FDr8tdnYqhtElAxwtkiA93AKk9aY84EtZczxsI3gq4QVtFhdWyDtTfEOG9sXgb0yiZJOwbve2Tfo4fz6W2EQxCSyvnAwVDJnl6efmMzHXtV8Y/loLryHHj3G1+9Ibfv//KNX3uomEBOMnHBOektLZsX754+5dRNdz8y6LY4gCZJEnwttBJF1cd3TOL8Js+iRevakz2mC1FpZAiapG31wInZf9S9/hWXfeY7/31MzjCmqrwzZII0jGsiQPZJUI+p7SYHd/fX7b3/keXZVut4b1HYWh0EHDhvTNdOd+3znniWT7cuLg+Nlv2iPGnr5hu++a2/f8+7PvvJa392175vXvfV17zmVwaj/nhRTU5OMxMQusp3JtreORGx1o51y5M0m82z2dnZsiyLooxLHVQj3jumI61WO04sW5321NQUIpZF1e/3B4OBoCKBTayqIZMOipFBLFxpuU4TvQ/zi0sn8DgQttvtqlBV8GVYXu6laYogSBQEfKnGoPelDysEGkJI01xEy6qKHTxjTJJkrvKISNbEHS2NHo4xRgUkOoA1I3CEWikWf3mo8T+yz7Xdo9qjt0EkxOUaNXih0XurmzpxHtyoTa3d5vTLclrjwZOCINYitQRrEJEoIegYs7m2dAZD6GqIXxz0RiBORGWSUnyO8VYAAEoAAG3gFM1uR+89IybILgTHaoCtwnDQkwEM2tNpkeD6hF/868Pbbr4rNYe6nbQoxaTOOR8oJbNx/ZbsSa8+Y/iDHaObP996+y/u/H6xcrTbzsvg1aVJpzhy2N50lyhmuRml6/zJvZU7gHrMVgo67xn+9/7+wIffY47vufBdn106cHyuWuHnXr3/zofXP++1FeV871cBwG3ebU7ZWgyX2umOhe99K+S2+9LXHskpe8WvZhdcbk9eX91xf3vn45J3vs/uPnvhuuvdbd+bveoV/tJLe4eWDl/wrJ/73Vd84e1nvukvFgcqX/4Y4dFw592YeGlt3v2217/tlJM+c9NjP3jk+HKYNDiYywyZbBigbdGUsthakHSQLm2759lvOfUzH8v66x3hvKGZU06l9sx8sUKj/jePq949+I2z5iYmMPQl1J3FMA7DMVhE7baYoDMzM6lC5SpmNhSbGqj18F8BAA3F+C2gWEPiIxiYA
FATYxFZVQnZmoSQBSGEsLi4OBgMDHFRFK4Yxby1v9IjojRNEZGN+CAhhAcfvO/eX9x15OgBCXjj9+7Zd/jE/n3L27aenE1svuf+h4qNS+V9R97/3nd877t77r1r5bZbHj1yoLjh2z/qtDcrmGEYodY9zBhXjDFsDTIhU8wsACBSAFyovK85yt772ouJSBGqUNf4zjknwfuKGe6///4sy4DQS/Aq4/5PWNO+i+8QuYaR4BlPpfiKN857X1VVjHtOQjws4t+KzyP+7vgJNQ3h1a+h6T+Pvx+bGKvpDq5BrkPdhYKmozX+yfG/QpCqqqJF8bi1NX7bGEXqv4WgjedSHPUBQIT7js+RcT0Qr4lq8CgbY+rbwrzaq4+ZfgzM0pAiRAIKGIxql+O/KCJBV2/I+Hxcex+ifQUpMKAljoI+ncy0k+5oUMxNJdfddnAI0y2sFAlIFIjJGtYU/O90PvrV0TMO6nYdFNu3tH5w456tT3vvB752eEebglY9GRqfO0lhcPRNLz9teUDd3ETh8eCj4LBSFPdvcpQ6a6H6U8QGIK95YYOBDOozhyGEzCbecb5x2+++cNP8kQNZkqoqECTGIqKAoGHxAUSVLaGYQyf5HY8d7oWe95RJrgSSOU7aaDorrr/i9p1YUuwEgdnpiY2bZj71iWs//l/Xfujf/mPnjm3/de2Hzz77bFVYe0lVVXkJ3ntlKENFzMZaEztDqhr128kQUSx88zyPreaGYhvSNI3BgsnEVZckSbvV6rTbBIzIedbqdDqtvD09PRt/PU3TdrezacvmmZkZQgwh9Hq90bA4cvh4rzeKSCLFoghLQbQsyxDcaNQbDJaXlhYG/eHSUu/oseUjRxdEOctaxpgsT5PUEmtcdaRgrY3RKIiwsdHFoV7DxPFrIhqLOGKd7RA1dLJxwBtvtHFfWho/0NWqtrHcHsfy+IVgZArS+J2hCfncqAVE3Eac3Y53dHyNL4Csifz7cQxuttLqNdS2DxrEu3GEHp9X40dmjEEiacxGURRE0XtBEOer4AFJnUMEXBn6kpgtOADnN1161f5zrr5VaKgc0BNag6bV6ZYjt+WprxhmM7/DH+rz+o/dnvePPNpqT4zUZAyirNTO8vIF15QveerKCi0uPWTWLXsCEPU5mOKM84PRTnvb+b//X+HoseKLH02f/sR9H/94nmbDdLp44OHOkfsAoKLZzsS2vL1p/ic3L73nb9efvGHBjOz+x/LTzw99LX54++DW73X+4m0LmFT3P5xqUtptSdkb7d1nnv3SL8jOHQ/ffPHmAT31+e3LLrG/8QZ/xUsXtTxejjb87Z/wE87fms+9+cozf/PSDZOmnLAZtzpBhi3oK8mI2W27HQB2X/f3KLzveX9YJiMLnFRySEs/3U7WzYSQ5qPwg0eOXXvX4bQXWomxaRId42K49d4HBVWM/ztOiaqq8t7HoUCsiWM7qh4QrFl7ceEZ5/309IwLQozWsPfBpAkEtTb13hsjiU2Fraz04hv1estTU1Mri0tJnm3btmV+fjE4nySJc2CNDHqjPDMLh8sDjz52xnmTk+che7P+Rec/sOfnjta/6FdmPrzuvq2HN+nx/r0L1011OpPdznRn3Uufd8Fv/9prl04sT7XbASGooKyh9ERtqSCRzBxlj+vRC6jzTgGifAQSo7jYJxQRMMzA3pdlWXmv3Va+efOmoijGkSOGxKZ5JWu3gdaQKw0oSBS8QBAjFMNDXP2iWtORVceFbRg7+2Lt4lB7E41vPVN0LluN0Fj/jCBgE5aCiNbcbQJQatT2ojnM6naFplNPEIXdI0YbAJiYayX3X4rl9XWCQgjaNEPSNCWiqhkGK9b8DEHANUoaa98kHgdkzdhrhghBRLwo4agqrbUYIBYrsXGDiC742C2LYNGYryPWH2IclXWsq2cZg1Hx3mk7p6JXfP3WQTK7XipXIYEGJTRiUfQl3c+tp/l/7L1BykFrfeuBh0fXPGfbte9+ZZK5V7z9pu6GjRaNbdtyWS7bbM44d1dv3iaZc6E+LuOtYOba+EuwPumwHnlEiMD4jo15nyIiKh2AXjEkSx0wlZSHFvF5L7vkQ9/7+qOj0lrrQjBEELXGrDFAlsNQqhYmycFTh5f/d1UsnbNj05HDCzLtuUglA4eCo9Dhyd6o7C8dmOfhL+4dfuKTn77nrpv/6yMfPvusk77/o9see+zIi17y4kNHjjPb8dNxPiRJogwQAiBGFDoSMrN3UAUPKsYYEU+GM2PHja6Yfllrm0I9FpcIisF5k9hWuz09qyG4NLGIqMGhwkS7E3/dWtvpdvNRyxgzPTm1feNOkya9Xs+VQ2JK02x2eqMgWLJFMUSCJLXOlSu9xaha4yU4z/5IYYyZmJhIksSYxBgG8LEoHyPgYgUcfS2hHrJIADVjvfT/KbBRRzhYU6quDaioGFHKNS8IEVAb1xMYn5vxX6MAHTBqI+Ycl2sVfB19VWqVjyi5EEJEcYzlKsEgNaa/QWtYIiIKAhEqNjqdMagjaRBGWpt2/1KGgVgbJmK9JrVxXXSqXhUSTgVdEOnwaM9jRZjK5mYLLBnsug2Ty69+3d4P7u1W4lAHVZFSMuitnH3+BXruC9f37z2j/8VvjK756jc+NmHKIISSVB4pVT/y6zb0n3LuzL17l7ZS9zlOZ9stRNeFTsZ+45bTN2/avfyKlw0+9eH+Df+69R2fGnZ2h5/ev+0trx500l5naGdWfEnDMNseLsD0uonJ1sopZ6177q8fvuuxjbtnlx490t3QPbx/z/Y3/F7r3KeV+w663dO6tJSet3vltNMWb77num1Po+Ceu+/69W99y+J/fC676pLpJ1+9f++RzhXXnPIHb8ovuTI9Vq6IUKd6zkUXrJva/6Ef732kcmmbsDIJg2qxsOW2dHFHdvzU3V/7hwde9vojz3jb1m+/O6i2PZXqqBgI55MJLHB5w6HeuoxfdtYG58roIxIt2+pOCZrYDgaQcT2DiKqNu0DwqsqiwE1MaWYEMbRFV3kYlaUqG5MzsyBkecuVjpnjrEhEOp1Op9NBxHa77ZwDVAItylJVnXNpmtqExLtWa+KU07tv+8t/PP2sza96zVW33fWtDk0uL/euOP2a79zwo4PFshro2PaT33zV9pndN9/6naNHh7MbNr/hrW+VdmfPXUe27myR5kFDxNTXx3GI/j4aT22RmlEf1SiAOdLTnQukQYOwiU5dJCrG2CA+VslJkux5+GFkhJg4k42uZTX6GGCc3SgEAIgy8cAAAD44FVBspJgRy7KM5qaqitwMRxGcc1hrFpo6hEMdV7RuIyM2hOa49+LUrR6sBlFVgxTiHKEJUfGDxyx7daSEdVOLiJSa/vAaeWrfcJTHnWdVHef1EZtnyZJCtAc3xqCKV5Ew1nyulQTGuNN65TWtgqhkjQqkwEiIoBRTJDJIXkPUkLLWEqCAWjbxmUZp8vGBwk0jAccNdhEV8VWZcBLEO+s708ndP314z/HJ6VNIBqTWhqpCwlBxrtVvz137xeLqvW6Lsd4WeBzwmCsXFkcHDi+86HETP9zjelDkQXu9
3iteunlion2i3++EDMAbgxDEWuudcxqoUQ5qDutVexwiUlHvPTfTbo66pExFVaatVqbQD1XIsLVcZVvX/c6LT3v9Px/YuHOrF2GEJLHDEADAKFYuw9yFEdCRzWDdl5fuP/zdcs9DNJDjw2CnhVqZ3bk1P2Pb3Lmn7Hzw7ge++KWPPbrfSdXbumPjr776Nd10dhjwf//1H6VJtlD08rwzjjTGpl5FxeetlvceGj4bMAkpQ93FQYDgRQkoJkbOaSPcOF7kGpXXRJxzEiogmpjoiEi0wEptogo240hJB4CiHAEqMc3OzbQm8qIqJ6cnF5dzVciyTJAUAUnyVha8pAmlUzOd9uTS4sLy8rKxGQDkeXtysguoVTXyoUwhT5MsZkLj2TZZA0DUyCIQEQDV3kFrgfRr5DUCKHkZP8Rx+qiqyASGx1tMVYGJDWsVoOlpxzhdIz9l/M4YQKgJh4nhceIICRFjjMoJGxQJDcYQscaDOOfrWRsTI0ljFhFCYOTgvPc+zTNAjTRI4Hr+JSLUtOJV1cc6G0kZGVZxYTagkDKTppYHZSDCljl07wNZr8wuP9ePDElYXvJ7VpaXCmp1RnlF0hJfwbC/svPKl96fr/ub8r2k4X0/zRJckaxtfODQczZlNlqGkzZPHlxe+q+v6EmFuQCPHy3aQL1pGRkMcu3/OXjD592Bh2nhoWxu1xCD+8jfzaw8sjiYlx/elHS46xaGZvPUW/93dfft7tCRgx/5wIbE9753Pdx+68KujcVDR5InXEFH9meXXvboO97qbrtx+xv+bMlRftaleKh/556FvZPmV677h+7Pvrj4xu+Wd9/kFt4285bXnnzN+Zve/GzetH5wfKVqmcwPPUyCuifsWD/bMdf+dP8dS6lPggUImve33Dpx6BIwPLFy9s7r377n2X/Mi7u2//S3DMCw79hMjjqlk9Qriet/fa/dNlecvynRIKJj6V5UjLwSjM1RRK318oxBJEG0bDCqaIg0rpq1L1x8fKhgmNmFcmZ2AkTBw8pgqQo+zbMsSZ3zzOycy1p5nudSuojoS9M0jEbeqYrL83ZRjVzwAoYopcQvr+DefQ//0z/+8z/9w4c++elfJHZqosuvedWuA/nXF954DADuevoDe/ko35Qs3RBO6WzcvfXs44eqyS5u27WOSI0gk0XkKlSxie4Kl9oMjYgEYiaGEAIbDiEgAjKpCKgyo4ggU40xDrFAq5iSzJpOTnseeXB2dvLU3eeURWWAK1eKiEECREJMjIlIIoMUQkisjdwMgxSKSp0HwsKXWZZF3Y9+bzA1NVUURdzGMTevxXiIlMipmMQG71nQGONDICSGaAaoyCSgQSRSXkiBsTa7QiZPCC4AooEIIUYhQCZRpSAmCp0QelQEMBoh2o3ZjkgMsUoIhkK04AghOiYlyCBaqZAhAiIgAPDBB++JOZYC4gJZg4gRrRfBAoDqg7AxquCrQCQGqZnmRrBJTQ62CSMA12O8VUR+nE+Lodp1asxgVgkqUPpxFzEelFiPYIWrkWM2vlpvO++58dFed/1UNfSQSnCZ2qDqbPWK9AsztPSBldc5KohaE3k40u/1loudUxMX7dhu8uM3/OXN6fSWgR+dNrX0K9c8+1CvTA1arjXNxRASmSyNhFr1HqCG6UVFm5j6WGKHqhKc9xF5i0AJkQgo2VAWLrEYJCusZOHIwRPPv+by//zSf/xkhNNWKvXoyWgiWpS2zRTaiB7VPnIWAPyC77vzutmsYzs4KXb4cK90ozB44Ei7OHjq5l+87fUnX44vvfiC0YG9e4OrXvac7dNzs9t2nbTjpF1Hjy60O5PjVAagnuIDQCicIVKwDj0nBkSNEgJU4hxjizOJyqohIIJhAkIFIWMlBGyGEUooQY21wQevIQIdRJEBFYWJgjonAYkZyRJkSaYAo6oKpSSYoNQ8GSeBogNgAE8AliFoURRgeXJ2dm5mrmBx/SERJe3cV65DlgErlYaRT6s6MEFEvKtbx06kads0AbjWRV+1n1LE2rh3bflb55c1jBoEVBAYiZEggCOMK9x5H6j28cVQU95jFwdFpeH4VavytAzNVJqJPAAyR6pifEwkSIhRnVYQAMiHYCOQAoCxHtl4FaiqBBLEyLysW8yISARMXJOSYxhQ0NpVBmMULilH6VUM7aH2bRpkcQq61aN32Fse9K98A6RMRehJdcctNwdXAE45rBKXF6Plx135rPvp9Jnq2GWDj19//PSf3n5XO8sgAKh3QsZkjkYBZddm2PcgHLg/OSkNxbCophRFSrBzYnXxQTj+YAe6QtOwdNj/1WvRi2Kr/2evqQRYQ/5SLU7aFW6+x7Uq+8QrzLc/Uf3wq+HdP8wQg7Ix+eD7n25ZOHGttF1lErP47t8MkNru9N7vff/Hb/70Zdf9w+6v/G3QIj+WZUbS9srs7omW3Vj0B3JiiLV1eIuqojA8yHHHptm3PXPuQ3fdd8Njzmp7qXW4mtnTvfU3gLny2t73lG23vmX/Jf+cLJw09YunBJROzqGCAqosy8j7YuX41x9ITprYMNPmUnyBOOPNCjvFkIfEYUUGo10Ns0VUL0EloHBFBKIMyGxUJUJrXFloy0dfOAUxKirO18Wf891uVwmLqoxJsUGam5urvFteXMqTdDgYQMJZlmnj1TMa9AeDQTI9nbQx+CCKC/Ojq65+7mmnnfG9H353x65NSMlgeOj9N/1t+BcHmwMASC6FDMJFPfsx2//Twde+9ulXve4FBIGACQQJ1XtgZOAQfBzBegkaxHvPtOqDFGNDUIm7K4bAuF1VVb0ScUCpqgqFXdCs1bLWjopCRGKDNCrIsDUAqArGmGjlqE1vuaoqTKxq7Vofxy3xB6J7Wgghy7J6MzOp90QUKidB0NSlagCQqgIT20K1Iq2KQggaQmITQQVDAvV0hxSsYODavSQG+DHCQqHulgtgxNqgRKRmo8LRWFfFADY2bcQI4ARFQkaGEKIkPEdztzVFwzhHi44RGgSJlGoWNBAaE5uT9TE3rq1BV7Ggq00FBEAY99kgBlXV0JyPFE80E/1wAjRumjEMB6RRnpuRiEmPHD347fv9xFQGZUAKBAKWDeO0jN7UvfazxbMPua1tDQX60rVSOtqegNv3Lf/0O4/c9WiFrWnRcuHI8B2/elpnIp8/MkhaUY6UJB7lquCFEZk5tYyIzvsQgoQQA4CIFK6Kj0lBEYERhRGJQTUOIQVECJkQ0Sb5RADzh68/84V/s2h2TnkHJet0KH1qKl96ZnRAxplikpbmzEn71t37NJYW0gqVlEx2XV7NwSaTyl0PPtZletwLrlpeGC2vLLXz7NixY61Wi5kf3ftYu9ONIo7jFzWmW/HBWGvr+QQCM6MCg0aBPESW8UOhegGI8whRbwWC1hQdIrRkOY5Latacar0REkKQEFGB4GuZM15dBqqAQBFXAQikUbw06lAxIhhiwIwhycH7ID4QUZJYVSUJsX5FXbPMGssBAFhT8a4aohjiqISjuoqbY2MiOiH+8Hg1UqMYVX8nMoZjQRxTRabayxrUUy0xq6KrGpMxuSTQyGZ
DbnpFIQQhwyC/hNiI7e7IoSDVUDe/Ke5ugxT3cqyrxlAV9EIKtMaXJX4MQYBVq1YQERBAJtURG8hKGlhUf3yiu3Hv9T9tfe3z0wWPbv6+efIzhcKxPcceeviB1oQJAyDEYbVy3jmX/sqf/t1H7uPH3fkHnfX73/mD2cyMmCa8W/SaQLstVWEq9ElqunjPL8rNmy589q9dA5//Ahz+bwBMTFJCvyNThm2wlUIJDj3nnCMLZDiV5iEZhsmddrT+HJyb48nN5Z03632/6IrqxLqRq0SGytoCKz7YBFPu+Cx3ULWqQRmWv/Sr/3fTgTufcsP7aLab+pmlpUPyW3/x5A+8Y/loMez38jwflVViMgCIsuFSlVzZNM2K7LFffOVd+dxV7rRLR9tuAYDpIxeKkyw1IDR9828MJ/cceNpfwaPvbZfnMBJ6bxLjQ2DQkKd7Dh368oPhNRftzoCUwJVVC60nqLSK91wxROYkclQrpjjZkeiCZ9iwMSZBQKoLjzHbEyEqH9vEVMF77wUhMTbGGGvtkSNH2u12PJSXl5cdSp7nZNhXjog6nY6KWGOkDEjcbqcLo4VuN//Epz5++OCRbTs2u1ABdunPR9qiaqYCBdqIpZS4iMG7+Zcf3PrBU3btPK0sDCARj4pCmDnOBa1JRIOTIIAWaKxXEwMbMSqA+OC9T9N01B+oapZlZVVZa+PtMMYQm36vAMJW3tm8bXvlyiTPEkhqNJb3kSIZfWNieydanplIK1JgNqhKyM57BFECVRhv5uiS5FUiSCSqOBGRD6HyzhhjiINItO1jiGKZDecAqQKJm5UVascxBqm5S6sTXKWasBjnpQDAiLXNDsWBv+emcSpRS0Siu1E9Po6RL4AQkQIQUJxuRsAar9GRH08y4qvGgQdR0BBWpwONwXl9BI4dIBSi7ABGYpIZG0dGopGOVYlWm3K6hulRP2IAVYEgDBoocaS7NvFnP//YvuW52VnINQxJAMSqquIr2p+dxJV/6b+2YG1pWo7KJO9XduPvffCh3shMTZkJWzo1vZIvnS1f/pLzDx8tWAg1hzAq1I9rbohqoyFEMN34xK9nw7H/H3wEA3L0W6aoNCRai6mBZUbk4FWFlpZHz3ziFU/b9dGfDLHdmpIw8GysZI4kCUYV2QaCyuzf5bbsCb4zGoHghAWPFVGwg8yvHKtSKTsTZnlhFECn5maLoti8c+doOEyN7XSwrKrYqFiNQ6pMFghVFNZwxuIDBgQGRkbf+Ntjk1OGUJfOOkb6NOriOjZMECFEMtxA7QAajnuMr/EVhzyqKg28mKl21RQE8KtktjgcCYarokrZIJLTGjaIiCax0YctZsY1aolIa1m1um07DrTa8HPG8xpkbtaYEtDavjSsQTXGBRBXfuwMGwUXBScBIIgPARApMdS0oE1tvlDHVI7e31xfEagCoGHjRep/bXrI9Sg4VsWqqBJVdQHASYgokNhmW3ttkdEXq3xpjg9ERMGoi1unrYABFEQz5p4PjAzlYGJm/YmfP3LgD/5kfen65bH09tvgSc8E5B8/fPfC8sKGSahkqFnuhqOJ9tzn71qeS5I37b79vqObbt/nWllHVBUTY6B00qKW+mGeVZinB44sPu7KS3c/5QXHTrpk+XOE+o2RL6bTqYDq/AiddE0OhKMwQmTVYMwEZFwllCcHjxwYHP3W25ORp9t+OFkc9V2qwjIpsySAHGDkjAfqlkpSrKQoiSRffsU/jTqzr/7nl4IdVvPY5yN7nvOanyrl193+hMvP9SGMXGUSy4LeS9JqIyk5rkaSpnrjjQ9+7/M/OvnsasvMugcvujNf2pkWm3wCKAHQiuWN1/+v3gv2HPqV/33yF957cPOdbsMeBpsePHXuwSuDnyysuWnP4tbOsaeePMejkRgm9YjWsCCyiCBjBC+LiHgPECXUgK1VrIUqQ6MPofXzVAAwTSKF4upZ6aA/mJiYAADv/ZGFxagUE0Loj4rhcDgxO9VutR577LGyctu3bhsMhxOTk66q9h84sG7dur37Hp6b6ZZlefvttxMbZmssrUz3wilSpRWkQELdfnvF9mQqcM8WlxfT3+T162YfeaTkrBoOwKZMzOIFsWZHUGqIjHhXR7gGBYNE3nsANMbgGp3huM8JpCgcpQaY2+32/PziM65+5sf/86OHDx8+7bTTXOVjfGIkm9joQDLek6sxnigSABBJEKLFWnRVE5E8z1W1qqpYfIuISWxwVcRnihMfREiAGA1jFNZQhSC+oQ8xczAEohHmEY9IUXXBj2nEdUxqcMyIAGNoVWgqbKyHkRDPnbHxi0YBDUQiSxxAUdSrSBCLFDtZY6NlaNjl8UAfH1vNaUWWwOuq9wOgIqIZ+5wzINXdPwaIHW9Y8xIExDqF59pWY9Vu6H84E8cvSMEgudEy5BPl0vEPfOnYxNwu9mUAShMWoMRDQoM3ZNd+YfS8o26LMvTcwLRBh/2OnWvPznZZqpVlKHJpq+yf/+3f2VIl7JYDJ6kuV5gjjJwHRaIkSZQwErAVKUKyg4hgJEnXVxvJNrH7IiLSOApAXUbHa/eiqqDeh4US//hXdz3/7/bg1GSy1PIpBT8IWa6jYZonxiRs0B7aXVz4AzB7507+aaoTAY4iVCi26OFLH//cwX1bRTOTJr4qQxBOkqpySZqBAipEFsT4CqGR448JnBJWwUf9lPHYE2JnJQqnrsEekjUYhcmah9KEkzokx1nvGGwSLTmtYefcWP6pTqpiWCLEZkMhxsAgwki1v+0qhDCgEpFTwbXi5yLia6HjVUhmAzzG1XUSNRphfKkRyRF/jIERIIqrYMMvWAU0MYEPGoKMjUwaKBZH5UkEEI0yPbHnFxl0Y1E5wQZYsQY/2GzV+iQxSNFyOKwBPdRmR402QIgenYgaBBqARfwsSkiEEXRW78dG1QeanJuaVhkiGkCvUoTKSOpIOgksH+/d84dvnthza5jelggXX/44X3SpPO0Zj/zkDoN9Ky1njR+Vu3acXm47c9nOPP3IxzfLjX99426TBW8KCRUQoiYd0sArxcheep7M5cENZs8+/TI31KSbbnz578PdP+PEmRN9MTTJ1iMtUNkmapXsgaxJfAKapu0WpWa48p3vt37h1AO2JkOa+ZEkoGqsFzKGRzTMJB/qyKC2PIXE3Hbxi++55KXP/fgbN7h9K4sLndPP2vDmf+6efc7R7/zoPf/6L4dPvOR5z3qWBOecq4IgIriI4QBOgoTeB9/7LyE1T37cup8ceu+JzbdNLF4MAiSVOkQwo2ox9MrNn3rHgTf9+oOvfGO6sD12Nvrr71w549sn3/QnZmmnePOt+47vnGudMWGXGTNfeeUUOGhkmqhgYGAisiZlZtUQQAVqXno8umOwpYYRKiIGAFJrUfTI0aOdTmdqdibzIU5c0jTtybKWwU5bYgQJO3fu6Ha788eOG2O2bt26sLi4tLiYb9nWL0abN61/ZO8+7/30zpO+/70f3/zT2yYmuv3hUrczU0wSKOWz7BUml6ao0FaaDCeqZIKLQdhyzpleQU1BhlO2EOUggFUxKvKwNUUxtF
yvyHj2WWsB1Tln0cRoEcm4NSsretMaElUmGvVH6+fa11337Z/fecfs9EwIIVSOE8tIqbHqAzERN8eEXyX50RqzszHNI+4WanSAI1QNg0cCUInE5SjgkBqWWt2CUqqvM2iNlI5pLDsJoEroGVCAAC2gBvH0S/EJGicGrVuFzWETgcrQJA1Sh/Y4DTLE2FBaiQhEAjUiUxFlBrVuJdSDYwygVVXFlRRl32u1KyUAjoSm+qRDkohNaXK41VjbQFWlcc4YnzuxAaANFmb1YAUY690H0LGLakAVzLZ0ky/f8Ojty90tuzmUWBrMVZFQLL46/WwHhx8Y/HppIHPqAQMQmMlhUAjDURFM0hm2BsPF1hNOCS941mWHl0vKpBvM8mSfigBEjPGEQ4iWEhHhIiqqHKHdABH+E59CrP5V6nE7ISqhOvXkETE2ZpgBBMnwieXlxz3p4qd8av/18+Vst63loDA5lR7TxBkMEKwP5rEd4ZmHDetcZ97o0REOrViTpMtm/2R21m+98ZV2BgVEVVCJvVgyIfY3mUxiOdRqIeMsIfpsNFqn2Ox8iL1cAEABsqyqkRYWdVIJ6k7AOJyHEFZ1KgzyGvfPuOoUwTnnKm+tVZFgMAI9SVGixOsaphmMffeaKhlAUGs6gLHGe6+MRiOwPHaMIO7r8U4crzQcgzTrEw3H1zzu3GKjVqHUYBF0Nc+Lwj51j3dNrR+xXQqKjas3JhaUKSiKevGEjI3JoAIgRpXfRn8OARA1Nqwi0NLU23OcSdSLHOtkwnvvnCNrrLXU3OS4kbHBdgHhGOfMCmN5EMFf6iFBk8t68AzGOjtYl1fXfaPziztaUxPlaLnMiZYeks/+68GVhYP77m0lPNTMkyz35l//xj965NQXZEX1ivyrx+fTr9yTmSlwgZhaCFZg4JTKXnfXrqUXPT+/9+HB3I7Hn3LmydWgSBKTGDBz68KbX1d96vPJfT8flSvQXt/1kNAwtDBUiQZvEZL2umymAoBl6BDxqGU7YRDEh8yCI/bK1nmocm9CUnWBHHMPuT+x87oX/f25t3z60nu+2ls+Yc948snX/kvnzLMWji4+52lPmtq07Utf+PLCUv9Vr3iZomDKCkEDaAAvcPLG7vvf+/4HHrlr9onph5/zGdgtsm6lT/fc8ezfaN/+vO7dV6hqIJ8hg0XqzfgNe8vZfaCIilx0gXHvE95z+jf/vkQ8XMj19x7edvkpoL0ARKKFusSwMUajRL4EEFSNSXCINjyIqIiMyCZaODRG1HFcoqBV6QAgz3OyxrugzSQVAObm5vqDHhuqRqEsS9tqj0aF+JAae+LEiTzPd520uyzLqZnpsj/0Lpy0e6dhfeiBXzz84EOt9nSrlfsRzHElW2hkzULw87JMQwMdEVHXB0J79eOe7Z36yuZ5a1SMrFVGYjarGylOf9GM4aljHXyDodZyIirLMqoaFa4CABWlJMKLNU2SKFN1+umne+8jmM2qrUMp1uDnIB6agVAkYwCAMQZEIdTEgvi30jR1zhVFMb4SDR6VPKgK1CNSAIMUQJ04gChfCYQYwUcB6ulpgzglrGEUUfwGY8VSl79NgxcaRkQU5ozExoAgUBdf46cbGppTCCEC32MsFAQgtMSxoFeN1qjRXwXrUVecmTWN6OZcU4CwWv7G9D/UVW99eTU6/5fIEjForXqVhxoRujZgK6FpeFaIOO4rqmrwnHVxePjIX3/o5+tOevLA9zdMpIUrRECRJrH/2uTazxYvPCEbUg2qglpCPzekOSwT2iRNB1BBYLu4/51/8/gF5uDQoi5Zr4WgUzFkjCFALwKVGGZD7L3EI5uIBCHy/BjQGON8pdIkNLEHzzFJERRAQ6AI4AkxQDDExtsqbb3mOVu+9S/zg2nTCQpBLBhrwtAzAlmi9NCpABB2HgPpBDqCbtJK4vyAk90/euzHl+142dY8aeVsjbFokME7YQQ0WM8X/1+VicYCBIKAiYG2CV1cE501SC0Q0fR+IIioYhPLm94LNrkmxlURLSYRUFVRoagcSp1vqSqt/XWFMT8vXh8zo9QhCiAWjRhrhLg4EzYUGsdsa1jBl6WSalOLr6ky64/bLDMOa9YnjuWiQHVNFhtAaQz2ri+KEAXWNHsQEUUDCmtd5wpoTAFIQcms1soiSggQECn2F8drfvWJhFo5BBselIAAoCESkSr4SPYd79xauiFOvhs3p/HRV0fl1X4YNsq29YeKPSoiSg1bTyPr3Ylq4Utf2ZikBXlrq0Bg59aVN9103979i3lmyHoMadE/++JLvn3PQdyEL5t87NS9X37Pz6eyiU7PrRBWqm1QT2koqjQx1XOuhvmFwV23DC56+lVZN4X5wRJNHv/Gl2TzwoYrruie9bzet75RXvcZu+9nRgriNgOi7Qvb0pWpjCaSeQDIvFtKggfnRDOb+co7LSFtSXAhqCSZESOZKf1KntKXX/uB9nD+6i/8cX/5aOcZLz/jg/+sU+sWTowm0q7u4iuNmYHnffP73zlx+Ohbfuc3lbEYjXJKEAEMHl889onPfn7i4vUPv/XnmitkAQCCW0nM/MolH+eVsnvfRSbtAtLC6d8EsVS0Je9DZVFtaPUA2QEd2XZTZ9+TU/I37pfzNi1esSupMEUAqMoyjvCsoahVixTFDEpfxu9D9LdrhFPqGkZi6oakCiGE4MLk5GSapqOyEFBjkhCCaBAN09PTKysrzDwzM1NVFRKxsZ1OhxQymxTDEYgG59sTE2effbZhXVk+evToo+1OIiIr/eGGLcU7fvc8eAwW9ogMEDoiW6tgPfbRetqyf/NnP3TdrT+9e91cazRcIeMgiDjvnIs1BzMzYCfNx2CEuOxGo5Evq1jdE1FVVVFSYFgWcZmmrTwGMQJUHxKLg5Xe8vLy+vVzitBqtYDJq5RlWZRlc/LXOySu9ZoeKhLjcbTUjd+MyUGcC6pI8E6DgCiKBm2wiKpVVWkzUi29q6qqck4aBYyY/2JiyDAjsQCJQhAnoRD/P6Q5mkMHo9xVLE3qn2juyVgfg4hsI5gVT6oQpQZEQiMILlAXBwxIChAkhFCWJQQxSPGb8Yv488bEN6a1PYCxNMf4CyJiMlzbdZElTthE/DA2Y7Z4iEe3wbFhsDbKWV7Fa90VQEugdjYPn/nxgyf4ZCuA6ofDE1IBkUGmVyWfyLH4QPXahLAFCAxkWtTVZZYq5VLMcFS2Mjvai2998bZzLt69PN+zLZMkuaJPaMJAHu+Sl+Cci6x5aSTBAcBJUFVjTJ6k1lpoegnjFzbNUk4NWaOKzoXaxhGYkLMET5wYXXXVRU/Y3k+qiUHAtlnx6HQI5MSSQxTYfxII9dYfXOy3yIWWx0CesnxLe8IF3rd0b1IZViEFr+JUwDIatMRaeYie8GteUZcBACwxM6MKgaIKoI65OkQUaS/jcDV+OtoICERtFoNkkKLgqIiQNWQ4dk0YkAGzLMvzvB6VhlBbXcWG7SpyUCVyFRA44tvZxD8qjIjIComxsUjVqNtljQMpvKtbKa7WOojvWde1a9KOGnIFq43fuITGS84Fv2a7rBLeRCRA8
0WcOUTMlGgADTFPDRKcj2UordGfGfd+pVZjqBPH8ebFRrZwdZRLq78bSx1BoIRNmhgkFHUSoiMONGlofShJgCDqQ9z7XiXunTpyR62beHlMQMieK3STrdbw1p/lt91RuEFq0QOnPaOCfqZzaxoEocy0pN7Q8QuufmH38S/KRvNn7P0HC/4zd08PZCRqGBLGFoAMToBfOfDUp9Kje8OHP0yL5qxLz7rArZTDqYnillvo+o/DwqGV972HVlY2vvw1s+/7hPnTD4Snv2Zl6+n9tAulymCQyfyOrbeccc4jqnDWE/pPeGHvjB1D59wIjRC2jVg/MuAnMrBh2ejyaDQ/EdIbr/jTo1vPe95HXltye8Pfvv/Mz386THZDT9q5qaScySa2nnTqGZed/pIXvejEwvxfve0dS/PL3awLAMNRb2Y6/dp/X//YkYWllz46sTHJdiPOCQjwdFXNHEEOS1d8PZjSld4PytHsfeKdkoPKgnXJYB0oStIX0GLjIxmniDjA9PoHjs73gA16wAgKVlXxQXz0qfcxLTOJBUJVRAXGuLMizFQ0+PHSNaCaZZkxBhmDCCeWlKqqUvAKVA5HzrkxJLjVaSuTSRMEUtWjh49MTE5Za9GYyhXKsm793P33HOwt9Xbv2nVicdiZSt/0ZxP/9a937z/CyQcDHE5c7qmFHWOnzYydoZdXZ9LT3b/9x//+u7/7SJbnrjJZQv3BMDWJzWz0swMABFHU2F+NF7OystJKs263O9ao6vf7RVHYLK2PziCqmiYJAARXBZd1Op2TTjopbyfLyyOvAopZlpVlabMEG+6s85WBesNAozVDRNqEnHg93ntrbZ7nIjIqyySpAdUaVYSifAZR7RxMRApC6IOYBsHBEC3E0CgFUEVQRmATI5+KBGxaTFEHQyHOuqAxeR5fpBIC1coeQTVm3MwcyYLIBI2mTyzbEBFEjbXOqUogpBjsYlESvI8HZ/Ae1k7cE1MfcCAxUJFha9MgPiJD4rlWO340Ej+NukaDGiPE6PIrqGvq+3ikomFEJcCaAhwCqE6s53t//MCfXHv0lIsuHi6tMHLpuyZLzsA7n2RufEP20Vv9+SaAoFTG22FVGXZDl6RZKIRhmHZaK8dGV5y2+Ae/ec3+g24q7/rRQAx2bTbyg5CmqYKIAmCSpBV7HwIGHwvf8RmttZW6gionVmL1jRQgejkHAAjBMSWKikymwdRoAMHEYJFmU7/+/A2/++972lsmfH8yGKcgQEkQdeJQiY9uLTc+Gu4+l2Y7oSIIASVZCYOQDg4duqfcdrmfqpA4BhVXVYkhECAEH4IGqQ0cAQAgTZJ6DGwMEwE1a1jVSQjiasa4ABEZ4mhki42DHqwRRFsbqyhaDxHFDnRcLRwhfqLBe2PMWBIyZq6yBrc8xhOh18CAoBgkqAQSDYGDpClZIsWazBbUi2KaZlKW8UrGwD1Y01zBmgIXy0+IhBzgRoR3TeQjotpByNfBm2rFVqp9QTTEdlSEKUAIHlQYLbFVioOoqgGOjYMoAESFsSQqcQrAWKBDVSWgMVE4iZlrrVZVZaKmZUWGNM6aI7W3bmjVIDJoJtaWTYhYMKm3fOynxzx+Fa5lONqcU7A85X/4g1vw7z5wJiyfaOtgCCHosAudqngs6TyKrbQEW6VV0Jf8ym9OnfaE/tG5p+YHzv/Fp7/22MRx0x6FIh/ZlYpdOJFl2amnVi9+zjpIwwf+DdLJ7MVPeW5rdiNx6Q4+6D/69x1YQcTqjk+Vd98JV78yfeZL1j/vFXrNS0ZLK+HIQXzsIeofuSi7tg0LBucVCjuVY4tOfor3c63HbvJgYMlOkk9S8ivowXYRnUW5+5wrf3T17z31wa9ve/1vbbzyinWnbXWD5bxIRu0QQqCUB+UwZd65YduE6SLD17/6jf/7zve+5c2/vX33lmHZzxP44bduUFjqnTVPjGVSUR/tUuKz4CcqSJbJU3/2ft53TlASIWIURJAEApbdowBxuqcsmlSC1LYS7lwqbz3cu2bSOjGQAvrVfnKdfqkQGGVUXX181LiySo0QVFUCEIOIwBREimFhrTXWMNPKaNAy1gfNOp3RaJRnLRJkY+fn561Nut3usWMnnHPrNm6IJPjjJ45Mz8yYDhZO2lO7120+dTi4a8OGLc9/0bZvf+W+678s207dWr0tnHj13unz7WQr966VPDCX/Uv62dtPnHJKOhqd+JPf+933/fN/igk+UA1A9Q5BiBIxRqAiQa/eV7XczNzcnKpWwXsJvhIQqaoqTdNWllZVRQqi3rIdDEfGJIGkN5CLHnfpdd/8xrFjS2mSGWBAKoclmsg4AEF0qtYkMd7EswMNW0oGo36SJKBalmX8JrDxzrmqQgRrTdMGj8VdnLAqxOS3QRFHt20irKqKUpskiVZqyUqosDncMRJ8AUC1nkoSOgkGSUUZQEXUsMRaAqO/cBzQIgG6eHwYjkUyNFocoIqiARtBHxFPhN4RKDJjrDBUI+rNgUKEUNVADyAypBBBsxRUg5KCJaagqlUcxXmUtVM09QFC9AOp8bRa60kjIkljje69d8ElSeK9B66cmMQjGnYkOZgQTCundGn+975yaMPJZ/YXFnySIkgLlv88/4+Lk9vW03ELbo4W/mnizz8yfMn1oyc6SqAqTW6hX5kWQaUYRJeX3/XXFy2Ncktu5MpoBulGI0S2BF6CaWoUIrJp4spKVFgR13QUJap4goL3BpijFboKEQJYFC2qAmwwzACoCCoaG5gkXFg+sVw990mXvf/aLz6wPJUlntFqAPGaMDJam9rs0K5q6wNOnggaFCklG6xJR5Xi9CPFI4eHC/molbc9uMSYYIhFKRAgiVF2QaQBtQFAqQFMlGgQUeCAEjxwNNVQY2xogPpVVcVxb0wxoZ7IIBIxkfM+6j84EDBkfN2mBu99Vakx1lofQohwfWvq5E8iGQNrn9DGMkhKJ6XLWtaToKiCr7xDJkumkKqoSjAG44jUcBk8AxgE9F4NGTQxcIKCd84YY42RWipOIM5EARQJoB4AxRQBx4TaoEBNLhvzKkIEEFAKHmHVSsR7LyjMbJAQNHhRFkUCinMZolBzbuOEj5mZOGIbiSh2kta205yEOM5oGASKBKIB2DCwJQYBjKW/YRBlJBAFwGh/GUWkRaQCIibmRq48xFqfGClI5auKTV5JQF/lScpqPLo86Txyz88uOnpsKXVZmVSQ9MKKc8GE1t2aVOy5skQABn96112PnPr8uVy6N75v29alV99x2tH5IZN2p+FxW/3u06tt28uN61sTqf7HRxZHva1XPbV96uMvYLBF5fv/+eH84G2DDS2BIjcdOf4w/Ofbl677tLnoafnFT0zPvsCcfBZefPHW/R9oP2yzyiSjChXzjiriINl5xla7+Kp3Em/CBDKPpbGJcUbzRLjoJNfPbzupBU/51avWz5r1KQyXl0SpB46GEaiEqjpyDgAmJzpnnnQSPPNJ13//lnf847+84Q2/etm5Zy4dPnDnbXf8/t++4Z3Zu4ZuCAJhEWUhYBugK6rD1K7rbEg6/Q3WJvvmTx9uP4xl
C9KBCmpSYDCmmAbFqSNnkcK8KSclWDE3Pnz4kvWduTlTDhNgw7YmSkSiTtQjZDRYx98IKDagAAqhcqrqUQEEghgicpULbc077VhclmXZyvJQlkmSOh+yJBXnh0XpXJXmmSFz/PjxIAIMIThEQ8CTk5MW26PFwlG5vDif2NbSQnji1d3F5dEtP6At2+bEDXvfkree+4R1U/6f/3LP/D1mc8l7jx7auuWkR/cdW5yfL93RH//0Jxdd+oSyWCHIRZxJVDy6omJGIJWgCORcMdZeNsYMh8Pl3nJirTZyP8tLK0TUbrWCd4NBwYkF0dj/7HY6Z511zuTk5GAwiLARRFUvEAICMjF6CbFB39jjkFK/7CMiMVdVBQ1HtqqqetaF9fCG1KiqxJynmcUaYyKGBQCMMeJ8nMhqiNJCgdmQYY0e9VoTBElXIyUomrGtAiAze13FodTtgbjbCceH8LiLDg0zYbXHOGYijiEbTa8sVhJrf3c1rYvszea3xo2B0JgIAcQ6chW2zRRbhaslFMYrEWVAVDBIQsTK4oWAsEpZybeBgFpl3nf9mbmkHZae91e3PDzYlk1DUJEQci7f3P3kRcntbeqvwxMO7CwunhD8zeyTB9ymO6tTrc2rwXKAdnsommV7Hjr+z2/ZuOv0U46fKIjAsF29dUFCKAGgcs6uwTkzM4Ra2RUbsA8AEGAAcD4IakKMzWAbUQQkM7YWK1GNbdVo+czQY2r7UrtbZl/87G1/9Zl+trML/UKyFFBdCIjsvNKBk/xTvxh8AogWjDK20UDGncr3y+Fg1B8Nk85EimqQok6ZKmjwqhDivR2vB2nMuIipVo6DGqI0jgprn924HYqISZJ4bapbZkQce2E50bpFDJCkKWIt1RmLwDi5DFwPXJgMJzZKeI7XDCKOBbeldrxADcJExpiiKIwxURJ19bOAmgaWEX+rzo9FHIiV+BFBDAZQCopBPDZ2KfXEBFBQQIE5UgkamPIY4Rj7z6vEBBGJGnI2SZgwhKBBIrFIINT65swEGqEVIoG0xpdRIyE+7ktz9Ns2sTZSVUVBpAiZrOFvukZmMs7NI/8DGnhHCIGtiU7kEITGFuAAqkCGM5NXZciyJAgE0ODd+un2TXc8dMsXvnrVROaOc1+kFxYZNanMfsu3t1oLx8kYtb7XyqrjxaPGzpnvvO/x7f+460SyxOEVT3XnXphMzIwOL7t9D7duuLE4dDCgw97iromNx84567kT9gxlGHzjevu9z3GqhTKpaJCEBTXPFx4pv3lP/7p/H2w4OWw/1Ww9/dxTr0M7b/Gwx7Swc6wulaU0LJfp5m1bVx7d8ezpZcUp1rIATVIllfCFh9Q7fd6+72zfdPLk3OZ+CDZPg189+Go38hAAYFiN5uZmzr/owpl1W79x3Xf+6R/fV73udZvWtak7/a2Pf7s4uaBZI+hxToFFOwoIsuxKO8p4x6gUj6ONv3janrN+aGBWlEM60GCUApUtO5gzBy4dcJU4LJU4tw+t9G453n/2VOotcMy2kcQ7IUySBFVQwVdRQAZixTjWQDPRFgUEAIjZqGqWpbWjy3DYarVSY6uqStIMAHq9XrfVTtO0EGl12kmenTh8bHFpaW5uDpgiCqndbhtOrQmD4YJTnJ2d7A+Pn1jS2dl19/9iZXKmOxwcf+SB4QteuW5u6/JfPuseNhPdrpGJ0RlnnG6trSq3dev5RVXe/rM7Lrj4cqJJxX6aTAx6vtUFo6EqhSjqCdRnBAN67wcrvVar1UMU57HxDBEEFllYXEyNEQATGMAjYlWUDz30kDjf7/eHw6E1tWqHiDjnADSUFWYA0XhOa1KT954TG4e+8fQJ3guiNabWPW/oXBHkEluX6r02r2gBNGaRRs0EbOyVNE7iEGt16GakRaIeFREj1SSeFJHGE3dsvVcb4Uls6uxxX25t8xDWNgDjDzRAKV0DPFn7ntCYJNaEB4RVd8U1VJBxo56invaaV5D62jix41hORKDBh2CTJIRgkKLbEyKGFieFZkQj1x+h3bGhM+wvPP/t379zZef0lBmhWEkcDiaS5ScnP05wtBUPA0AFpoN9y+5A2PDi1hfuHv0Jg0mER1hJku3ds/KWZ7Z/62WX73t0QCYlggAaQiNiZZCYXeXF+3r2CaoSDLGKkDWAFNVfxx8KVdFwZLCKiJeADTifTaqqIL42V4QosACGTQiDhJNDC/DsK3b9/ee+U5ZbWvaEF0aRuanpdivZv7CM8+u1u3zkTe9Z7B5rL01v3Hv6KXtOH7FPDZ/oLQ2r3qiYJmNESVQFESCAgCpEzrehVRT0uCHGEO3vKDo8xqevqorAY2XNNS80zMwgCMQIol41RGURRY3A6uaZMtcQoTUJijZdGVyzAiPIK3aDOPIGOc5YGuR2EIMExJTYuBmpEUQbh9u1KzmuPe89AXiGKAzIXi1AEHGoFKlNIuNm9erlxV9fA+FSVWhotYARaMZRgsN7D1i7sVHDabZE6kMkWdSNbkRAJQWRCLeuYRPAhNQAHSNVoXmJCDiVpCYXEZEX0THGmw1IzVdSQjbRFBgVBeOkADHi26ObGZGJQsQ2s148MXvnOu02E7z3/7z/7r33Hth+ysZ847Dcb8JyarqpjG6q7FGAZ567vG6HTk12ZmbhpzNv7RUPP2XiH562ceGG1sa//JM0nzAPHvYf+Vi4754uegmwOekUOQbOjm7esmNux3OUJ4d77ik//ffrbFGZTuacIjqG4GczxiIUytQBpUP3jg7dLQzpqwE7jJPBV8aP5oUonaiomIfjlf/h3y0+cm1vcgqEDBImTMPBTac85f4n/s7zPv57Z7zxGa0zto2KgTrVoiVZiCcY1C6otTeRsehcOTk5eeppneFw2Erhvz72mUsfd/4pF5zy1Y9/LL2h5V5S0QnrZx3MKQBQ37SqVPd6d3erNZUsl8N1/Y0bv/2Wo1f9KyqawQyQjOb2uNbSzm/+A/kEoMxCWlmyTpY0++6BxYvWtTZOp4UoqFhiLxgAYsUlIYQQYo0XhSKYa1RvYlNjEg0eQAjRqOpwOHSZI8NTM9OurIjIRxVGY9bPzhVFMRwM0jQ1iVHVvNUajkbHTxybmp3ZsmVLVbqq8gapCn5qYrIA2jiZn3n2dheKwUKy0isXl1ZW5t3jnzTznJfO/ft7B897/uuWV048eP+Dl132uKNHjs8vHBdvp2Y7Bw/3HnzwkZnp5NCR+cROjkajJFdXGWsya6sqFOMYVgyGEe/jfFWU2G63q1GR2sQHKVwVt1aWtUJVpHkWgkR3s6nJid7KYHKqG8NhjKZjaKKqRnRVDatRDc4BU1SzqlGjMQ2vC9+6RACIKgx1HDIxMINyc1hoo/JvCI01gFrvU61z2GiAG+mGNSgznnSEUW5TfZAQrLWCq6wk/eVxnTZiEeNQp01PVWNd3XTGxuXF+BdhzWtcDxGtMqGVUBE0wmitIV0N2DG5i9Xe+HrizJsagqMSxpM0/isDsjGhgcPkeS4CzjlWV3WMH5RpMr2zKzfedu9vfvSheTh//dSKlIvpADnNUJNL4AGFsAGPW/THwsx
RWb+Bj3VxmGN5nnkMTLZUrCRZmbnJQ4cGLz3ZveutV+/ZWxWpz8HKKl1qdWxjrRVAY0wA1Uj/UFCiSKfRGuIEKLVoQxqbn813COK8AQbiUIEVLCESoSqBMKBzGVMpCD743SfteNyudd853m+1Ui/BtrLFwWjoBrxrsX/lNwDAzR4nKnuzC4N1Nw83HT75h09AAOHgoCpL7713TpgRgBE9EBpNaw3QNa9xYFOoHx+t+YH4w04CNhmSYM0ti2sVMbZ/ailFtnV7WkJDjmByror7KESJ0+bG1jG+QbDXyVmzF9gYCEHWBmyJKH0RH0ySjrPACGICQjKrVs0xTCZJgohVVQETi6IKAHhCIERCq4iGVRUjWMl7qn2IUBWgRkJgcycUoOY3ENfZCVLtTOqh7l0zEjAz1gQBGnekRDXO4QEAar6yqnoREYm+gdHHSUREJariEKAlBoIyaq+pRm9yAa0lwKxB5Ii1VAUQjXJhUb6DGrYCIKqKIjCx96FwhTUJoIYgeZ4n1rzqd//iZ9/7Nm7efJ2mr0wADXucSYJbcWCenLztdeasXXb/o8l//ueeE+ll/rlPmv/M/9oy3V+Y5j/6xrpBWSwcr1YW2omZbXec5cGw6lUDs/PsDMlcdO6vzp10Uu/owRPv/78bVu4dZnMEeUjmVYGBvXVi1BBxSeBkJe0CYqfsOcm03U7wBKbiQFOqAsBAuCejR8zG1saNev0XyZReqzTQwa2XfO9Vv37xbZ994d/+avfKS3uHhszWKnjTKCKsWfBxwyUmQ5Z40l58/lmEIUlat95x77atu1/5uj/6zH+9ny/1gmpWbMhEC+m2zY5tO16579eOzqQ/qsJ0sTKg1B45c+tn3j485Sflxn2EtnXwovkLP+2m9k0e3xqIvIogUiV51npoYfjzw8vXTG0MrkREX/MLMFQe2IzlAURC3DURFoOIHqSxtmNQMTFpS9PUBR/9RlxRxlN0WBYi0s5yie5aIILQarVGo9HKsBcrzqIoUpOy4YAUfFpqzyucfto5ZWmWjuvsOg+Ub9yaX/PSLfff1mINd9x7u6tk/ca5u+5+YDAoM2vOP+/8A4f2J3l73769H7v22je+4TX791fOlSZREej1KzaCYCIMIm6qqqpiKTwajWKiEEKoKscmKghKipjmWVGViU3JGmR2wbe7ncmJaW7c1GOiLU3zJwJMiLlyLlYDFMfpXmx0Z2uE2lU1SECtjWCrqpLGbzUGzgj3WK0+GzhVjEY++KgE1BxPHJtOSlTbhUKkBikRoqhvQhcgeAncmOWNz9O1rMG1MbXuNIICIBpeq9UgIqQQgtQFyhpeUC29u4ZWqIACGv8uIiITNpyW+JFVVRqBDlVVH7w2/owIY5h0nTTE6t+a4D0zj6pSFa21pThdSXZvTffsPfzGj+z50j2uPXneNCwMVUpqtdoyArBkiXyOo0nseaUVmEgYJc7VEEh9ZvsBCHyy5OGSucG//vVlh1cSZJeYyVD1sUGgrjY5FQAUDUcOTB2cnLPGRCWNsUl7TCmMMT5InNATYTLG7jW9C1VtQowCKiGhESuZ1wFIGtJw+J4fYPuZPDVtqwrBQGo8FCvXvF+4AgU/fUIUSCgbdI9ufax98rodD52WJalAnOjHs76ZcQKBqmlEqdY+XAAABAYkIh/8OLCtjR/jmxCfII9J9qpVVUVlt8beJ6iqSWwE37I2gLsmr8I1vrbjP2SJwy8BoSEG75j3xS5LrIydD6paFAUzxxHPeO1FZ6HxpXoVpgijpvEHxma4QYikNQcdGMYXCTFTHN8fXEVRwZh3BKRNGxhr2L9RwqDinVdVYoMUex8aLTlFxKuoj9BnoDX5a7wtIQQvwdTnCo87RAEARU1qSEFDDbZiQmBjkCJ7OiYBMedQDaqrN3a8j5CQictilLBJktz5ShGyNMeAf/Snf3r9Z745t2Gm6LuvwOGTJmYuzPP+sC99pDPy5/7ZdpWj1YjILOzexuaCP5ivHrpk9oOvOXPpi3s7iS89wKZN7alJv3LiqIOJ0cB7j5dfTts2nlg4cNnllz7ZLZbDz3xm4hf/vTI52R0lRTWELBBgCC7xOvRICQcfNOHc99kzn335jdQ5s/+9PvvNXZhIC1UoBI8OoBfsj7OLLnjsoZxNklvQrDStL//Gv8+Mjv3R711itu6SR3utTNAnRRVMO6Cv4aLNzdGgEoKEChsbGDWWL7jgvLTdOvPcs278wa2mm736xX/wmTd/1L36iH+m8iRv2tTe8ej6yQ9fuOuSJ1x8SfvwPfMPyhZEIRmmxaS555n4oGFmQHAz+x578rsn9nxasBVU0LsyTTQUonDTwf6FO/1sYgrv4tFiFUFBRNGaFtTw1iRJIl2+LEutkdCCjQU1MREz9fv9yCYqy7IoirijjDHdbrdwVcwbR8MibvWZdXObN21p5W0VyJM0Epu8eodVYtgF6K8ENwgnjodt26bPPOPs9dvnOV367rcf2vvoPYNeNSqWdp1y2ote/PI/euv/2rBx3d4DD4QQHnrooW075j7zyS+87W8+kKdJO8/ciFTZ2lgm1hQCYwxZExV5JGiaZKNhIUGd89r0qSxxv9/3Eqy1sQMctauWl5d7g/5wMKpKF3dIXcABgkKEAsUUJMuyen6Dq3o0cT+UZemCRyZijOBGbOZGY1dIRAyg8d1UlQHZkAYJXkCRyVjimPZ6CXEmCmNFm3pwxRGxiY3JaFxqUdrbYPMzDVtmXJKOj5XxF/Wx21Qn8f3rYZ4qKcT/tPEnrtvLTLUujyoEQS8QahdM733kGkpTxY6vk5kZMIQQbzis4SXX53XlSu+8SlVVMRMiorydDkb92aS9fV3xT/95x9P+9u4vP9bdsmFbV466QOCZwfvAVJTWuaHv7OIDBaQV2I14bCMdmrN9QR5ptld32UGeISc8N63HP/iOxy2m67wss0UeBiIgrmdvIcRaguONGV9JvDOxQ9JwdlZfMd6M0Qa1AnMTTlLkDDmJLE3QQOABKgTvSx8qY8l4LCX933/+TFk+PvSSKFFAS6zb98i6A5APQBEUTJko6qjTCyiHTn1QDKWUgGC7nTHHeC/eVzVmTn8p9K4NqLH2jQqO4yVBY6OLX/YFWrv2xrOMqqqGxajyDgBFNISQJEkkvscNGLXix4+4LltF486K7zZehETY8G5XCe4xzhljbJpgs97ijqiDbkQMNOmviFRVFQHzJCoI3qAassQpMsYWaJOP1lNSRS9aunrljnU5xtE3foTxMh5fcPw6ngAQQRuyKniCv/wCgLIs47JXXZ06oYKTUPeKoXYFjpmE+hCtz+rOc4PxHl/n2meKtXQNAVNt/RuLIh/aWQ5QO1UYY5jt//7Lt3/hC19Zv3WzgYJVSkf3hqLlEwk4ksGjp6Q337eyMMTDJya+dgPslyccSJ62/rF///UL8wT0P+5bVxhTiOnk8MTH8cte2H39q9wFp49e/PzsnItX7rm7fclVr5o8fWbh6L2Dr3+euUx8x1SBuKeKipBLcKYkcTqsck7JkU7tPHbeUz81teG3/vuuuw6UheR92ODDhP
Md76bXmbS37+Qn3PLd7JZvorGi+QAPfevZb13aeMafvmB7tWlTubI4tBhKiwh5gqlfgzaMT4Hrk9CmbWMMUc3dZ7aXXnKer5auef5VNpsQV73xZb+z5XMXd19h37P/kvP+ZMudl4/u++x9o8XD/+fv/u2eT32uKnpCy62gAUkJOahxwiLbrv9Dny8euOKDKEqJRVTvvfXDnO19C8OHjg/SLLM2JSLxQUSIbQjBEFfBj9fD+HiMvaCYnIUQgoIRUOdXDfuWlpZarZaIlGVpszSoIFOe5eVwlCSJZVN6Z0CnpqaSJCmKIuoXumqEJhMj5HnQDy54k1RBZP/e4rRzR+tmn5Nk989u6jGdQpk7cvTEj2++cTAYodwxOzubtumKJ1y9Y8eODRsmTxzuf/u73/jWN799xZOunJ2Z6vX7YELwwDDhXF1zR+VnYCrL0lqbZVkcFCVpWrjKe9/JW1lqi6rIsixS45MkIeTLL7/8xLHjibX12hWJFCbTWBE0PWExzJlNRESDKNXT/viKB3RUFoqbhBteLDQ4kbjHoqINMQIoA3kdM20wYihK70TErTH9DkAAQlA7eoICMker4AbvUy8/BpRGjm58pP6/cXesQjVGXa3+wJqDWFXHKJ61Z4rWVsy1plJML5qWIOmaLveYf0lEaZpCU4fF/rML3hBHfmr9HefikWqt7S33Nq6fOPLYw3/8wWPfnc93b9290fnDxXJu046jAZaCkjmFTDvUe8fUu/qaHwybJmgwjUtWiiGkx3RWAb7tn15kTjg7+uADX377ZeumNh9d7neCd0nquTTIEMWeWFGilEH8bCoiDNHnJgJgMISAxKoSDz6IY0UNcaURkwT1jTGzIKghdk7ju1L0fq/VEoxNi2qJ3TTy8sr89HkXnXbStmNHqwzafbSkLNXGfYKg2QhAqUrTQVYJubz0xg0mVwIIgm2lnW7HivchMJKoAoJhNki+dP5/PPRGfAdiZ2IstzROI2g8RqUoC1pzxyvvROJ0hCxQWTnvfQwbqOBDxC3Ws//o6gXUjH5VIdT3COrZtECEjqsiAgKKSJxlaCMMDgBRKROJrULs0EaI4uqFKSCgh6ZubgBWnpFF2QkxBguegAKwk7H0F652aBqtx//Be44+EYioIIiNsLQoxRKZowm5MQZQnHPBOaBa+FYbXFiTg9Z8UO99lCVhZkNkskw01E4VPhBR7EWR4br2jUy2sXmGD8wWpZEbo1rKZLzTMeK3iUE1HltOPWIcJvqJydbHrv3KZz//pY0bNvuqX1YwMKO24I8KuYKz06l9rO2XJiY2TMndd9Kx+fKxo3z0lDfywqPf/vg3/88rTnz1WPd7D07aNNEqHH5ssP+h7PyLkksuGWw6WQ4dctf/yF7z7Ddf/JSrh8eOV1+5sd17RNLJ4XDBYZpJIGsBKmcwLywBCMqoWsyzierUi3+Ute96+E7C6s9uX/euiV2XmH1YVYp6dNTuL185/N5N5ehI1trIAfr9gwee+Tt3X/mm33gcbJkYmhUdGNtSFgyVqJjEOBRuskzRtQ/UqyejrORETNrq9QbTU/a97377RY+/5tdf/7pvff1bj9x/yxWXn375U7Z8+UO33vDNYve53aUT1dve9e5LLp24+innlTPlDYuzxwb9YIWUbFBEXzHTyqaNP/6NQ0/84Pr7r84XzwmQSSgtUqlU+nD7Y4sXbO3mNgEXPKEwBgQybIKGOl0GLyG6ZeBqltWM5IANAhJSZpMQgjgfTX9d8IaIDBeuct7pSGPcir8mIt6LSCUCBEDRFoCGVWiRx7RDLlS7dm2QxP3khuLkU2DrDv+fH5x/2rPO+f43lw4fnb/owvMeO3i0qEb9paOsrTf/3u+eeeYFS0vL6qTTbj/zadfcft/HP/GFr1zzzLds33760mKPKAFajvjV0bC01nrvl5aW4qmR53mccsctYYxxzoEx1toYfgREnMdWe+8je9bPzYUQtXEEAMbJJo6xiyLB+0jyidFJLQFEa2FOQs1t9TEdJoPRSFV1HLyxSa7x/8mUI3LHOZewGeta171qjjk4qkJAQEVxPia86uuBGTNb4tUxMK2WNbBm6gZrKgxFEBWNHqa46q2kqpZ4HLZjmV7/gw9BtSbyImKtt4UBNAETBeXX/qHQqPbX/7uKfKnzAxkfnaiiATwyUezhAtNwNNi6dfo73/npb3+ssjOTp29Jy8FSD23L8NAHAXGQ5OAG4rql/PvG3+tS79cWPvDG7se2wf4edhFUAhLQDeFpPxldNjNB9x8YvurJsxecs/vQ/MoETaAxoIaSuG5VVAySoobgicgYDqDiPBoDTRoS8epRuVcVEKO1FGC0wgNNIq9bKapNBRUEUkNB6v6SEWQlCIJBBClJ8lA6k3aMwokh+VKSlg9SsOTkPZcZqIJQQ2JVMR4AUBADQSAAnGpPtNtZCCvikNM6DVJVDQVQNKRYG3/rGqt+LgooGhu5IqIhAEPkz6iqkxAtQOKCsdaKUBzx5HmuQaKsjyUSy6v2lADGGCaK3Q5rLa9y04OqxjpVAQLUnV1BUARgShvN+gjpF+fYxo5uzHJXBSBDCOIkTVNjTPAOmsI6rrQErSdxoCRqAlilACoMFEXffF0HE5noV2obQ8MxWCHW2XWSimPhmvh/JE5iXWoQogxNKb70DhkAwNS1bJ2VMrGTWis79kiijAkzq0EkMEIBQggBRbwhAEiMjcrSIQTEMXYalEibvgVRfayxRufIeGsIsUZ/RgScSWxRFK1W69ChE//ygX+dnJpjm45ELJp2ZQqW5eHwq+i2w4Tv9Nefab5zfW+0aFw+f8+D5297ztUrX/3zN5x24uTJ8q3f2zGdIVIRUhCwi2H03VuKnz+YDEZTuKQTGzetm86q5RU5tOJ/8rmc+obyrrWhHPXDZJCBqqIYZCgTyUaYX/qs+cnNs48ef0Gy+OTTdh7wp67j6VsW3B9e/9icyEpZXnrhM54pMj0aYZ5oeWwpIL7pL79zydsuWgdPnOpp6UvTTchUNCDvFVoGyJs+YVov8jp/BgBABQeBpKoqxISLyqVpXhbDl73gmk9/+Rvqqte+6bdS27r3B59dfuDxR/Yc3bj18MIR1xsNfvv3HveMZ7z6p7esfPrf/91e/KSJU58+Or4oRJ6CEggJA66/7SULZ377kWe+85yPfkTMpLZ0saxYMGG67djCsxc7J63fRF7SxI5Yi+DaNsHCG2vixrRsx2cm1oezRkQTMhkFQMbFleU8SYnIIHnnEyDKs1FVMHMnb4EoBECTIpoUHRFJZLcQktqyKDAAkTHkR8VwXTp79llnfOGz1//+W19/773v+uZXq2TiUDZ9+IZvZttP8ffc7fY8tOfQ0SOzG8+8cPdm5IPziz/58W33n3TSFTfd+LNbbvrh5MyObquc23b9V760/5UvvbY916180JAgMAIaY7z3VTFqd1pZmjvnvHPQTC5rmyBriLkGCobAoABQlOXM3JzGLh6zNpqxUcNWQV1VaVrXuKoaTaOUUIMQgUEKlaOmewCKiiQI1tYxKb4nmzjFUY3sQwSUKCMiN
aw6hFqXUUKstOoel2iUZScBIgoSoh2KF98IM0bTq1pzQ0SwgXFFzzhUGNP841gMABiJiREgAUqQFWKsABGp1MOakro+rBFjL0RFvQYA0MhLBiWiClTFZ0pE7AlUgUIjpTsmLFEM5yAoDNHlSWPPIHhRQDRGHWiKLNwTf+quiY9++bY//tTSrpNOdb4cFqWxFryIFy6CU/RGEFBSec/U28+zD75s5QN3+VP+YOUvr05/fGnyswnsHdHZb+lVe6qzUlsuFLB1qfeHr7rsmFNDbcvDnrRacf4KgZtZZR1pEIKqQcIsFREIABLbFBIDbS1a0ohsI2JiEiTyzqOuqlWnyOBjEYSIaJAYKSZ2RBTKfkqpQalCdbxy559y8hPOvu1jv1jauL4LQhWW8PDFFD4GZUsQJCkHMxUAsEvYm5lDm07o0R3ZyZsn14F1oTQ2YwFSDKjivVdlG+fNaxqWnBkBtTE5cJ6IAypEez6E2pNHQBMmNPFcj8NwChp8GRA0KDFpQ9cOISCRATA2EQQBBaiNHBAxt4kCqKEovoHGRA6UUCOa7QMokEJKJt7GAOpRgwaimvSPPhauBCAxGRWRaNDrIUAtfKnENNZAVlIURQGF4L2AMYZiPobApMTqAwQhDIYggJIgEeu4HxBCUEWFClREEjbGGEKMxulo0BgSEQUNEZZBEE08Y4gVRGstEMZkQQDIsASpEXyN9kjpHQgaYmY2MUE3NUlpOByqD3kUQTLsJagoG4bGllgQEInZaOUJyVqLErz33rsksSEEAWRmCCFImdiQZ/Yv3v2hw/uPb948U476xGnm7AjFSF8Nfb9iI4PffPLOLz0ox4+Pzjpv5lMfn9nwkje3R/s/sONfT53sjzz+zoXH0nv0W/smxLskSaxhIip6Xdt2WWeZtfff3/oCQOuKxz9++rd+v/rI+8uD9+ackrbRFsfIADgAKbAroyW98KqvTu646eGbN3GSl8nz5y6+6Me3HT575rq77nT94khh120/8xk7z5j8wj9msDKQudnnvfCgnfr5U/7CjOB1uwZCoGJJSpASQVSNYhUUkRL0jfpQ7FcgAJCIAAKpVSNKxBVa1oWlxXY+e+XF53/4I+/cvrU4Z/1lxk3ddPOdT3jK2Xd/Z3jH0i/e9/6nHtiXvfwF/2f/Yz+e7K6bXVrcMbO9mtg0GK0gtrOQZaNKrS2pvf3bf/zgK3/r4OO+sO72V0mFQCn4kUc4UsK37u+/YSZQJmUoTZl3QUeqllUAEIC8IBlBCir1AcuWgJmivXMwwXtUEO9K0G63W5ZlUbmJiQlflSmztex9RQiFq46eOL775FMp5KOqNEBIFEKoqspVktlEA5auJDKLS4Orn/WMr379kw/u/+JzX7r5p9cf+P5X0mc9/8X3330UZGHb9uMH9vbbreySC48D7N37yIjuOHjGmVsefaj97nf91Vm7T/3Rj3/YSvxv/G6yMpg/cOzHF2x59mC0YLQdxDGzOG+ZMU0BQH1gQLS2KArvfRzcjiu8yLhy3o+bRQcOHIhqgmOhuDHyqD5kIaoGsapGcZLYLvNNxykmtjF5T9iEBupWl5t1OSgBNE4mEGHsUlKUJVHswxpVjbJ1ibHRHAnWdALHEylBwNpCqeYBjdu84wJ0dRAriqLA0bZXJZqVjnWIasm9miRCRLBGv7Cu1DV2cgQinTGK/QKAKgBUvgqoBmtRsNAknrUIkeh4/o2IAsAUc7zVi2zulSMTWO2oKk9Z1/nEF3/+1s8WJ+08xQ8LgYCqXtR770WDAUTMArGXP5369xfm179l/h13VmdYwArSL7grrisuZgKmaUv90rpQ5Yfuu/+Tf3Xp+rlN+4/00la3DIZYAyRMNXd8DC4bmy85DfX0d43Tqo5r/FVStcb63jQthzHwOzRBfbyi1qY1Jp9YcfNTdh2SN7aQyldukXE9UggVChKPWtltTx496esAwJVFBPbWjnIMtPnu00dDv3XLtoluplphQyEjQKxbDqi6ilEav9Z2KZys8tPWLhgbCEhoDSAAsR5IMAEhiWitmacqXN+CsZsCNK20sqpMmmRpHsdg4/lLU8MRWQMAlYQo0h6N+bAWBQfLRlXVh3pZriLVa9ByaFyYnHNZmo+b5+PnEqchIYQo+oWAEkRAmZgQVAQCIIBQPcVp+O91k8qw8d4HUHVOG+cV72vBn2g9GXPcOMbzVS3PFyM4ENZbtD4lkBobkuhRVtcMVRVb4tQcFAkbAQTR4B2rWuboblHpKuhS6s8FiBjEx/sfpf+ROKgAqAQMUMxNzXzt6zd++tOf3rRtQ+kKUUjAr3DlvBhiA65CvaVFOKjm98vjn3DSV687nq67oHXmM1984A9mbTGZhBOF2dSq/uLigxOsX3p4AhVIkBGET0jlJNX5xeMrC4vr5rpheiZ54vNx+xOXP/1O/ck38v6KWGvbGWIhqITz3anN9510ytd/8BVaWjgUls94/HPgwJHebHVdOXj0yF2daRw4+6zLnz5z961+ZaFk3HLNq2/a+fjFif+frPcOs+SozofPOVXV3TdNntmctYqrHFAiS4DIYJID4IATtvlhjG2cA8bGBmNjDCYnYzIGiZyRUE4raZU2553dyXNjd1fVOd8f1d0z8jePHh7topm5HerEN0zsXUz+9OrByLBPvOoD04qPckkbc6FWXPH5KKbyRJ68zYWMFvKCKKSjpP7gnoce2/eoBty75ztDz/xhvX7OS4bX/+zBw2s3bfjwu1/2xa9/4pufPhaNDW1Zu9H5eH5mv/32f697zq+PrtnayWeJeCAGVabR+9OXjTz8qulrPzx68AbVXiOFhAPmg/7hxW7X+iYREgpb6yyRssyZcERKIYp4YQZhKDnoiEFwXXsQ0lqjwPjoWLPeyLIsrtfqzcZSr5P3rfJqYWaRLdhcrPXr1m3o9XoHDh+w1iKqQS9lJwDUbDYZwZBKTKRVkvYxiVr/8YF/f3j3nt337T171/YrnjWWibr0mfXGSON5Lz/7Jb+47gWvqvc7SzOn06ffcO5V126fOXN71pk9b8slg+7Cxbt2XHftruX5yCTt4fGk1wWxyuXivbd5KsGOFCg4AAbF2lqzEdUSJxzVEm2UdTmLt84FewZmbjabeZ6PjI5u3LiRFJLCKDaBzxDyLlQGAiHLVpjeEhlbLHW0juO4VqsF1wcFiFxgRqovRRQprZDC+C6Ettw7LBXUw2/BYPDAHEJ/Jd1s2Vv2RbIsNXsr+K5atamFFc0pDlM7YV75Y0kyoVXJIMQXJ/yUT7LKoKMK3EEYKHx7+CeAv4pQvlqmo/x0Ug72GcQL57nL87wAvwi7IuwjZWQpGrTd2EjzR/c89LYvnlmzfaztOrlii+IRnWMRRC+aQeWO2b+i8c23Dn/qncu/+9X82XkA8VtPHjesX8MwlNrUKpUt4LH9Rz77Zxc/88rNR063h+uR2J43pNBZIufToKO7GjhGRKQLdcDVKTNQjyp74+Jbys0CM0OVnQlRkTI6hGlcJY8AlWWht41a09qOuA55FKu3b5sE2yXSwA6FNHHrJ6/QJ7eBQNSuR8sN
00+ifrLz1utbnTGwcNbkeUoXHrKhww4iz1prHZkAYqJVX+HRh6k5lbCyoOdcklsklF+VsWZARQSdSwBAKuyhwtsQXv7Ayguva1CBhmB7ZXTIRsCikTQUmIOgSIyIFQ3JGBMOTlHreA7cNlUF05K/G7IXltAwa61WxujIeufYQ6m9Gn5mgCg654KdObGQgGIAz+wEhFYeIq4yhyCUwv+pAMgUwEOtiAgB0jTNsqyYJGOVDwswPP3/WHxVPgifOTygAM9USoVBOlcgMkSJNcZGjGJNFiVDzsXnUvq6Iwa9GhQGYEEGQgZx7EERA6JWwWgWEU2kpk8tvO+9HxyfmkRka73WUS5WNNUxVk5hnsdiu5A8sJ8Pziaf+d/eqVNTF7/4NSP21AvTT24aygGgrt2WVqZJfuui06MJESIQegSF2vh6loK15td+/68uuvLZg9nF2Ki152+afPu75Y8/0r30hZlTsrQowiZqKmv8xOajIm6+PdYYGxpef9P2XTuXT8xu3H7Pnj0KbHvBbR0euX5u1j1wlx2f0q986xd1dPs3/vO+tU9/8TlwdpO1Vy6UIJWSGqIClBX4PVQvrXOOrWObU2aVF+WQ8jxBydq9LVPjp48d23///Ru2n7v/sWafG1/cd7TdW7hm/Zb+fOcHX512h3dtXL8loqgjkEVe1WoT9nB87Cf1dA4kUR5rYh3YPFYaZPyO31Rp6+gz/5G9N0qD+DzLIqAjS+mjR09rQecFJSej0WZ52fCEYBLaG3ZWhL2z3hey8957zcxepNPtxVEwA+43h1oqs9ooINUYGrZZ3mq14qjeSwdREicN7SGL4ka/m6OCJDL9rGOtpdYQEbKyzDzTbU9N7rjxuX/+7//64QdufzRKfD+f3bR5+JpnbJkcXT80NtOeacbDR9e1Zl265+iBVtS6evis6d9/54Y1zQtPzXRPHn90fOiaxXQ/iyYFue23GpCmkGZZIwDE8jxOEgdex5Fn1x/0tNaIIMCpzYOyoNHGex9WI+GlX79+fTi0RWQE9q7IkZWIRDjJK4cfUOsV7aSivRPPEmSOVpZnClAUERFbJ2WDRQLB6FRrbahYfYUjXfw653O2SKS1RlTVrwgONrBKrwoKhSxZ6YOlmDaH7woLuUBqLJTci3S4EnGQSzKVAK9uNcr0HL4CrIxL8Y3wtVrpVwhD0CwuP2guBhcXRAD27BWZQhgXsHJqAiAdQeagNdWYOXr4dz5yKt66Ebup1gqtR2aRkIDFOQ9KBOX6ePd7xv/pc92XfrzzK9r5sGonBOXskWMddKwSWl70dvHEF//yqqsunDp8Urim2/1BvV7PnHMeuYYKCFwxFQzBHfzK3poAK0dFKoFpAKxQVdwkRARVONhgAMKUi9fwWEO2w0o7ujRbVGg1EUexqFaNl10MaZp4JSAGIENEANSi1dw6NTu16f6zskiGF4fGD2+xxqcqbZrh7RNnCWUcxJicBLkW5CKXMAIUYjIrmSBE51BUFjtOgYBs0EoDISjizDJzWaQV1kaenQtnBEBQJGBOibzCcI0aKWTJ0I2hVklkCrQnYGhaAQEUKgyOjuK8FxAuESRh1VKBsJxzBSmo3KDDU2c8PnfOOdPUWus8zwEk6HhIyYXD0kJRgHPngEUpTUpxyVBG0AENhivLmfJeCaIAEGGYzIsgC2OQkTcsAJ4HbhBuby2qRVHkgMMlOOEwUKoODrIERyZYRZLRpS2VlCU1ADj26AvUgUL0LOysAkSi4D+4epSCWKyfREStmtMUPGzkofrwJz/2icOHj42vG86yPpDJnXWCCWpNOqU8Z2LlY9XI2jhQ9QbL2PqzZje95Jen/3hd3M8ZDYlCROLRxDuhqzZ3vn9sBBlJgJ2DuJkO7Bvf+NvPfuEr/+7v//jUEw9vu/CiZ1xz00XXXDl+w/MGFz7DPfGwves/QX03HcwzACwu7CA1smPb8UMPTzRHlqdPP7J543enu+hPXDeybdvIuvO3X9jcurn5zmePXnjtfl/b+1vPa/7Fj7aP0Yt3DAgVuBiwLRIhCq+SAKrOo4iE2OK9L6IiAClt2YIXJOo7Z2pqemb29Pzs2ZdfrRHaC743PxKf/cRff+voe/79H3/jFS/70N+9Y8PGdS+4+lnfu+Mn/T5Hw4NB1tqx6dKsczzdffP5z3n9A51+5NyQDPeXvdG92qCx7idvO/ayP1s69yejB5/n+plY10pqyyLHFgMKUEgHLLP2joIHZXB2L7KJUsVmEJDLqlSHXi9IWDQajW632+8OmvV6lnYWOu1Wq6ViGqTdTqfT7XZHxsfWr9sszHnfNurNPMtAY0RxY6TlvQdvyAsB1JqNxdn56665UL/9dd/5xi379x159Yvf6Fh968tfeeaz4uGRxqZtyZIdytp9axpRbWLd1MjpU3uOnzz241tOP7Fnfv2myT988y+v23h9bNbnjuuNcWbUsdNxFNSv4jjupYMkScLqN0BqmRkEQVApLSzhzFTDK0Scnp7euHFjEYHDSVArFTqW/D9EJAACDBBgVUoBhGLWujyEVypDrVbFblEBcpnCQVGBTBERwkipSi4/5GARUUXUKPGZsILkqiL46uK6GF+sKGkUO0gqUc2AGFgKUHAQCyb46tYWqMiXxSinhHSFX6EAgx1M+KPHlbo+jLhFxAXfhorf6TlIiOjAXBRhRlV6UK8uIMJ/3hfLrjmBs6/76D7d2gqQsm2KzQWy0G5BEMX3XhOdXzv+yfE/uyO97B2zf8jEwh6RQCtCiFKVK891vdAWtXjs5r+48qILNu2fdXGEID2Kh9NUEJ0XUJ7AqYo4iogBGy/V1YXPGNYNoREMu2EsjIaqG4UsQligqQnDNBjCgKwM66vLMgDwaMCToGcEGVCuYXp6WkU7nHOBs+qcJ/Ru3dHo9NTYk2tzqTc8CYto7vV75w5fPlobcdTXFBERc1HV+EAXRwgd9+pCKhzsQtqlhAcWdkYivhRQDGVWALPAKoBmUZ5iUJvBkB0LMThSRCs8nKBoUW27ARAkCMkVAx4CEAGSYmwOnnPvTeX4SSrcXieiS2smpf7vm8PeE2Lg+wVnrcxZBKy23tU7TERaGeesZwmDAUYVdkZQ0Yoq5+ynvpjVGQnaZxWLn4goTMJzm6b9dNBLmo2VOXxYQwRyoFJAKz9NlZMVTSrPc8u+4g2KCMkK5woAQm5WgIrIWgcAgRxRjApCpcheESptwgY6PClA0gnOzizccvP3ksR4myJqBEJKyYFFLwBooohbNu9jpDCuKcRMloee/uohN/Pc5U9hA5YydaZvtrQyQqh7j2CmYkdCjtiL1JPYUR5rthnNHJvf89Ctvdk93YXHebn/8GOPP+uG55y1aTtfe8XCVX+kHjgAv/jK9q0/HTt+cNe+R3/xvIvvaq3r2c5gZMv8+Ze9enJi56Z36ziJfaontnDiZw7NfuCW/z1/sLTuNe+caU6+5bJBK217oy2L1lEoH6sVXngg1eo33C5SYa4RVkIaI8fMBFoAai3z+J77c8u1JF880NMTEz/99qmX/vLa3Xfcf2ThC9nB5i9deeldg2T
BD2565Wvv/MnNe5dgrbbf+N5XNqzf9rznbN2g2v1k5HhnMOh3CGuZMSg0tO9ZQweeOf3Mf42fvBhtPUItHljL8bbPnEUEmxsCn5NNIBJAX8i4FqKKNs+9cJ5lvpRT9c5pREjTfm281u320zRPksjnWS9LKSHvfbvdJlKjzaHxsQlEHB0anjlzBgBGWkPO5c1mM8uyTrc3ZEY0kohLIm9zn3Z8q1aPE/Wyl7/wphueMTO7mNtk/8F99Ubr4OHHgXozj0yjg+mZcanV8t7y9z7/pRe84IY/eOtXL9l28Me33Twxuuaqa57V7aVjI5sy13fOJzXFDgjRJHHuXZIkGmAwGChNyNJIah4kS3Nmr7UOSy+DhRKTLVc769atm5gcL4cYwcirwPpCNfYhqpx0EYBWgZzD0CPY4ChF3pXAtpLJ44SFRVdBxBUgFoUU5LFglVZUuUUFIgqOh1J6A1OpELI6mgOAEIBfmRszCJXjPoACYhquRKpPXwKFVoJauGwBKKxMV0JG1QWiBKBN+ZMJAYRKJT9WyMzKC4TPIEW1zuWGA8KeD0QAKwW+8ldInvF5a+Ft79/90PL6bZtzt1wbmEXKjWhkx+x9sE0klimZ+8zo207bqd858TcgxMqBAuccASuBnHxXRbMH2hc3uh/7m+u3X7T1+HRnpAmUG5RGJs4TJ0oDgR1kkTKZK4XAsHAeLsoQH9wDMKzPAxlFKmpNYZCBiIIsgmDZA1eFGgKRRiJSYSVcZQ4q9S4UxiReMCW0Whpg7a5zJ7/8aI+HCAG8AwFHKHbd0aFHt3qfW2+s1gBOaeyl/XO3nSvegQKFgfMtElDZ4hEIgJzLFTwl+0LZ6JeZpngixWiUSGMhbVaUCKvkmgmVAiQgV2U1AQDxzkppUkvV1WExP9CkgglPYOAAQBBwDgdKK4WEYf4cQlLFpl3RqEGsTIVDRvcgxhhUlCQJAFBZZyulEhOJiA/KIQIiYq3NXFaMfBEwFKDCqEprbc/ISISweqSxUrjIigps2PqUj89DQXcJyZODTHQ4KoEkJSLChFTt2ouDzEVYYMCq4heRLMtCv+5QNJETF0JHuPY0y5ElXAUR+QIOrRDRkEIgQnLIBaJbxHs/Nt76xle+v3fv3jXrx53PERK2LvggW3DKglPaYFJjckyx06DZD53f3HXjtcf+UnwuAEMRpwNaSPVEzY3Evp25ua5WAszACtPMxSY2UfzN736pMbL29W942/98/l8nGk0/PHT3j/77tm//29v/4eNnn33t0omOFdV84cvXvfgdp/be8/DN38mOTY/ng2TtlqGzzrvu2hvandlHD55UCa5bt+7Yj3/Wy5dvfejb/hufW/+mjx6/+BVvvlRGtc85VqybES/laDQgrsjbAgBjGNwxM2skLjToimEDWNE14syLRRAwJNPHj3jLktKl155zxwOPkB966IGFP/yL7fX4xOe/33jm9etvdI3vf+nW2Qs2XHfVteae3Y9OH2iMTqzZZM50fnTme1+D9b84fPZrF/pp3fY6LtZavPDkd//g8G+9Yf6ZH17/gz8RQkfQiqID88uPzzXP3zDh+mIilXtSyjj2IT6gKiztrXdVIVjFIp1nWbez7Ee8Mcp7n3tnFCwtLraGRkeGRpi53W4vLy9HRmtNoHhydK1SqttdarVGSGES634vEwtcT5xjO+g06sNJA7I83XvoxIEDez//yc9FunXBrrOHxxr9Lp44MjhyeGH7tgvWtNav3yZTG8ZHxtfDYOo33/rbVzz9XDt37tDYyP333/7wY3dcdPEF995z39XXPG2h39Fap2k/SGS4LA/zK40U4A/pIAvmgGmaFl0UIEChrSEFeZeQ4OTJk5s3bQFBZ733ecBkgRSqJFKlnDIohB2eUkSEjlfmY95xmI1U2ZGDN4uILV38nHMIEEWRNsaDaK25jDIFzqtM3qoQuw8jZEbEMLIuC21VjVyEEFaFDw9F1gwXIatm6USFjnGVaYJXMSLKKimt1VNTLAfFIYr4MLIDQC8B7alWb3/Llq8ayXqQgCANHymEG2DhQhxZmNkxb5xIvn/3w5+4V20+J1lYXmxyTDbzJLHV4rzPXFBRTjD7xNQ7DGSvOfVvZ6QWs2VEoxQLD9gCy8IJu2P9mbe+dPytv3hTG82Z0+2oZsAheJWjKMzRwECkRsZEkUNvwFTxUSpKAEIBd6/isgBJED7A1X2SlPVHAlTFZcceET2RglIBsdoTh5zCDNxhdFo12LGPlmsS3bV7tlnbihQQvCyivMp4YjpePNd60CA2mGL0PTPvHDvHcx7Hses7raPwBIuZXMDTaYWAEazInEkJlaLSu7OoDssSpCqJsFpYAjrnxDNCUXuF0F88agzSSyRYCH8U4UMKCjWH10AgSEuCiLeFZH5Z/YEwh4xY5e+qBi0+VcmtL2Qoqg7SxMwc+HBZlnmQOI5ZvFaGrWMRUkEfvlST1aSDf5dzhZ4XKRGxngOluMBiBOvcshhFQBAMkDMXaMosVa3svScQozWizm2h9A5CRoWxRCHbXiXacMDDvzsuxxXlixd+oCFFCpQXRDSoCUmQrXgu1LNBY+H8KIyCgEoDCIcFFiEAAbOJdJrKz372s1pdEXhxyOIQRAGx9waVkMvRW9S1egSRyQQsm7XX/iL3lxYe/CbvpNmBnqq5dXULAO2cAHDrcHb1pvYPTo1owQQ1DKlBj6yJB93ZW27+2O+86W0f+scv75tbOLLn/vjCpyUJ7N974suf/b39x38Y/6r+7v9+YWbPocMHHn/WM2668Hm/9Ol3vgUe//nhx++68bJdn/uvD/7sJx+uN9c2x0ZdRq/+5T8+/8B0tP7K3Ve+9hnb4GnNrmZVM40O5t08NzoRZyvEXPHGhm2BFH0FBsEEwaJMjihlRiQk7qeZTsZPn5mdXVxoNOn3/98b5/7+H448nj1+5+jOTe3hzcnEhtq//stHfu2mF736P/7x1r9+97HZA1c991d2zvzENg/UtOt430izZufr22nj3birp00i/dRGjq0MhiZ++KszL/7QyJMvSE5eiDoi4TPOfu2xubUj4+MR9/M0jpNe7owmQcXMyAwikdK1OFGkjIrCMjucDK2V8lm6sDjPAkQEwpEGbSDS6LMBaYWQe59ZQlQwM7fcTKLcZYBusOhTm0YqQqTDJ5aI5tNe3/t+npKJ3ez8oelTeURrt561uG3bRBzNzs0/emb+wbPPn4r0ltQ+evf+WzCtD/LGtc941nCj/ql//szXP/EdhUObztp07dNu+M53vnfq5LGjR89s3rTx/Is3pKmhZez3+2HwJMzBMS1L0ySpJTEJgE2zJEmc42AaKIFRAhAniXOu3++Pj47FtSQk6TALCo0mSwHBKtwO1FOWu1HprkoKAXQFbhIo5CmqE0WklVIg4EqZLWftYDCImE0SB+hm6JMUYDWMCkxKxEInqPrLUF7wivAkVfvcKnqGUFvBhUQk0DyoEN8QcR64uLrq/BfRo/zfIguIKEAqlrgAAAGCWDQIjj2K9wKIvixOFBKj2NyTUaSUFxHw4VOSrDRlCpCQQBWXZg
f9P/rC0bGp86V9JrZNYI9qhMX28xQAJOiLsP/w2nedHx966bF/O5NNEYF3lsig9Q0hnbsI6INv2fbcp21pTdQPnhlocloZHmhlODeS6nTI1p1ljhgsW+/TKIrYETwlO4bhInjmAHSDYrAe/vEl+GilpwEO44oAvQKA3Dsum03ln9LrF1MBECR2WPeotFGuph57/MTPn+BkxxhJ39NAGIEbbuoYkJhFw15HSLl3CtnlfqjR2ji0niUDaSBnRQGBiArRS+D2oNbiGZ/aBGMJYqgar2IowkWlJQBMgkDMTAJQCoUSkWgNCtEVZhJYgJMZn6q4VUyMAIWZkChsLp1XFDDamNo89G0CIgA5e+89acWlAEhIz1IgwkDDCo5Pa131lxYZCLx3SBhI/2maKk3sxZAKq98wYKokcQio6HyZbZ5JqNQRuNQWFZFgVlXE7/JLAQahIlSkmJywd1YQoihSCC63aZoiFUYR4pFKTpFnj4ThIAdV2nBjw6IvSNOEX1GpQzvh4BAKihjEiUeAoCestPYrKjhhjeS9daFhKNjGIqiw0UgO7Dt81113NOrGsyUxznlSzJZYGe3YEmgQ8dZrHRMOYtWoDSW7nrf4s49/Z39y42Rt2wj0rW5EzjL1rBKRJxdbb75getdo/zdu3TlQ9UZs+r6d99OxZM1yu/vJf3z7a3ecv+uGGy5+zXPz2i8PYWP/I4/NnX5i6ryb9g3d98ht35o9sFcgPvvqXY/O3Z4lXWhm59x47f1Le+858s2t11y565Knn5qeu3zL+vXuaPuGax/YeSPRvmvGs8MDpzLlnIhyRsVgEcgXbwiwcLD4ExGJtGHmoPMqQTshlJtCDlkLEUjqvT15ytayeIJP9aa/dfuTz/+Vmz7zuc+fOd75xFfq17/EnXPpjFq7485HHn3y03/7/EvXjQ3ix3d/etvFW+PhiX687/wxPzy0xsL8geMfranfmuUxn3NOqUWJ47j18JVLW7eduOpdW255N/mYAbME7jgzd/GTCxduafZzj6B8pNF5IAQRQlLeg8jJ7pHUDU52j0IxBwXHXmsTjY5ONRut/qCXJJrF5WkPAE6dOebVHgvHfJ7lNh4fvgRl7WLntIJFoGnPc1GkHUu329XQHG1umMkeiKKI1bxPrKWa1PWGdS+sxRu3bD0/6yee08kJ2bgzakTbDa4fZPPPfKaLh/czHgH7ZOcCePThhdm5A93M7j001Wi2s2x57xODiYkNt3zjR1/4wuzk5PhLX/KibdvWdTreOsmyLLNZv99vNVomNtb1AFupZYOe2SP7OI5RyGhjrXXes0iS1BYXlybUuCkVRUK6LRq1oCUDGHzoQr3qxCmlgMN4F9kLETEUnkWoKAz3gw9SkOiz1npkCPORUojHA+a5S5Komgc6YSMYpm1IxYmiAF7lIARtPaEyEaA49goUSmEe6QAds5RcBRRgBGQpN7ul628JEQp9bdG6S6ibWQjQMwD6YtlMiADCAuA0kABIwJiVjCZFSlCUKEBdWq35cldNAujYIAppAREQUOQQBCypKEdJ8lgo7aA7d7z5zx/dfXp5anxt3u8n1kQMy5JZcXUvOXttAPrCfzf2sRc2bnvjib96uH8JYTbgfi2K62i9iVNnzlozd/O7n+fU0OJCNjPdieMYw6YAM2RUqGqpziUjrWqgvBIBajC7UE2X6jkqsLPZh1ColAo5uMoNyoaDzQBcmMUiWuCwrKQCkKwDQYURfGqJCIiC+SsROfbgwWHdUAYg8325fLz2od1PdmXdOKadPMtAEyBK2244CgDDS6bWaKT9LGcXqVp3MHvJ8DU6qXnMRUBizezIeQEgMj5wfgDYifcrFm1QTJJJKUUCsTbBnqVITgEiFMDJZZXghMG58PZmWdZqtZCBsNQ6FmbGSEdsHQiA51BlWlvo3wKBJh30OsK0ObyGtSi27K33uWOp6sXMqshUBQGWGnNOOPesSQVMtYgYUipskcgLoY5MKIOCCp1iEGRAct4KA2qlCJk5LH4ZxSGDJgXB6xoEUQuwD8eyqEjY+aIYAQianUQE3pNChYTAhhB8UZLmzoGA0qb4eMYwM2dWtAIAncToPWqT2pxIDwZpFEXBdV2g0ADgEjXGIBZYG40CEBWWkSIQQLNkDIuAY1BKG0M6VOQAAD63AEDgERWwBu8iJXfeuntxsb1p06Z+vy8oGFFuLaBVDiyCcFjaC7J4gTqo5hW/wlm3ff//chq9/Y4db7rg5LM3LmYSocjRTvypJ9bcPj3yxQNjn3jO/p++dM+v/ujc3Ys6dk0EyXigtDoRxz/fe//I7u/VPnGuet0b+5dcMbFp7a//zjtOLhz814P34/PdyLPWkZOv5u+dtzP1Vy6ZZHjfmrv3zt478oaaRN1H+TsykX5raZ4I87HNvcWvrW/J397hAQvZICFADMs2AijUg8r6UoQFwrCkgIuEMRaW5RwG5fsC5ME2e8mCUvJF+97oDKjnK7W4zI5+nGd3dU0yDnhDtOROfjQ6FEMzXzf3YHQ40nGsc7vc54VF50lgMWm+pwNRrsOQAyFHAsZbOJ84dYTeFvXGAZAHhAx/s1vWPlmLDDB4Eh1MZVdVrJC6wcnO0X+5789ilYQwJCIaQML8VhvlnPNWasmQ996YSWdrWXpRFCd93x2K13e6g8Hy/NFD9uSp+W63NzWxLR1YQbVu/cRPf/SDiy56+h23/6jTThuNWr/Xvf4Zz7npRefdd989oyMji0uHjp58gqR13bU3uTyZWZzp9dWaoR1PHJjOJRsbWxbg4aGp2Pjt5w0D6gMHv7Zz18ax5Mpb7/x8vf60Jx6ffdinvd7iBedfMjo+smHzmvGJzd62SBvSZD0BaQDbbAwNsjkCJARvGdAKeBYhJiKyzg0GgzBszrKMmeM4DnAGDMmnBGcJrLQyHDw7Q1AL+7ASvQnl9DUoCYQNMQepGs+uVKMNvTIz2zTDihdU/XARm1utNbOEaWHgVLCX3LtYK0Slg8i8ByFBImAhQAGUEjizuimBcn5edckYCAUERFQNrIWDpkEhzgWFHQ4iojgQIuDwCzCAzQCCQalImHs/dXYtIgAihTBQWCiSsCFhQyC55yiV3K8Zjh548sQ/3dNZO7lmJMkA9Zn2InvPsT5rCI7PRrkXyN2vj93y1vEv/OXMb393cDVQ5lmaoEYiWrb1sXqycHT67b+yVQZDc71OZOIkijloSAYaNKFlr5GYOc8ydGSMUUTChTAFr2ITBbNlI4oKxbTwRAoGWsY+5FFm8KXid0irKEV/DAAogESaCOMirIf5h4gErzrM2Xvw6JtRknbbX7/teDS5vZcOwDpjDAoiUW/NUVxu4aC34E2EnoVFA3K0Y+2u3EMUE4G1lokUhk/uXJjThJVmVTT8n9dAyslH9bJV70bVH8MqnEG4wNDGhelruJNcCV+vmh+EsCgiwGIDoU5ARPLcBjxT6NUCSQpApGxt82CpFB4ZFcQtZI505JwL4ch7D+HOeW9EiQAXGnwiSCpgNcSHI0NYNL6hg9dKIbMXCU2wUlQqR66ws1yYNgmAiCsQZCt3z
bbNbknAs+pCY8iogjsppv6aLedTc8dP3PH1u5eTXUiN773pjMHhUNv7h964XnXHDHr37w0pe+9I1n7Lho88++O/3+bceWjqd904uXHzh8JPRrgkjkSf3YWHfx1OLlK5YtXb5s8fJlm084/uTTTl5/3FosYxgWB+v4+GMP7fjm166/9ZeDJdNHyce9h+Hhh/CaKxdvWLPiOWdv6i0+66ILggurFy1dtHFThFHXUY3FIA18CgX7w9XsgblDofRqqnNKjgpCn3RcKChBSVCl1ClpODM3Vnb6/WrV6mWf/ty/vfZPXnvnHfetWL5mNBqpHymhL0NKUSsoioJTJCSptZKI6oIL6H2MWn5q9eATD6eTh72HOyhJCSkEqhJBAh7ueuw+1NgpFw81OV9AXWtC59ytt/z6T1/xRySYlE2GCK3lTdyS+6uqso9A3qkKtfWoI1QC0SSMbTHKGRJoKcVaJktqyiLZ6BBtLGyNtdoIj8gFIlVMyXZkUZhg3n3HF8GsHttWVphVNTIz6CjWRKQIvg3l9ETzBxM3SDSviWO/nlizUkdOvWgfo41E2oaCrDtBRGCf0P6NAAMokKHLxGCw0DQx81kKM+XJHlFFRaScXBcsnLhJyAvnZm0Fzaqu6aEdOHKEqDaksi8AcOQcUSXJtJbsH3Pz+eyymIbAwlZjWFcGYY+cUkqW101sua0PcIGqrZCyMqf8iShTuYhsOS/N5E3NwhMBtKU4Yzv3QKPHgA3JC+cTNdFKMz3M3l4RggLUnCh4QDR6q3BC1LGx7vhEp5TRlVdd+V9f/MbJJz+JPJIfPO/pzz26c+adf/nqdWs3XXD+mccOpd/5g1NWn7Dj4NH9n/zH67/wJXznO97231++7bFHb33N6y4+7+zLT9zy21NLyn3bZofjTrvd6EljrGY1dMdGXKWohiSyyI6ORMQ7VzGXzpoPbaMtokMEGwJLg1czTRiXGtxyCyOw3rQxvfe+yGhWVQLogKs1JZUqRUsbmhhYoCzbQVB7W1FEuQnNVmw1Uyx8IgIxcgIjyDYNIsB8nkOEWIPzEFxRx9mxcb7yK98aH+seO3K4Gnau+sa1k4sWM/uVx+mxLbU+SNOLwonHb6hqdp3quuu/++n/t2ftulOe8pR1q1etn5oYP3BwVkGKonCgLEmNuYvgkaSBCDkEWzJZPsMFk9U8g2kkmTSl9oMkycOxNr9aPosxlj4YTpiIhJMFCFqgep3rDEAktLWC1HVyyTk06Zu2orUHh2MiImiMp5SQCZjFLdiiJWaHRMEjQ524rahoAfokONLEGcBlLvcsDeIQuBmZtEnUKqPMnsgOGdCGl3bI197Z9hBa4owxZrdy1MIHBTCiJhpvOGkWmva5dQGVGCMqIHgWAWeyR06FrTPbPXP0Xz53nV8UnPjRaDgxuZwnDiRaynNz04uW7N654+jex1974eQbN970he1nX797yV0Pf9Z7v3rN8nPOO23T5rXTk1OLFi1etXzN5lM21aOqqob9fv/xfXurVD143703XPfjmQOzhwYzpbJWs8dGRzecc/q6YvrpKzeuPf+UYry3YtGSJUumoQQiTH044gEjHB0Mkqa+9EZ6sJM6I60Q5lzRKakYjeqe9wQingSYavVBo6aaBKIW5EcjQXKHJXZA+32ZXLX2E1f850tf/rJHHn10zdSyYawd+5ojkobCq9SiyblezaId75OQcqqjdwG/Pz3cE+rX7er+5enkNQlEAvZ+UPO+A4f27n88eakigxdKQADKUBTF7ffcs2/P42vXHzcYVpk8AqAgIXhocIudIgiCijmKC6ozIo5zRv4WMOtjRE8Owvx20tYnli88Ocl6A4QoiE40USOu3h5OVaVGjVwa0J+d4VAW2jSxGSXgHSA6R0VR1Cman4gXQBcKEM2jJFQBBYQEQOgK8KqKDIhkOzT0JYIgZhMuc1YxcobauBMXlJmirihdKDjbhrBRU0QEAkqT+G3+bE9FrWy6JFbyazPaqklBFYkU0KbcDCoL0m07RBI0Cz/NDCtOIkrolBQ9oprTPSOSKVullLz3AQgRnWtwWFl9GmyIbNq2NvFPtrKyNXmjeavZlkoDZj/5trO3tXdwgZkpOFXVxIJCTKrK5ovn8jydjE5oRX2D+ZKG/qiqiI4ci7AjDwCoSQVBSgwepEJ0wZNwJQJIBXlNARWQRcqi4zqRgIazs/fefc2fv/qvZ2aPTo6vuuvXd73z3e976tMuIJT/9XuXvfUv3vPLX91y74P3ffRj779r63u/c9Vwojz1uc8/9aorb/7UP//b7IG9f/UXb1u5fOW2rdUs337Ho7f/7Nu/HPXTB9/zjlPPfsq+w3OuSyMpe1oK1QKKgaxCceoAYCQJvE8CKtEcseq69kh2nwsi21UQEEQGxOC9MCtAHSNbKQdNMiAzqcOUahEJ5ACQWWrIwKKyLGOMmrjT6ZhjKzWeCgiICgyqhClGbUUkHTrnA6txSGOM0KhS+0BRrMhTA04732hVIgAoIVcRHfa7pTu49+DPf/lrpnjWBUtXbSyWrdi0aBGVxcTadZOvPv6L/FU+dqSq+jNPu+ypew9tW7tm6pKLT7vl1kf+58rPvOG1f6JEibUokJMIogA6MYwYCkISRpHCbD3IbO8UkkC7+YCMkFLvGvgBqqY2NDgiYta6BiBgILM5KUx13JgFERC7RRljrIejTqcTyKHzSQVFnQHaEVkEgquFvTjBbCaoCNIoI5J3xgEzsab2CSWkvCpLrCIZui9SeG9lpgteGiclVa2bmjU4n2JyRFCEGKPHeQVpG3jUwkCYQMnl8p1VmE1NFpTZJFYAQNGLArHty7zaTloFVByCuUhATKISJaHDThlSVXsoQLzIsFZbEwbnCwWloIicUkCKJTvAKgYEKYPOTnTcZz9750P7q/VLj5s7fEwWLzpwaE88NFNVVQG0adncnT//wbte+/Q3uL85MP3M5c/56ouq+hn9PbGiFSsmVqxYMTc3t2PHjtm5ODfYd+N1Pz28b99wNMeUyGMc1r3xye7U1PiK6WlYMjw6t2P76MUveu2zL7+40wGSNDdyoDqaG+7af8wVwSA1wXlVJPAFeoaq58eFxKOKeE3REU32gk13jDcIIRc3XSwTpkSuBC8uUmLxJFXkmI5bs/zT//7xV73qVXt2752eXlxVVWmWFRptI6OSyAkmJ8ICCOiFUYeu+5njBn/52PS/VrC3V3ooeMSh7B+d23n/ncpHponII8CQkWrnRFI53jtyYP8jDz2+YdNxMdW+6GZErHolMGqcVUUIiATkndZ1PeqLwFhvnDTEFAN5Cg4g8x6l6bhEBB0VIWidBLSua1uQOSuzOWqDmcUFXCNqBNWxwQfY+sMFb3QmxAXKMwY8sm2OgiQuffCo85M0MCyz4fEVFFiayS8oSjLeS9E814iEC8yOLPktAIAYtsU6wtbNtCUAZPy0CfpmlKMCEBI+ESPNzCIKniKzUsZmpxYbbBMkme/97c2YVIKy2AKofSceyTbJ+T0TUgZ3P6EcFhHrq1zwTcdqbXX2xeuM9VoQUDsfY2YiB/MS/0
0ynWchASJSOwhV5fQEEGkuRxBbydz5+5ph6qTihBnREwIQiox8AQDovRORWJN3JEnqeoaROn1xBJPTOBrp3m0P7j54xZGZmw4f3tXn4ebNl77oRc/+wpe+7Fz16KP3f/MbV21Yf8Lv/+5z9u0+ev21N339i9+78NnnXvLMh7/75SMbNp0y6N+5aFqe/3svveuR+2f69cN3bB85PfmizU966qk3fvOml7/yVV/9+hfWrznlaNROEI3NMAAAkq0t0Vrb4INp9aqxhxDNzFxSptmwgQaYIfv+kvMevQcVRVBFsY1+g+2yq2r6TQRgiB6rhbtF2V5JtSOd1cjneyAjjaSUGJS8Q1GAPJDKz7NFbMLgMjuFFGyYIZRRxkSEmgAVgMqi98ubfrV37wPL1gzPPHfj0Uoe27n79ltT/2j3/gcemtlWT+xZOjZGv77zsX0HedGS1atWLI8TB5/7u2evXAvnPO38ugIlNJMDew+s7L3PZBSElNJwOLRf6q0Z9h4XzGZsUKzcENYVWlAbNICJJIKoFAggr1SxRV0B2MUXESWsUjT0ADgDdmSKBHhHCKgOoqYUpbX/agpPNktHk9yCbH1o3XxmWDZmjpoa62uWlD8mmSulSXdZYKmqqqoq733R7RRFYQhYaGxOiMgTRk6+IStSo5hmbyihAotjk9MCQdACBYMHcAKlK5QTCFJR5AtVIJOgoAcHKQj6mkdlkDpREcEFLySpmkF04jtOfIEDRgFAFpPQmuktmfjez+776GduK5ZN7z9yJHTUD/ctLuO6Mye7Dk85df2KxeFPXrLpBdveMNLlvz7t748d3cojOTJzYHZ29p57Dhw5PBtHlSSMNRSldsc9BqLkguvMHJudG9a7dm+bOXpk2J/bvXN7IH3Xu9/5zEvOG83NzRyMDok9Fs06DDgTvBEV86SQmfOE32S6TXCsNapqRSBK523RJnazEBHAFYE5hRAkxdn+3KmnnvgP//APr3rlawaDQVmWKSVW8Q6Z2XunKgDz8A4wqBxi5yurhm/ZMXj5rsl/OsE5H8UnkcMz+/mxQSgW08RSLboMTpEI1EFyEWQo9z1w/0WXnc3MGiN5W3x4VE3N3sRCKDNzTMwsSK5wSUVTNS9m2ljMYUN2FdNjSMk1L4ILvoQy8HAhT9WuXnDzwKu2RebE6DyAMGeMoffeuQDNNtOeFwb12UahWZHmf2F4RZGkidCoUEpEwZuz2Dw/NS0Y/kCzwW37DMvTkFenyYDdC4MFLNjFqiqreIOaifV6xIhgvCY0PWABltSoTBDmRNqsorF9Y1n3Jm8H3fxwwNRxFvBrTdTeNbZ3bSSyK6UmBkuojSBAqeXC9wwLnPJCCLY5aOV1LIK0rJX82QmoIR2hgLT1AQJK3ue1N1gXTNeZmVxy1HHBxZhUPZr1kSpz8iWriMdSOAGUY2Pl+OI0IyFW9S+u+Xk33hgmHqmLn4SAm9dv+b0XTvzzB26cmBz7s1e/6XVv+PMLLnjqKSedfdHTnvq1K3+0dMttf/zGZVd/8d61Jx135rnnXPScQxqnPvQ3f7Vsas39jzy4fsPxHOmUM06YnTsyt6e/fO2qpz39nKuvOvLJj3/y01f8v/3b+iWV7BIIiYhHsqWpjexSSqmu1UoTM4ByGV9r8q0MyoY+NTcks4VGU1cnA/yKdX+BOEl7hLRBzmNMAJJMj9A8Im1J3EyVlVqtvvyz4F1BxKCmB6ItC9uwhM3iHxHrFDvkVdWErHEBsQc1BOIUq8nJ3o0/+eG+3bvOPecFf/e+r69YsrksJoZDXrN+ctOrzrgj/IJ52K/d4hVL9+w+Mjs72x8suf++x5yb/cyX/u65v/WKI7Mj5wJSvuOmsMPNzJYADWSgqkbDNSWBhX7AYAZ8AKCaUvK+aGNJWyi7BSVgW3/bMc7eZc65IrRjW6sUvffYrGmN7144ryAt8pCZOTKoEkBwHsUinoKCxKQsHik1SNH2rlERPBYOsK7rFKMrgh0GFE2cWjcObcbgJpfvg0+NZVNSBpZMTzL1+UYWFBGBnKq6pAoCohqjceXVIJzsQYCBBYA8IaEHRwA1k2MihOgkpUEQcXWtruwCJESoU+Fw5FClLkaqLgjWAOMaQkkwivVEZ2rnjiPf/t6Nz7x4xZNOXb562arV61dM9WDRxNLayeyRQwf27jg6c+wpD70LRjs/PPP6bf/1ZWZGCH5sSafHve5Ub3yZn5Kjh4/OzB7Y8/ih/tzgcH9m5uixONdn5ggSnJ/uje/fu+PMs578sY/985PPOnHvvqMpgfOlJASIqqZ/n/GJIlLFEQajXrvWN1pYAdQ7Z7pAVs2ERjqQzObQKTlDHYLYug0cAARfKMixmcGFF53/3r9+3zvf+c6y2wEQ5xAAnMeYauccKIN6mN/ZIwHSXOh+dc3si3d2PrFGqx5S4QsWSEePzY71VtZJBEEkIjCkSkGQCnLyyKPbkIiCV2nE/xEY5vOXc84cyRCxQ8QhGNwJmq5MxHKKRW8FADVdypwjwJQUjSnTDiQJ8p9YUAYxySmsjIbXdJhtHomcWzhoNJ0kxpaPKo0yq7dnwCMlzSrK9qCS5OTRPi1EVBRFZLH+oO3Y2rrbvsEGjoHZGxgR0SMoYqtsYFekTdVt4CA1NUHzaEQLCuZaKqqhcTSzMlkacYD2lzKo0aly8FLUZq21MLMCzE95tXF6b+8ZtAcEgZC00SlpwT5KGQ7XBrV2LU1ExlT5DWIINki3dr5tc2ki9OR4AcBIWk5nZGyy1PwlAhDGFGPoqFBERZRkFsNIo3qknqA7JmVRDgdx9+M77//Z9ffufqA/mA0br924eN2hudnZw6vKicGwxme9YOnM7Mb/+rcfPnDPg0+/6Pd373tg5+TObdv3vvrlL3zs4CNnPemctRPdu+/YdnDnzLMvf+bsUbf+xFN2bX8gVv19O/bODkdFJ2w+bm16fO/hnbuLRYtOOf74t775LXtnpRO8yEhdCU0Wa5YoxMBJxaaX2PDAiKjV41QAFLRVDQBk4pYIp6xrkJevJtrpsT0/2uiuGPilvbUmegyNyYnFdFW1DVCOSgtU+EtfQACxwplZCVXyisR0UEWkTjUSgSNT0LRHURMruRiTcuAa7r37vk9+6qOvftWfnXPW/8fWf8dZl1V14vAKe59z76341JNz50DT5BzErIgBFDAOBkBHcRzRcczjIKCOA+gYxlERMWfHNIIgiEgS6ABN5/zkUPVUvvees/da6/fH2udUNe9bH+1PU111695z9lnxG56bsuYMq09aPfMza9PrpgAg36P8ilZ/U1+uX3z77bfPze6dmT11zbX7vuJLv3FlY0pi3EXGwg7i4Pwr6XwA+6TYNbsqmPsT4teFOlm0nHMGjRx63y3v8KjTl/g88AR32o24KzcjoOPJcxJw+xdASdmVp/qVec7Zl+NYJO1M3TkNwAwsizL4vqYvxPtHuFVl5ioMsLMRCyGEuuprXEFwoTRX7xoMBm5aHEKgGEv8NSNmMPcy9HCP5v8pKREBuyQwAKGJWcqbUYhQmzwYjAhj07QhRBGpyBpkNYyKj
JyDSSaccq5tgAg1yzBGYso5T6eTyQZIRgqbtqkwkVRNJ+Nk+Qfe8F3DkW6tXrp4aXznfZ+ebly6cmZ9+coq4ZgkvHrpI9eMbntX/M+L17/gyxf3jYYLSPHv/+EDd9378bUrzfbWdHtrWbMxZjQ0GlqwesCDuZkqMjK10+bSubPf+u2vfMtb3jKcmTl7aUshQkA15IpVRKVUIYDFal7NsqSuMyrNojd2jRUUHsWQc9bptOIwrGoVAZG2bSkWJ2zstvgxxty0YGSgK2tb3/DqVz1+5vRv/NpvLC3uEUna3cRy2FyCVw1AEZkYEHH23SfG33F6+qpL/PsnAgcMAYERKWVkyorKoGDZCFRBEKTi06dPN02DiLar38MehpmyZRFIRn16Khx3VRHXn0IiIjbUDhIIAMDEhobQW9uVkO7alCFCSk5gNKReTQgAQM1tlwFBpYwQvBbUbG5njmo555SymZEBA1q3Sw5+cAMSxUDQUYbUZS5KPvbnoYtTQTX3qKvSbvbiuoSOgIDOpYTYs2BBPPZN/c54to+hndA8AvZqyogI3RTFESiKhdpUsGpWJHEQ0eW0ulwY+hcH0U7iHf1+dP2rfN5BtF2fugRBKz+vXmt3wcueCFvrA1AR8Nu9WgDAQP3P9FHS/ym7MCD9tfCElEQISinaX2ev2iQjE7dNjrE2y9LCaDRY2B+m4+aB+x96+PHPzh84e+Hc1j23b9934WPNVZNJc83ZzQcuXfn3Q/tOVnig2dz3mY/MPP95L7zlScfe/uZ//eQdZ5/3vC+OQ53bS4+ee4jh6x574NLNTzv2pKd94fratJmOqNp86NTWTFh60k0LdT28uHm5la2NrZWM6fLja8/+oqe99h3fuLh46NxaMwyKwLk1joCIxQrbQE2zZjEFz3kIPmJhREaKyKkb0VNvpWdGBnE4gA506lWRoRceGaFbDvQ3GsrZ6Fs3KXv0oq+yu9pziGwf38119pmZWRGsA1OU1G7qDzn2NthdQwbopjhQD4hNHn74rr1793z367/7He/41fvueyDjdOFFB5tf2BxTlpBAQQapXYDl/3LuzJ8+uPTY0qmzjxDIV375183UR9a2JATNOTdNw0UI3VCNERQKJhm7ZSoiBh/wqfZprP9EESMHJsacVUSzZf9oiGVr4x+tv9rWMbClTa7gt3NcRac5UwzQIRuo+zKn5HbIfGauY3TlTlcyYUAvd5jJSVNt00DXm3axAAyg7G6c7sDkEptZxZeR1MHB+l9ymEhuk6RcVZXjOdSKPX33sPuYwgCMh0NXO0BVtQzgBpQMJAFQTDk3BinnljkiynaCQQgcLCEZV5WFwfygCVAJbGtut8bb9z26ubJeHz6wdOB40OHlZvW++z5zZX0ltbC6vLE9uTwzY1trqxvbmYQp0nA0H0LYN7f30J6rH7ty9unth7925sO/c/GLBl/8/TfP7mnTliFvbco//9Pfrqw9urR0DJFHowFzlNxAxoaamWow3Z5KFTkM1lYuaZ7+z3f8wmte+01rG+PN1S3CKtQVmjtACiErKQGCi4IhUOAqMkhGRNEMVm5Z2R0AtG3rQ/4ikWs2TS0HEoVsiikFJAwmIiqZnXfr3LMY2vGYCL7v+9/w4H0PfvCDH1xa3NNKE0P0IgkKFwassxwFCkQE50fD9xwef8eZPX9xHSqioAqFENS2OSAIgQHYwExdIxKpWl/f3J42c/Oj1KpBRkRTVJXy5PpYSEsDJyJEoW0zM2ZJZlZVFRiqKHIgLB64vT8sFDASaJeGiciozMz7OOyny00JyzBJ1DqCTtl17pI0QMaqe14cpO2YRzMr7phGVqxdTEEtBNIs5nLEBgxd+lFhl0nvSHK7nwdwKpGZuViVE38ditazFDoHpCdkHX+ysxgbETlBBcuypshUAlPOWXLqq2ZmjjG2vkAywE4po39BH0Hv+jaBQ6LAZTF20i13sl99ILPOIFlsJ+P2Ienz8uhOuUTkTZUVYLJ5IxWMdl+u3b/bVZS7WDYA0I30VXU6nZKBa4sLmAVDxJwkhIDSENNgyPWinj195oMfuPvB+x5eXtl+/otuufj40t13TNfW2+e96FmfevQz99x+6pq9exYWvuWxTxw8dAxXtt7fpJSWJ7Ojg//nj1/bblw7rJ5KgacpGNP29uT4wpGt8ViUFuZlMr6SNgOqMM4NZ3hu79yBkyfmBjqanbF638JwpmZqWZY3EitlymR1xZS0NQTVbIa9BTozS2dD5PwfdGoZQPHk8NPfy5J3nSuoOYmqn/OEEFxfHLsdfDldbbEINELFwjRWVQUrE9Ein6KkhoBqyjEwBhdeUFVgMgTn9feTcxNFosDMVTAz6QDz1E0ygEktDurR2c2Uktx9z71v/+VfrAbxe7/3jb/+tLdlSnZcYR4AAA5Anm/3bu575GsvvHLyA6PZD3729jtSO2rbJFIYe8wcidO0YWaFMjDouYN91QiOwe7Gif7lbbGmXCjgIbpI544/jGtfEyAR6M58y8ykTVVVKULbtsWmhUraTiqBArgjcsra3QUREZBS+CNp59+XOwYIAgACxyI7X9dROxXM8uBYSRL8RAEQ6sxW+5teTCbYV/XutwZWYCIKjsYywSJwhMVr0czARJOqMkJNbEaqamCMrNnDd2zMFDHMzBJTXfH8ILeBU0ZYa9Ly2sb5C2FlJS1fPv/wudWzDy1fPtceOgJPfubmA2fPnPnLC1dODVMYb2wlmC7s2Tc/v6iE0+2Z2frIddccX95KG02zvD48f2n15msHn/7gX46a237l6+/9w3v3/Pfbht8w+6ntcWotz86OrlxZO3Dk5PzePVeurIApETfNFECQAmFst5tRXSnlRx+798k33vj2t//PZ77g6atXJlkhhCApa2vucEORICKk3K97/BEAKNBFVVXbEQ2kTt2lr1xLgEJTKc2uP6pERIgZoGkaZ/6AgBkOBoPJZDIcDN761jd/y7c8/Ngjj+7Zs7eZTAOzOG+/I2jsRD9kAJj/3esv/vW/Tr/scvWPe5kqUxaRqooCYKRmAgJmwKoENhPj2sbG5ubmnqX5tmkMjJmRgiqJJB9PIhZIbN/k+FdZ2RKbQis5W0Yi4BKOS2aFDtCDgGqFWQQEWnQaoHP+6MO+b2fNzNMcV5Fxx5/ezBx1YgiuoVtVladhVa1Hw9C/uQ6rAkQETKCAoCRmYA4OdM5vn/xsl9Fm3zL6q5UP05US3hT6N7mjCYopMe2UHt6FiCqA5Cw5BwDZPTruIjh2w0ZUsyxoPQK0yBF2mVLQJTN9FgHkBQGCQsmPyICIxUkoa2lxbFdL0X+cvsLo97IVh76x1k440zm40GFB1VTAeoDqTnbfpTmQ1eX1d83Si7iHVlUlIm3bQidcUFVVEsp5wsDzoyosxoceemjlyoVsl+69++ywmls8QIPRoYdPfXph4fgNN10/v3DdRx76lxiH999+x9J1LxwvHxwcunNlPAr2/L0LJw9fNTx85HiaYqrnLi+fz7y+fDEMquH+YxvSwp76ZhrAZJoOzh1dvGFWYW4Yc6xG0xyZqJZQj+ZTrWNp
262sI03S6DgQWaybnGaQCgjAFSj9QyGi89/9UidNZuoK4m4a1ilMlQ5PVVkJ2e0tPFgDGQViLb45/VRgp6CxjqFUurTO6AIQUDpNUTPr1NmKUBeUEXcCzabWNDtpScpamgw0BjdsoHI2ir0V2KRNtHexunJleXXt3Cc/+cnz586eOH7Lv93xYfvGhDNkQ6F1tmXMCxnm4dL08tzemeXDD1/84Hh17dKNNz2pno3p0pXAQyKqR0Mzk+mUuIMiqrf9XpuymSVRRDQVAtqdgPvSLZuCKBgwlwGdJ6qyYq8rRDQC1B3cShbpYTiIuMNlssIpQDWXgHUgYUppt2pHzllEHF3p0laq2mtjUc+f7kZOO0UDWCDuJ3jMjIDgO+Bc4Hj+bHqzi2qtCSj4D2e01o1a1UJXspgPxMzQFAEiUFbNIE6AJqLAkZkGg1pBeMgThTzJsrK9eebC9voGrSzLheX06CPNuUcmV86vbK2d3t4+O2nWUrMxSZdHo/V9jwzuu/vgkaP79u8/Uo+uu+W6m264+cQN1973yKl3/s7vX3Xzl931eLW8ttl+9AwMq9RMeK152lOu+dTHPohrn/ubVz30wObcD37k+i/4wmd97au+bGNzjBSapkHI//G1r33vP/7zr/76W2dn0ARnYtWmbSJtWx7Ozky2r6xcufCNr/76//6zb9qzd9/5Sxs1DUXawBACMRoRT7OYgeVsXbByiXVQA9Xu4pc7XuDiohVy2zTZtK7rojgGSkSpzTsOykRmFmNEooorr+eAqeLgKqFt2x4+vu+Xfunt3/kd39G203oQU0rqTTAqFPQLOpXfzIwg3jVb//vS6mse3Pu3+ygyB8k5q0RjRBQggWDRSBGQoKo5N6kZT0pIRE0pedPHnaueV+0lcSCoaAjkpzelNG0LoTTnzCHYE61RQQ2RmZnI1Ld7Vrp2camJXSmmFMHeoBJ2ew+fuEDo0L1I4OrqCECIROhDzZRSM56EfqrjJZLr36kI+KaL1FR8Vm6EYERIqqVkLpt8URFJzkM17JtCBzl7TWVUZKz7NMNVLIYKWRAxIBFSloxhZ6xtHc8anQO6S/zIM6v/EeqQ1arZUSRIVGT9O7UHRIeOEYBpP5DufjfnjF2BRkRurVjOLj4Bb0WdjLP/VtIdArhPJGoKRSWo+6Se5HEXAK3nPSuYqREX4Hc5NFDwWdO2kZQHg8HcqJpM8vbWlopAFRaXBtbavffcs3rl3Cc/cffM7NKeg7a+Jrz/wtNe3N53h+xdevY/v+/u666t7rv7wp2PPjR6wdyzX/yKj73/n44f2fqyJ70q2N4TVx8YDJv18crKlSsrl1to1/cszNWj+eNHFpcWua6GddxLVaI4w4xklseYMJuGSdNGtklqJWaxdZwQxGE7zFWumMZxZBWOxu32gAXUmNixlEzuM0PEGIwAMak4jJmIuBp4q0I7ztO+ZRcRgcDuOqK9XgqYolrK5JD4LrjvPsOeMplZwXyahIFRrH8FKBBZ7C+4F4EAoFyspnugRHmcDAHIqbTknZmBuhoZIULVTCdVDZeXL507d+lDH7q9GswDwD0rnxEQqMAAbNVgG2bnRk2V9hxdmk+LL3r5sw5fufquuz74pFufs7Fhs7PzbW7G48lwONQsGQSSeR2mhN77UgehENO6rsUwq+R2Z4LSYYwDqCkpCGgqszIn/vrxa5rkma6vicGXu4j+PBqYYCc1hShmbju403EieMyNMRbZAFQiQnd6yOKK8SDaaecYuOMhdms301ItdSV1QGKk4gDMFDkAhz4BO/zKMz1XQU3QLKm4aAYR5SSutGqEnUsGMAIjaaBqdjgYkBBkNW11utVMxlv4wDk9fwEefhRPn83nL69dPL16+bHx1srlzekVaS8ZLFPc4AEt7RsuLs2eWFg8sO/ak4dfdPTI0aXDRw6fXDpydG7/3uFincfAIa9tbf3OD//Oxz74b6PqRMDrCJvZQU7YKOpTn7KUm3sunb/rH1/6YEX2Hf/y1EblzMWV973v0zkbB50ZjhhHDw5PfequOy0mrEgTTVNDHJosMyNdWb/MBj/3prd+z/d818q0uby+TRQzJqyolcyAYpBzU8+OWslRgUNgQOnGeU7sFCNEVlAAoLDjMIGCVVWhinXGWWKWco7Y7w0ZEZvcgBZGRk+w8SbBf2h1df3Zz7n1zW/+2Te+8YcCo5duOWcCIAqAiAqmO8u4ZGn0rhOrv3lnfvY0fnamGtrAZibbTCqCAEgUoaoGuQmt5LYVaNvpdAqiRFTVMecMRoYgOfVjsDJYJQSzULGZtVmYY1UNcs4pNaoFyVQ6orIzREQEMaNCbnCIR1kLUhQDd7tmLgI12inygedt9+oGI8CUUozRxfVKY4Dk/+7Rya9z4EElIgwYKKqqZgHC2NsOIwIHU4Us4LcwoMgOSoKZDUEIIhBQ4VmWR1pUVIop+i7tWUNIOVWZ1OFnTADQ5swGkVh9yjGovagXkTYnVa1j5WqoZtZJgpU9lqpCYNd99f9TAOsaVgAIhKYJswO5gwI5K1RRVbWiwC5JqE50MZE2hMBMbdv63/LnH8rAwaxjPtRELu2EoqRmklNVLH086DMzGZBhhk7OWtVjNwBwN5xQQOziYDSOHBRD0s3DB+fXVy598O8/9twXPHXvvpPrUwW49P/+9uMVnr4E4899In7LV77kfR/78Kk7JouL+zY3B6cfmL/jE5/dv2hp+/zhE8+86blPGt41edfjt91/8f6v+OJXP/tZTxsMOemm8GoznV+amV9cvOZpNy7FeoYoOHR22iICKVgrlbWmOZe6gUB1qmyWBaWtquGAhiICbWMAEoExEqBADhIAclZTkBhjDAUiEBzH7xqaiCbKSKaW2wYRiYKCQ3AVy1oQiTgxgBtwUTc58LvfV0JWzLTdwJUBp21LRKGKbdsSIKnVVdV60VNFEEVRYAR0X1WhXmwykjtikVgWZXaVflCRLBmBTEyyDodDAEiSBYOzcZnRkGZqTS3c/cA9Z86cOnfuj2I9bGV7uDZqdEO2BPZaOMJ7j9fjIAjIKbRtfvC9929vrj/rWS84fuLAlStrWM9UEJRFJKtZPZgx0RgDmjYiQBQCA8B0OuZAkSmnqc8H6rruk2jOucg3+r4tUkoFY0/k75YRMUk2k2aSAxaMOhEhs4JlMCNw7p/zgXoBZ0XDQEYgWbAVAMCKx9Mxc6hjFQe1bbMhCAcycgUpJQADR05FimXeqSKaBYAxADIqUlARYA5giALMHLnKOQsnUhzESpHMMCABkVYWsqaKpRVoszGaGooxUw6BUAd1gJpsEBURBFIDtnFlcmY1XViZPnbKzpyFU4/b44+3Fy+sb1y5sHXlQrN5ObWrAGvIG1VN8wtzV9+8sHfvkRPHnnzyquMnTx49cfLgkaMzCwu8NESo4gAUbGszbW/qyvo2XNmeW5qhTfr+//RTn7ztU/PHl1YvPHLwydee3qDAtnV5cs3RvXW18dFPvP+XX/jg0/Zvfe37bz2XBnXdPnTPnffc9omMNjs/LynJNIch1vVwbmZGRAGEiEBkJlaXLzx
+4uSxt7/jHS984XOX17dELBATsdO8gi96wWKM2kgNJB7nu1OBAAyIQOYZoRCQuAz7iJAMidlcv0gDcqAQgQXKSsCLaQrs0GFry2QrSdJd8v7M8dKl9Ze+9KsuXVh+05vetLRnn5gimqoRgaEaqalEjk07qUYzCLn+14Ph4Zmt77pv5j8/36RqVY2nwVhMkdjQSIwJWNCQOG5gEjKaTseWAkRSsjozAkoWYBBL3oN6snAmLSOaicOgSq8bCBm1M+yKXEmxI1UPDREp5eRTBFeNRjTQnZkQgIll7Fbm0O1NyhivqtosIFBxIKKsqiYIkAFbMwWNIQwHdchN662AT3PcLz01rU+ofIZDgKGqrJvF7ex+/H/GUFUVmCLs0GG942SOfTFLvcybUylEfVroK6WUEgPWVSW5GDH2IGcTzSmB2mAwoMBgO4Z2AEWyEVXBl9CiudshlfrAzCdjAFBxIMQswlWUlMU0ELsYveRciu4YEdFXjzHGnsPa10pkTzDEYMeNs7EoImZTfylyfCaSowELML8DvkNHVQTALKII2Pdwnp9gfW44+NhH/+V//c/fqAjf9e7f/sk3fc9mekDy+a126ey5pa//6qXL953/98cfvOUp18/NLH7yznPX37RhWzNXXfWca561dBNdc9tHPvC5372t2hdmrh185zd/2ZOPXDvezlW1NDd7axX22CAiECpORbc2VNXBdzo7S20zRcRQRRHhHhlHGGOV26RIg8FAVVtpB4ORptZ33u6oDGred3pv5BfK7edCCMOqhv8f7Lp/te0Uu30wABBZJ3RMDnfkTu9QAVU1GGLHugOA7MUTQjKByAogKkRUIQOZZlEqfhiKIGBcCnYCAAyoKTdNA0LVoI7Eqsoh2C62jD9vqMYUyuQNMOWGMATGtk0Bg9Agt/C52+8L9SLYOk4bGe4JqbEPVPVX5hmOW7FdzmNtATfCxvrWtVeOxpUD9z/6sac+7UlHDh773AMXYhi1uR0O6o3xZoyRCFSxzUIxRirNaAihrmt0aGRW4Cc41HoC9qa5vM/OE9crbujwjdWgNlEMoCmPx+MYYzUcSO/V2PFosVs2l2LXL4XTQRkZ0Aw1K2hu/Jvmw0ItG2osaHZ078IsVc0gZApEEQrbMYGZSKCAWSTnVNdRVVPeIqIURqSSchMAMPAURXOulVsMIZlFthEOkaJhE2lrCGGSZVPWT61tXzwrl8+HcxfokYfTmYebi9vjKxcm65fX1i9f0ekZgDMcVjlM43CwuDB/6Oo9h49fc+zk8auvO3H1dfuPHJpdWhrNz80szCFDVmiSNTldUa03Ocs2m0iqOEgdSOJwbl8QpB/74R//0Ac+dODIXrF8+tTDC0duPTyaOT0Jswfmjlw1e/tH/ul11z/+mhvP/fBHbrrn8tJswISYA/P8COt6bm4uT9u0NcYqVFXY2FgjCmCoSebm6scff/iLvvgLfu3XfmXv3r3nL14JIVCI/ogx72BKsGzQLdmOCxB0U5wMxmJIjOQM7CydSAMD5CzM7qX8+f40fdLyMSK4RlDwdTCaiUMu/FdyEkbe3Nr6ttd865W1lV/9lV+fn19k5m7xn5kjEOW2DXEgKRNBJJp793Wrb/rM5MTG4Ny8qhpgEyjmEFRbS+OQsykjzmKV65lqOGhyE0IIVKkKITbTabbCZXczBusknvzcgqfPXXLifWveNq2ISJWJCrNC3Y4vBCNkZJ/3ZNUQiLoRpkf4yMEAPFn0j4n4eAwIO3vpDjVdjFMiBzcUB9EQiFVVU04pq/OFAcGgnTYOf4BdPEjuTLlLZ+n0A4ScM2FRlfQP1r8bJ4PLLvhiuUyD2vM3EYW69pABBj4t7rOvpzH35vNFdZIyl2ZmRCLG0M3EuJuWuO6/7UJzePgmorZpzUwmEgd15GhZ3GWBO1mAnh9V7pzD4boxDLg3mwOYwXoovJkZOfba+7yenmTgE7CdzUFJJGju1qKum+evTy6WZtikOL8HHnroyr5DJ3/1Xd/2I2982+/92a9893/8wgvn5558c/vwY825x15w603h8Y302U+eJ7mzmpnbf/BLTlUrK5fv/Ngf/NUwDZ76pBt+9L/8J5lJ//Wj/21h7rq9iy9c2lMBVUk1K6StbWYmRlMlNCRTFTNZX5F9Bw9sb297h9q2bV3XqkpA5IA+5hBKElLNXpQ0OTFz4KBoSKQifo9CCFJUk01V3aLKdkHNscOKd/qOhVQtKqBkCKHDysEOiIMIsFMsVzFlZicFKUKNbABJRQEwhmSGSKgl44KaHwxHFJuhmEJPhDVLKTGSP8Pl1pnGGAnZa0cOwUwQoRNnL28Y6iSTGKp0/uID25v56c+4YfnS+lTk8qX1599xTL537dEr63mVbECcEROGhp7/0S+N9eg5z3zlRz/w4X9+/98/+/lfdXljTETj8bgOUcwk5RjrEMK0bcyUuChvlLURU8BIYrsFSWDXlsSHhElyHy98HWRdEgWASCyhwMVFJOdch4j9ZN5UXLLBrSwRGcg6Z0+PSqioWSmQas4556YFgNDDqUT9CWIsgS8JWWrBcoxMscqACowhGDdZNBJXIWA2QwQe5Jw5bQQKiCgIQW2GkGciDBgqaCc6nWRZn2wur2xdPGuPPz587Bycvbi9fGZ68Vy7ur6xuXZ5unahna5DWGddA2gXFuubb146dnLvoSPPu+rk0auOXX/i5MKB/fP7Dw9mZolABJJYtpwTi+ny6jjnXCFXIQZDS9pQUw0AZDgcUUqQc1rcw20LP/5jP/m3f/tXRw8cHbcUiCdp5dSDH7/2yV+yulndcHz2/s9+8CnV5976wsffde/RP37ggGJuswbHsxo04/FaasFBwmPJeRqroEkZGavq7NnTr3rV1/3Sr/wqM65cWaurATB1oJBQlDK7R8mbVUTq7FaxUJGcIoA7i4z+56lDyJYGSc20KDAhk3Uahbv/ipkBsZmKup12qWXNrOIKEdt2Kqr/5Ud+aHNr63d/591LS/s0C6jFGB3H4M8RcSQCIhr9w/H1/3zf5mseij//NEQmYk4qBE1kU6sMg5gaTgNEHaiZr05SVkQNjJHQ37GZtZL7LOPDFv+Y3ruiV/Rm2QfpgINYKfssTQNXZhI4CJfcYYQBAyISc85t6tRscBcomoiYKLUJADhwgYyUkN7DeX2XC+b3vXMpDqVbDQy5KKGAA716rwJmfzIBXM965zmHHvZs5netHzKLqQ+dYDB0vU7PtdSZmVgxBt6hJJtZRhhQSP5OOwJiJI6DQe5I3IHYLWPRQEWxx4AYIBUccr/W8g/SbwVEBAObajOeqOpoNMLOrsdlH7DT/fB7oJ3RbPehSzmZbUcpuv8+9v4qzGSF1ORKe9gVLuVH1fz/+2MNu5CHaKCag01Xzw+/6Zu/6P3v+euPfej2CO2+fdeAxXHanufrXvgUOXfmsbZCbOnkLQvHnvz0u+/997f/0i/aePPmE09+3dd+6/VPPjKaPWKAp9ZPIdfze082NGozgIlqRrRBNUcMiKAqktssGbNBVlVN0yY37fr6+t69e4fD4fb2dikV23ZmZq
ZtW/chcQBOQCKA0OmXOSsOO+ELF3VzsU8gFJFQRUQqYmudpYFqpxjc6wkjl0l+SuLHTLXsIykgsRRDrA5z5ycoa2MuJF5eok0t+u0jKz7wskPIRjWfpjrO3H0Xit9AZ54BAAQUI5d04qTGnAUxhKAqBhjrSqVaGOW11fEP/8ib3vBd3xNCOnbNvn97/8Nv/bkfeMmrP/tT33fn5WvH9deyWCIJ1UfnNt8yvv3qj/yXn/yRC+cee+W3Peltv/j2t/2va/ceetJ4Mm1FKyYToRg0tdPCeU27OnJkDubAcMkqknXnhLpFgVcYKaW+cOmTrh+ztm0jsbvzDgaDpOUZ992K9zTARLue0FIjmjMTwIkKdRVFdjpsX9OqGgYGM5ZC+PYBuCFoznFQA9STNtk0xxgDGbaNB5UsOYlwVRNHrriiKg4G2yLGkVrY2thqV9bl4nI6e2l86t7qzCU4/djk4pl25eJ4bX19e3tdmpXAq2YrkXV+AffOze27cenEicNHDj7zuhsOHDpy/OTJ/QcOLC4uxiELGgTYbDAnXdua5Evrms1MeBAwEE8t1N5KBBGZWktEMKIRNOMJcLTt1gBoNAztGP7rD/3kP733PSePHktbyUIUE56J5y88cNOx2bc+m3n5k1vHzr3y+uVPXlz8qduuTVHRQEERCLMhACtYEs+EGLBtE5MFJEC5dOnC6177mrf+3M+sjZvtte2Z0WzOKskpZKCqRWbfANVtzouwXz8TKYkWC1iVdxwvSkIC2CG/OoCGmX1zDJ13MvUr0p70EdAyWAd3sSxuB8cYRVI1HCTNeSI//dM/vbKy+ld/8ZdHDx7ZnkxQDEwjV+qenpKYBmaAU5r546u2Xv+Q/Z9beL0u8lYACqZW7DuIsAbeWN665777n/rMm9psSbHkEYJIEdwxWrSHHH5eiPbU2+N5zSxZGbh2QPoWzKq69jxq3fwGuuS9o17SzTi1Y+SHLuL5i2su0Fp3QPfvGzj0pe1X76HNKYTASBCKxoU/LVWsPBhFYiSPNapm2lFByp0wcBEQ6/he/na1u7s55xhjPyosES2wH4UnaNgSUWDNOygqKVsMZGYSYaIQyDo7MzRgQDHFTpaFmYtQu9f4TJEKQ79UCaoOh4sxOpt7NBqNm2nbtlTVIlJVlTd83qsRkW8urUvB1ikz9zejD0zmzndqiNDfBusw1URkYOBI9x35z87P1Qoi1yV+ETDQvLFKnv2lX3rbr//yb5x/cO+LvuD529Ojc4PZB++/MDPU2Zv1sfsutdvV3WfPvPsPf/PpVz3z27/2lS940Ytn5xebNJ2mKreRg02mZ1ShabPmNIjs2dfMEk5BS2lhmJWBOAzruL01nkwmjiybTqfOC3TDHBFpmsbvlMfZtm2BmJirEEVEUvYo4MrAOWftkkGJAlTglEhUyNxdHdMvFgCgHxJYp2fkFjSesEuaMb8pRbTSX6Si0BbSvfajp1LzSXZnyaqqGEu2iMRGXi9aQPKxs2cRrHbhE7s4hUw552JVxQDgXbsF4nG7tTA3+Mwd9zz+0MrefXsMQWHy5V+38LJvmnz7qz995vSeZ9x0/MKfr8ZFWr1PD+1Lb/jZLzz2pKrW9tHHzsyP5FnP2/+Hv/fb//3n3rayOq3qIZhUg6GkFgDAcJLaem5G1fMnAZRRkIigCAZuOskRjw6DwQARnerg2JDIYSd07jQH3ZiHiweXw7vUFETJCNmVm93irdwR7XaLZakckENIbQaAUA/YLSMVWsvu3ACd+ZiCggFRkFbQoEZOmtN4SnWcGY0aBmQcVoAIaDDdmGwsr2+urev5i3rmHD52Lp09tXX21PTSxY0rly+tXQGA89Celck2UB6MaHZh/vixhcNHDh49fuuJk0evuWb/8SNze/fsO3R4bmGeI0wVAKAdw8bWZH2zCduIkgOAVkMz00ai+jHjgBGBZFBgMghWuxJeVkuSqaoqEFNEGo3qzbXmB773xz720Q9de/3hjdWpZtaqJVDEmW+58cLrT/5Z1da5Gl9/44TQlptqfoBrY9KJcjTDrIChitEPm2Q0CMMoiRFRc3P+3GNv+P7/+Na3/uTy8rgFnZmdUwVkCj0pw4Sg45WU5ZyXsJ3um/Whq0QYopIzHGxR0ioa2BNGzeh4FwQAch4N7G4hOoWDMscCzKYqiopSIBluoGOtpje96WceffiRe++6Z8++Pdvb217+hiq27TQOR5IyGRDCzB+f3Hr9Q9uvfnTxt25V1YYlqA6Is4XMkFQr4hqoCfX73/cvL/vqLycgjiSKgGxs4NoMVvhU0DV1O0mn04YqscVlF5wzjO5FaCK5Ik4pmWQDty7nz/vgiFhx8KFRztmyuKFWSQc7VEkyc/izEhb/QQSgyIogukPlL2Nkc3ZOV+d445xzTpBijDFGZDdmQ3/TRAGREM3xw8TsvMzymQF7akRvC4h9LYwFFi/drKwMQDo7Gp+ZoAESwRN9acwA1UDUEANzJiBAV47dKcfMrJCSykSu66uwSS0BYuCKBu5VZx1qzBv9XviiD1W9s8Luf/bbsr7D9iED+6zSdlejpW9w1KjTIjvGlAZ0vdbChvJfiTECcAvb2wnm9x18zeu+6ffe/Ud333fpzGo6//jZLVuNMnro4Xtn5+ZvPHbD6778G46+9vuOXnu0merGKl1ZnVIVWxWqU9KAIQLitG3QJbFbY4pMAalDVwGAkbPtiGhmZgYAvLttmmZ7Y3N+z+LW1tb6+vpoNJqfn2fm6fbYF43NdCpESOSKRZ7DZBdfi5i8iRARzcVmgwA985pZgZkUI0roLqm7AmRfK5irvnEgYqe0mmQAQwQzgc4TNzdtBogxStvmlBRBNStC4ecR1XXdz0KKlQh27UF5AovWaxWjdVM+RlJTx3mGEBye7Z2/M6wiB1WtwnAy4YP795967O7zKxcam/m+H3zGjc9c/+ZX/dqD94Tnv+Cpe/fu2VxuT3/m/Dd9x4Hv+k83vv/vT/3dzy48+vgviqbLZ7avvW7p615549bG5SoO6yrkDCKGHNA0Na2q2ZQRkcowUXPOWTIRKZJldb8B/xqNRj61AqCqqtp26nhJ3xw7Vh+52HBRDNB1ve7vxjFIYeoLwc4Bxg4R0ov6dnWnhFC1mM0sBA5VhG7/UorJEq9NinDBTD2owwCBrCJDo2bSXtlYH29sbl26lC9dmp4+M378scn58+3y8pULF9fWlze0vTDZujKdbiJuDiPMz89ee+zQaGnv8WO3nLz6xIkTh6+7av+h/Qf37Z09sMg8QxGywGRs03HTtDK5uN0k40qtSaAa64ojC4IQhBCrLIaKrJmyIRIFCsCIEWoxzaaGqgTZ+XII1sYI7XQ82b9v9uL5y9/73T/+mbtvP3LNwZXNTeBYzVQIyYyfs7T53becjWj7eXXPUmaEs9vVVXPj/3rrgz/24avrOJtti4mQA3EQEUPLjDFGU0GEQawfO/vYf/ze17/5LT+5fGViwETYtrkv+kMgM4tciZgzfKAvE50I220Kip2eP2OdJF9hSPeKvO6xBz0H0hSBiAKxdq/tjDvsB91Zi
BkDWecLWYIhQzaVNnm8nU6aAwcW3/Tm//6d/+G1W1tbzoIF0HbahHqgORGhKhIHWq1m/vb4xjc/vPDum3jCkLMSJgYMoTasTAQhoS3u23/7Z+6767P3Pf95T1/dHgeuwYRq0s604/OS7u52saPGdsU9IgVEI8aix4eIoJhS6/1bCEHAzdyCea/sqtGdejkRTXOOoWAyyq2h4GM260iSWEDDCmAc2Ekx/ueK04DrehkWFxHtaMHU8SABwG0JwCMfAGgnQeWKKoGxW/XhrrEGRC67om4yLFZ2Qj2QXVVBtHwk1+hSAygyOv4raKjobg2IgUJ3TQlod4PL3akKMRSoN1iH4ytifp4+q7oCtZRSXdcMOE0tMGXTtm2tXzdS8VvdOXbd+mT3EmWn7EA01VLEqEp3ZEtANCDDTs/cjStgl3vyzutkU9KMMlPF6Xgjnzh584/+2A//6q//drt6eTodX7N0+MYbb37FS77iOc969nBhMM2wmfSxs1N3ciYOmGAmDscJhMbGrZUdHxoFCGZgGVsU85YcjQCIydAAEbiqPNlsbW2Z2ezsbDOeuIz46vIKGUwmk5TS0aNHva1XM1WRpmHmGNl5gdr5YmYtvNLeFDJ0qsUe5Q0BVFQ19CuAAlAzMiRin0GBKCq411ux3PHL2JHqTNQnGQv7lrIqV2HAwROtiCCCEVKnqqEKIRA6YsKVszrnzqxFajGpdHNyR/iVfqiIDwC4WiUjCQugRRiB6cH9B/75A38/Nz8chquWlpbe81cP3X37/LU3HDpz5oHTj9SXLmz/9C8fevaLFl/7DZ+473Pj+T1BLR09fM23f8/L/+Hv//Ev/vyTL3jh3c941pcsr6wLAHJQU80SY4XIOSuAZSsMwKJVlEUBHfvaH6FqUOe2oN5CCACVz96lo9KaGbiPZydgS6p+R1JKwQKY7UzeoPifSsedY2YMQVXJgIjanKlwbjGllFJjplkaSQ1yVVVcDQYcaO+emf17FjTbWNq1tUsXH7iwfO7CxoWLkzPnt0+dGV+63KxeMLPV9bULKytjlBXJm2Q2HFRLi4f3nDy0d/8tR4+duO6aq06cOH746JFDh2b2LQzqERI2AllABKatXVqTgU4mSYGwqrQyUg3IWNVC00YZNJAQqI/lADHr1CNfqBACMwdkEVVRKCqIaiYMGMBbDWx5olIfOLTw4L0P/6c3/Ngjjz5w+Mjhza1VtjmjZgK5stkk+ZVXXwC0IzPTmSCBYJpxqc5TCc/Yu3HN3uaR9RoIRYUVgjcLFBK0woCN1TFcuHDqO7/zP/zUT/3E5eUNoJjVqhg76X1xzqsHZ7e5NTOzjMiud1Hivrmn+OdtcN1IG4ssnR+MfhBFiO7w4zFZtHOvAB+99L0QI1HZRPh/w0gMAE1u6rqONGjblgMi8erm+LkvfPob3/jGn/ypHz985EjbTIpAtZqBQEB0yxPD2d+7buvVj01e9vjcX18DXJkZON+fyUIRvadBmGzAxz5+x4tf9HQkZSJtQUOPtCrzUeiG7btAWN0/mQAg+FVSMFNB6wno09QaYBVKvasO5EGJMZqJiqoqUyEW55zr0VA7SDmiPwLJ+X5Qxm8I4F2rgEFR9UFEZDMJaEBd4ePXsfTyMTAUYwAfBTvDkmLwZFnCZAdCUUNCjjE6UDLn7CVWFuGqisbO9zUEy5ZUsMlVVXEV+2OBANYZajoCwE3T/QYrGBOBgZQSHPzQSJcUAcp3cJddFBAykjOpvdBTQlV1nSMFqwa1qbnDYBlcIJq5QzjUde2yJv4GegQpGfRaYH5MfUXNMZipk5r81Yo9FCFhLOP2fvjjC/BuY6GdbqKaaZZWNxkWKDPEdGWaR/XSG37guydrMjfYExdajENCPX9xO01xWAMIDrCS3GDIbc4WZy1khraWkTU1KEaIuWkt5BijIhDHtm0BCYHLkwmkJgaqkneIoWaxqlZXVyeTyezMaGF2rg/izXRaVRUhZgQmyipg2OSUVDgGfz6139pCmQGQwQ643UydgNfVRqVY6ecDXOYgjOwao16iKZiqOF8ti7j7RQihGtT1cABJ2IAIc9O2kgeDgRIoWgTMbUJ2dfqCB/Z8HHrfbMKKK83SSukIAcD9tvuh1tbW1mhmSMBEXAUWkbadEpjVMAfhzs999MRVTwVd3d6a/9zdmywLAFf27KkvX9T1zfN/+t7XCV5+xQv+GmTx6LVLNdvKpbPHTux9+cu/40Vf+JJfesfbfu1X3v2rv/a0hZnZ1a0pEokhVQGATVFlUlWVahFD9edfTYzi/Nyc7irj2rZFA9+keF1fVZWq+gS+CA1CCUy+zqfAzkTwWQV1oEWn+LuEO/iQ02c4BprFsriFZpJkiMO6JoKZdhgCL+2ZP7B3b9vq5ubmhXPntre23/OP79eL/3xl+dLq6fOTK6tbly6N169Mm+1xnm5J25jI7MLS/n1zR5aG1x87evjIs48cv+7qa08eP3H8xFWLexcXFmeZUcFaxUkLbaMrTQsbGxkVKwqlB+ShggQYkCByVpoEgpAjQsggVicQAI4YokEFnDELmLsZswAKUkCMpKhQE2fxolF8tutas2Yo7f4Dc5/+1COve+0Pbm6t7Dt4oJ20Q1hCADWZYiZLoQ637NkcBZ2LQgintgbjTCdmpvMxb6fq1j3rD21FtpAJjRAQCCAAhqQDhlANz18489Kv/NK3vPVnxuNxVnTumWRj5pxbACdagiQngvuiVE2ByHYlS4FujLzTFHXTO4cg+XSKiCKxSlFdEXP5fUQzUHOl+q7f2PHzANGEYh1/VlTNVR+IUkoMRSE8BJrmZmOjec13/Id/+dcP/vM///PhQwfaNgOgWkYKAmLAjICI1ZmF4YcOr77mgdm/PpGFmChw5e88m6AhAghpPb943/2PbKxNOUDbttEqyy12Hja7OxkRIS6OCn3p4M0wAzvZPWUB1IBEgCDA0ZfPktq2VCXk3L9pVRXG0U7/GoLPn/35IiIGTCm1WapRjVCUqH0QjVg28eiO0aKmFkLHDnY+LgCgQdktYwF0GEKsq7ZtJ20z6B18EQBQtdMrABBTzZ0tQQwBzMxGhqJGCoyUwFChAuLIzJXjPjxeZysj0FY6mBlglhJrAAC6LaC78Fq2ZBqIBQ3Rp//9VNO1vw37X6GC0gGmQATE1jWmXhbEupo0U28svCkiCkRkAsmSqsYYycAI/XhRYOeHIWIH+PRJjSbTlIsvFQBBVgIkwgTZYEdiWkQMlJkzgqFJKtBuz0N1XXMLAkJklgwRm3Y71rODPTzOohsIMBURRmYQSGgIGU1V89hGoxGQ5VZQMNsYsEEERQUmydq22wTIzKnVUMUQkQiSCnZqjkCdDwaiqK6vr4vIwvysqDZN07atGZrBypW1wWBQVZXbNDpKyx0uzWcMDMgkqfU+1Vf1SQUJkFwG3aCslojLTAxQDajY3fjLUiAzxFj5aMF8sKkIEZsWVKGqTUVSQgKMrFOlQORmWJE4Zw1VNLNMBpEtCVdsRJKRGFiJA5iVVYiaVcQERmCmDC5k5u6wEM1k2mxbNvf6VlQ0JQoB
CcFySjwzuPOO82tbK3PzC9Vc+OxtW69+7dzevx498NB5bLd+5beftnblc//p9ffUo6tGM5ttWp00YW7+sHL967/5tld87at+5Id/6h/+8V9/9Z1/8F3f+T1hECeTXFWV2DgreXLJOglUATaqbqySIwYaVka2e4xCxYylDcREoCiImKU1A2+VmiaJCFWx4uDoWFXjMPBrbpqzihoQGQREYm0VRInNHYFFTK2ZqaswGNRxABWaAZiNx9MrVzYee+yxjfXNv/qLv8kX/ubShYsXL164nE+dufXU//7nt8MlMFFCyyIcw/z8/OKx4yf37z9x4sSxY8eOHb36+IkThw4d2rNnb13H4SAAQZN0YyKNyvmVcfZVAnAnvYJWVVFLggg+cSEAzVZ04BWTuEoREEKNA+CcM0DWGCaWHTLCuVUFMYxVTCkVXL1iJpCUQ2DXh2EKYsCiR4/u/dC/fPb1r/ve6XS6tGc2tw0AGWZFUcCglSCA5tlKrpqbEsJaG1abWJNAf9yBQqokxIAS0FpLSHEqLQNE4pXls894+q1v+bm3bjdp2rR1XYtlAktmQbppJYAHyaZJzNHQEDEOauhk3SCgKeWcg2HRO3I7ZMJobibEjnt3H08FYzDXKQI1QEYkY1VUMBXLoCWWugZFVmFG7jk5VsRfkyn4C3r0F3fM5ZxsMIQ3/9ybPvOZz2xtjUejum0ZoCHDmE0rMxQ01TbP/u7Vl//gY5svvjj6yAF2GSAFQGVyRBZGqCtuHr948dLG6pH9h1PaFFbOmEgQ0aTHDkcQbXMLZoJl8KngXmhmZm2n1UF1zLkNIXqOCGLAJN3SmxFBsiFAm4xJiRKoggYg782w+6Je8ppIRdT1Q7EYukCndg5AiCyoJkoIAQPTLlHJvnbwjSF1sF4nn1hKuS2+rdR9+Zvw2Z12acnDLqKvvTsgNoGp+exCVRjK4k07JxNfVu1+M9DDg31f12nAKppKbrNA9E+O2sHlzWzHLbgfHbs5BKJ129fSHEdy7Xvfl2e1MkwI2KEVuoimYmJumkSdDpf5KI+QfArkPR2igklOBEgUTLVtk6KGEIh2OyuYSDsczVinMu2zX1f88c+b2mxeIsTYNilNprulcQHAoWTVoG6axhFVSQVUsgoBRmZ3xfFbaYg2zaGuEBG5bdvp9vZmVVVxUBNjVUfvgUzUWUY+rhnwgJgxhlZyUiEK9WDgf32apiDKVazruo6VZfFpZ4xRVBUsdl5giFiFiIhZM5ghUgjc21Mys0+3smW1ogks7sGC0Qypk0K07gxIkrqq62E1mYxzzkSoHFVZUwNV5UQpRSBvbc1MLBCqOs4AQYVCyNIGq5iozS1HArM2a+AICpKzogIaUwBCEwXQEEI9P0IsaGRgzHkcEAbDWqbTKtJDD9910423rKysTiaTlbO6dkm/6GUH/ubdp9/4Mzceu2nwY6//eM68/9hovIoQ5uf2D5qt6Utf8pV7Fg/eccdt2eilX/4lqdn6xL994OZbbpqd2zOdpFBFI0spcVxm228mkoF5aCQpRSDA7K7Lu6CehRDSxWKsDaDiKtTkQ2bHkUXGzk3ce5qUTVQ0ZDCRVo2IBoNBQCbGwbCuajQzCwQE4+lge6tZX9s8e/a+9UunT58+df782Qvnz168ePGyXjj/7FO/9U+/YufLk0cHQG/RpaXFq45ds2/fvmPXnjx27MTJq686sP/g0r69i4tLVYWqkBsQg63JeDOntfE45zaEwFWsqCYA6yacAIaEjkym0p1oT9f2fOAi4Nj5wXiAAYBAHIkVQYv0OjKj5BFQ4gCmypUbDKjaJOQKEuUJWPRKTebmRlNJv/27f/3mn/15TbC0d18z2TKFuqo1gxIykRmqyfc96fQNC+NxprFSzXpydhJJAWCjDQBw1+o8MIFOmCtpDREt5BiIjFavbMwvjN729l+Yn59Z3dgAhZzEnaprP/+iqfODIQocA2ahwAomKZsJABGTDwC5E0ztByRoIOqGRAW3GH2d7wNaolDk5BDQ0JCZLXcdlyuDamsuxtClnD4FdMG2mIVAB4byN7GxvXX99Sd+4id+4o0/9P2j4ckQJmo1oRC5klVJE/yphfjZxfXX3D/zb4eIAlNUcHqNlhyWNSKmyXhjfe3g0gERIQ7gGzQA60Y75hwHEwBE2uktofvgLlpg7rNi7hwPZW5fnm4q7j5mbuPWNA2JL9Q8bQkDWi7CYf26Fg2RSDWjoDrPtBCmoTPn3sF0hd33pieMqWqZyiK1bZOsRaaqqgaDQWpa/3j9Vd5dd/dsMH8azFmYCBiIDAwNCcgAiI2MiApSidHfQD8l8I+0u6zQlJ30otixghCRoSfzOH6bOoBrQI8upuhaMYUOJloEOtQ3AE8UIsdu1+WpH3EHSub/STvOsSfCYrpg5kgHBkxJEbXUDcyIagxmFkNERDNxFIy/oAq4bKmXM7mT3mzblqwIaSmiZTEz9/fq32T/hj1rhhBym5IK5kyBu3W7AYCBUSctwlUUMMiCkYhhOIgisrG2SrFck5nRrBuwq2ph1DGLSJMyENbDgSpkT/AE2MEcPPBpzl7gIWKaTikG4gBqgSIRiaiI+sADEbEfR5MBeK1bpHnM+zImAPPBKSMFYnPuBBgz18HUth+8966jR49ec/LwlfXxtMmSIXJop00m8tzftm2apuFwKE3ORMNB1bQtRgqBt7Y2h4MBmGnKaToRCaGKvtS0bJJbp2hXlYUQFRVchUMyF7k0YWBUayxPN5sAbDhGGX3iox946dfdcuGMrF7g+z699PJvueGTH3nsC15y1Yfec8c3/+ej//vNF8YXbfHInjyOV85cfNNbf+hLv/xVk2YSFuvxeHzg6GJFJ0XbKo4CVk2zYamdnVka1rjRJoOWIBMEBDYQMFKQZn1zMBjUo2H/JHYBl8QSAgZyl0xpmmRWjg0AWJtzcVFFwoCIxQF4ZjyKIz9aVUVosLExXt1eufTg6oUL506deuzs2bMXL14+d/701ubKyuo5nA7U8vb2psdkPmDMdMM111z11GuOHj164uTVdIjeefF/ve21v/aUo8+cmRlaAFVIrTVtzllW1rb9iZOUHfjJzDFERlK0LBpMEItSuHs2Z1OTNngr3MVTM/Nx3xNCRHdBsLsshOQ4EiDULGCAIbWthDgHQVM7VckxDMAGWcc4iNI0s/Vgfm52fdL87V//07v+z+996rZPHzhwYDAzaCYToBgiNyqhDtIKA+yp89uff/dXHF/5i4cP3rC4HdAODNuZIMlwdRq3E9+xPP/I1hwQRMRImBKCEZppypIkt5O3veN/Hz95fH1rEwmqepimjSEhllkddU690HHzYkkkGV2rXLOiUYgk5n4kaoahqHV44BIw69aWfRK1LCWLsse1IjXKwWlIZmjkgRpMTQmeQGzxkO+0FmY29+wqHj+qqiHy5ZXmld/48ve+973v+6d/OnhwHwqoZpVWkyEzAxkiGM6865q1X759+Qc/qzeMdSHFi6OZfzs6+77jmJkIGQwkD0fVzGAIAOxwBGJfkED3MWFXXgiwg8yyTmiT/Yddjpi
KrpSptrqjYVC0tBDYtTNVNYuDmQIXVn7btp4jUkolLAcOSqZmJAhsvrJ09QgokBe/5gwYevRXqVk8CwOmXR63/tOaRSyj+toCpJMsVlXLgoFLH77LxBsAsqoYBCsgOzNUMTEJDht2FrMvAnccZJ/A7ekbxmzqUGF1TA0F6hDO1lfynR53cHFaU+qfT1FffniD1edaRKw4iIgjv2DXF1GhbwNAVVU9Y4yIiBGhYNYAwKfcYFCF8nggW7/v5Bj8hbVDfWNPRe3ePHbALk9padoEKqoXRoWrTUTuZqWdoq8fsqZpOAZCHAwGjldX1UL7IwIreiY+ycg5U4zeBPjdjzFWVdW2bTOZIFBAGri0oYCjCYiIkUurqqYKqmVVzIBA5CZaPmFPOWcR8rqBxDgglhGLK7ox9rxdIyJkMiqq3eVPUFcuxCCtSsqqxSuQmMVUTA8emP+NX//1P/nj37/xxptvvvmWr/36VyztPaQ8MEsWzMwmTcvMhcadJQYCgPFkQkQm1qbGDKZNW0eynABlPJlQQ/VgkLPEWAM64S/3RWaRjzcL9aCdTuoQRWQ0Gm1Pt1VRWiUbxRgnk+aFz3vZuYsPfO5zf/HQA4u3f1Lf8j+ffPbMhV9/x8o3vObIa96w+PP/9fSeA09K6fKhA8NnP/OLz12Yrm9sbSxXayvN59IDcbR43bW33PfZR9bWH7/u2hsHcbA2vDg32jeYP2qwAgBkdW7HqplCRIZ6ZiaLyC4taGGjwKGKKC75OHUYBQVCZBFpUxaRdtpUVajrOlY8PzOoalSElNLGenXp/MVLly6dP3P61OnHL126cPrc2QsXzm2tbk/GG81ko5mOmSMqhhD27FmY3b+w/8De66677uixEwf2H+YD9I5H3/Q/vusdT7/muRxBBe6+dO/vv/c3B7N7xgk3roxZGgBQYPd8ChQJWDX7OkQRRCQSByxjp+QOSEQYAqG3Ce4sC1K4gtCX0aqOhdkxTcEOekNEItk5ihCYDDVLUkXkqopJtiSbGVVxRnIWGUNt8wNYnJt96NGz7/79j/zDX/zd3XffNdozvPrqa5pmur29Xdd1mw0IskFqpwzh6Xs3f/PFn5uN+ds+8JQPnNv/DVefe91NZ86O6w7oAI9sDH/hzqsNURVCGEybKYASEQgx0frm8q//2ju+8AtfuHxlA5lCCJPJJBLHGHPKDIauo4L9gRQrpudmnUyCh0rotY536dj7xNH8InZNVLk4u6KTL8qsf0KBvJEkewLyVGVnV4UdKt4AVAE7r25/6su7dTViyz/5kz/94AP3XrmyWnFAs2ytIRIktYBMhhQ+Mw8T2v6Gx8OFEUTTY7n9ps3xsy7u/8Xn4BSx4mZzctVTrjt09JCoEbGjmjvXO3QN87IMDkx9kuqCrYgQBerZqgBVVZm52xn6CJ2jj2l9ocmqud+vM7hUg6pqksxayLF9+ELEbLnbFCgi9ZhzQCAql9Eve+hvm++Wc9nvYkBi4kAch3G3H3iniKCK3YTBAEPwXUKvtrE7oxgadDr4iEiERqCSvQDpr0JfoeyaZnRvDBEDq2oPr4Vu0tt/9dnI/26bc/k+ubCwM6Cz91g9pNOrh+zTChFV9Tzn6bkvCApk18yHeGY2G2fMTHZZCDumKlBQU0Qj4myaJXsD5/i5/tP5K4cQnKCpqg4Ol+6rdN6EpoaBUc3xjbsb36ZpYoxuDpOzgEEIwdtlf/Z8wd9fVf/g5CxehMgBgWoOXNP29raZDeqhZZlKy0Sxqmqqk2RLZoQkKLnYJ0Riw6LUnRDadqqqVVVFDu7GSEhoMG2aEEI94LZ1jkoIIfj57u8XIgLiEwgS3QHtdxN1XVcccs4ppbZtuYpIeOb8ystf9YqFvbN7F5fW1tY+8MH3PvMZzzt69NrJNHEVQ6hUvbYILknhvx4HtSEDoqpwrFNKeTIWSQ7SZkBWVMBA3HbueACgCkQBCEwLdLqu62Y8qarB6sZ6jGwIdRysr0/WNperKjxw39lvfM3L/+jPf2917bF/+KtqcQmuvfbYLU9Z/+23Xfi133/6K75t8vH35WrP4oWNyeu++ychX5yO15/1koX5Pen05yZVdXpYfclf/f6Zixfal3/9K86f2b60evvzXnz1F33pm+qRNU0TORKhu7w17RRp0DSplZ0lMMdAUpxYYsWSfWeiAFLX1Wg4cBZTGO5tW1hf3Tjz6Nm11dXz58+ePn36/Pmzlx+/fP78+e2ttTZNtravNO2YI9V1jFGW9hzaf80t+/cdOnHixImrTu4/ePDo0eMHDx/at28JAIiCAdx78T4+HSEMzl9a9WHI+sZYRMeTxoZGAExVCEEAc1bVTEQMfrZRzRi9YCYVKSGbiwxx21ncAFNk9g/8ec8UIppan2D6MOKPP6ARlsffT2AIwXAgOROHuqbpdNqm7UEd987PQ4KPfPzOP/vTv7nt3z+xvHIWB3jwhqN5DKvTbWaiURSxyAhitRlz+LabLvzMMx6468rc17/vGWfHIwD7y0cOf+rynpeevHxydjxO4aPnFz98fkHAgegwaXIVouYUAk+2x5Pp5tve/vMvf8VXLa9NkIOIEMJoNELTaWoAAbuO02Olh2kAspyBCUyziovrIWHOGZyOQbzrKhEzZ+uXPtC1Niq+IfZ14RN5H6rgoFgFcygqIiIHBiUrtn99+jBEipWIQvHhtqLpS9yKEU4m0/r6m06+7Ku/7ld/6W2HjxxJKall0goK3QQg0PbrH6HVqIebVG8iICiGlWFz3er6Vz+08Gc3JZTpdHzkyKHZ+cHqlcZEq2pgOWUqwxAE4F20VaLQO61T58elqobmWm8AgCU4ozdj2TTnDGogGkIIxMYmlou/SLfU80iVmtS2bopMtCPoAQCAVnzPAQk7uLi33F1qs+DiVl11UNhHiDgcDj1mobF1s0EvFlJK2XTXwBZjjIw7ybAvrESEDMTBr4gKyGYiKgY5J4+MJe0hcicG2d176qq8knR7X+Gk0tc7+ER5AeupV90XdLh8ZGIMEQgLNaUr3DzTEwK47MAOjg4RHXRqWbKagPXyim1TiG7F8wvVlzLqP2+gqrRLtAw5mGafWffv0A8EdbZI/hndfy2GiN5wmxCioplqSsk1O/viBgAqDsAgqZWcwV0iYmHgGJETpLkbkmjKfje3JmNVPbhv//b2tqkOBiNEbNvWEdDjyWRg1hPMc9OSSy93ta2//1iFNgsiZlOdTiXGyIHdATSrqk4mk6TifDPsxuAdSQCJKKsUaSqfCkJxx+pDQIzRvaUdi+CWIDmlJCFA+Mqv+VbJeW5ukBJMxtPt7aYaDnLO07bxc7idWxPVOgYiigEIU25rGjASICbUGGovpIZhSICgyOAkWkUMjMHM/I44Dc/EkCxJioM6iw4GAwCoA002pgfmZ7iy0dzs7bd/+vWvf/07f/P//vz/+Ll7P/3YH74Tv+cH0jf+h6Pb2/qLP3H2R99y3WMPPXjnp6p6QLL/Ey/60sXhYHD+9ObZh7dPPzq99alPWW8f1dlTr/++d1574+F/+Nv3PPCJ5tA195w7/4FrT35VMx
1L2Kp4zmg6aaDNUxgEI+y9vMpdVq2qajQa1KEaHMDApAqTzTweTy+eXjl16szZ02cefPSu5UuXz58/u7J8aWtjbWNjVXJLBMC5qgbzc4uHDu4/eODGw4ePHT56/OjRo8evPrl36eDCwsLSnjkHOaUWphNpRZdXpxxJdZpSWtvaEJVpboZVrapGGAgBjERIs5kl5OzxwTHGmqWrqlVEQA0VTB24B0wMBI4QyCIiCMDG4DM9clzujoG0Hxo/O+zqEh6TCXM26uQwoSvNEdGwRULGSqbNTD1Y3Ds4e2bt/e9/75/+2T/effttotM9B5YWDxzQaZu3JspMvkpUUNXAAREra3/pxY989clLv3XfybfccUMryIAGWQEe2xr+xj3HwAi1azNB0YwMIaCZzczMra9eQmp/67d+5au+6ssuLW+qETA4JzsgAfqDjB4fnDDXxXeAzn/TW/9OsImRgQBEVXLuA5qAiWTqJSd7S2lEVNN+6AgFUkruP9vRvrUj66MCIvgI0zqVlb5Zoqhd98Iu6EAGormKgza1pjJp02u/+zvvv//+D3zgPfsW96ugmYBJQDbSfHicnrIGU/JPSGuVzuW8bxwnc1tffGr2z66zNk2m2whl5OPtBxIx7AiJlOvT8XSK+EIXb51Cjbs0oLB0xtn7MeBOz1VVVYnEZ87KZYFoviF2vmtdFYE8I0XtNS3UBNQEFA0o7KQV3dneUtcBd72zmWtJG3YOU74w6GtGT8MYuA6MTM6jdVZyXdeO1uwbF6+JkNmTJREBoZl2iu47dBQnopkBMPSaR/3PaOEHIyO5nKHmDESRA3QmybuTLhmYA3q9YHPb9qKmRh1svquJzHcApFqaTu+9nGyD7pNIJCSqKio5Z2SKFHPObnhG7lHVyVhTl3HFLRYQDd0YABEZO0uAwuxS7R3i+v1udIJym0CFlMkhX56qQ6nf/U94BdMVqqoIdYwARfE1m0Xjtm1F5LHlByxLltRMp22TZmdnWs2rl69cag7EGKXT2fcgqGZt2ypCYPbJdW5ar7h116jfKQlgaAhJJKWEAMEF3iTXHIGpmU5VdTgzMxqNmMnECMCFSDkWqwzoKg9E7Ab+DsgEABBRNEACL3QUwEHvZhpCPLWhiJjPZiJi15VtPFiUU5FSMxmP27bds7g0badVVatKO5kszs8BAVcRgFWNibeN26bJDuYGAQDnzTNHA1HJHCtmjrEG0JSanMWs0JpVRSGde2jpK7/ty97/qQ995O6Pf8krvvQFX/AFl6bLzd6GMvzKO+/+sq89/OJv3Jpcefo7/+qzX/HKvQeu1337xxbjQ6fbja3Hjx1auvop9ZNfOp/S6UfW6Ekv/gI+DB/4zPvy4vZXfOOX3/XgO69Zvn3m4FNXmyvaCuQKdOyiL1WqY1UNaWcHHAKqWjtuti6uX7lyZfny6vLK5fNnz1y4cH515dLyyqX1jfXUTkeD2LYJAGdGcwtH5o886dqDBw/uP3Bg76GTBw8dOnT4wNzCwuz8bFVVbiyWFDdkc3nrysObKFkQNBCLZAUiAgUJIdb14FJzwUc7xXtUAdUQdkwPTZS4ICV3opVbsAS/mGgIzKzZSdgJgTx/AFP/+BgaQ4Ceu9HFASbeTX4lZ9Oo+Z3yp5KQimecCjObtjlt713cNxnD7/zG3/72u37n0ccfGQzjzNzCfDUn4yQpc6zAcpWEGU3RlABVwG6Yv/KuL3xgb92+/iPPfM+ZQwY+YDbAQObPKbqqEVrRyzckYkTVyHzm8Yeuv+HkL//SLz7rOU+/eGkNqA4Ivguoqiq1qaRbUSV2iHJPgvdRo386N0ExcrVU5I5n6xGUiJjZuWegO2PFvg/GXWhZNUNTQDSi3tc5e91QUouaQfYVqqLn4Ejsbnhq1nWhBoDUMXMUp2Bs0I637cjx/S9+8Uve997/Z8SlGDJDMiVLJzbBAEYKBjgJtDyA3OreRocJMsrCRFchiO6bn7eUEZGIc5vc+wAMimFvad488JvHeQCwLEaInSaBd4sezQISB1cUACIE9sFpp4uHnFJpTY0AnMjcATw5ujOpohoGNrO2bQMhIJiC+9Zbp3VIRfDfzfcwICL0mk1EisDet2mxceizkTqtMGLg4AYMpYxihk5Mp/dCRyd0MqiCluIoiwKqsYIh1MOBdcq6kbiXFPeZrar6ihc7PWo0NTIVkSSmGmPlY0kFr4JFO7S2ay9ANx9GxH6qbC7pgMjQ+yuUHA+EDnYt2QXAXJofylifiEAl5xy5ohgGITgVEnzP6qxlk8jRi3AAwsIARgwM4uyjshtAAkYGgB5vXFaqzESUcutT9Nw2dV0Dl9KHmVPTlkDWi5/5gicGmTbT1Pq/F2kzwoiRiH71Mz9XfqnT5XSrVrtfAwef2xiAmULZ/4DuWofvCmgAvbadU5yt+96u6rP8T39RBACf1hB0QAB/NIo5iHUPSxdJO+hYKbj7f+8qXK+etA+8iFgeOegnsdj93P+fNcf62iqYxRgNrKpqVfVnkoiqKuKuuOYJv8hQd7ah5SPCzl8Av54KIfDCa2R6ceVic+kPt26n/YP6eHRkzJ9cORW3oBqcr54Hf7BqeJU/E1ZdFwPTOTnzyWkd2mE73RJls7U9dr9ahlnIud04tvrIqT+fWflAkkzMRfeii68p59S2/cX3/9m2bU6+jk9lR7gHeC/7NGIUuKrjKAyrWMWqCpEaHp/Cx07jYwCftDWzVQ8XO3fSb4SfDlcS1SLz0M+fujUXYo0zzNEIU0qNgAFkgFaBiNkyKZWBnIhlf9JrlYygRshIIAWYGmMNKE6YkX6spZZzRgIIQBRhl8WNxyjvTgjRMfCimlR8BmNm4P4EVhT6Ukqz9agaVB/7t0+99a1v/dy9Dw7nFg4fOol5TErN+hYFApaU23owk8SQMhhpthDiN179+Juf+8jDGzPf9i8vfnyyQAwmqtDaE0agxc/AeSVmxgQxxK0r2yvbK6989cve/KafnlvYc/HCehyOGs2mRhSqqhKxECrPrG1KnYe5E3MUDMiAAafTieuuW4F2ExlIFjEBwoCFVOK6oUTkTUuneQC9tkH3TzRVZ0iCqJq47Tshq5j0oBZm6hJ28WOFvskrYtTdVj77HlpMiWJgzjlvrOtTnn7ToSMnp5ONWM0lA0ZDMgDVCXj0AadEodogAwBkBDMbG4oNY33jtdfllETA2kxEAmZG4CtWROgAJdAp0VInOVLWSUCdixQHtJSSo5/MrB5UWSRJAvcUMciiZhm5HB4kMrM2J3+UfAPd1XxFViGEgC5XjI5VEPPe2pzZy94IglrArH5enc7l4F5VdbGV3AkA9WlV2+RWqQFQ/AFlAu+sVQrOy4onNhqYCQERk7N0QU3EejEBhwsZYYyRidDAyxAFdNEfFx5SVTQCMbIdWHJSAQdXm5m5VRywQRZpNXl8RkQ2YDUC04DKhFZA4WqqptgVRJVbO3SaDJbdiS8oeWsIWSRJLsE/C1sAg4RgoOjrGAV3h2mahgLXda0iqKhmFbNIYkTo6BPgfnmIudM6V6c/dRv+Ms8BBDVFFS3lDnctu
xB1lCcwM2q0DpFiKFMU1RijAVy1eM3//pI/2Wq3XHjLVDULIq1srQWkmdGwnTaRQ6hiyuIHIyAhQCva5uS3yWMiB1YtkFSf2ABAdhNGUFUtNn8AqpZNLYkBcghFG0uBAebn55AIRDEQEaoAI0L3GPu9hi5KihbRFehwjP1CAVzjxWs1wKqu2dN4oN2DE9/wEBKWYwxoOL+wgGq5TW0zEQAzzand3Fh9z//7uwfvu/uqq06Y5D1Le6+78aalvQe4HszNL83OzgUOo+GwmgVESllzUlOeTqcpSc4ZEg8XZ0TykFf/8f0/vLgkzWT2/rumpx9Ye/zU+PDxg8Nq4Yu/6GW/9bu/P1jcvPrYwtOef932lUvnVx4lW6jD8Qubj7AtnD61dvnMII0rps0f/O/fIwQPPfzYtceeetvj/7gU9nzXq9/w8EOX1jfWVy5fPHf29IUL5y8un9u4eGHt0trm1mafgOMgqqYQw749B4aj4b6lIwcPHj5w4MDRo0f37z+47+CBmdn50WjkUkam0DSpSdmvmIgEKvM66FGNTIhEnj2ozEhCCITkMFbEothjZkA4U8+dWDzp5K8QqKoJEYggBEIDC7zjJ4qoBNlyTk0dKz+fXVWlIZBZMmAwCRzZdeUAM1oc1IHQzFLTeoEOokxExALmgN4MQLESAiAKEJRyrSg6SJUM2kYFNQLWoyptTM+vnn/P3/3Rb777zjQ+cuSIKqXxOkRCVAlgBAxhUEcehGa6DUKIOhzK/3jOva+69uLvPXD0v91+s1BtACJGQAGqDJlMAJEAk5FhAtIMSVMeDgbatmcff/jWW07+yI+9+au+5ss2x/nyxpYRynTKwEYA5CR4n5Z7p8TRB8OMgVjMRFqx7NwQ7a6mgAGImhpCRZxzNtPA7NYCOQsABCQrnBFy/Wif82nx2/YRoNu7EUJomzZnAcRqOEDT1DbMXMXQtg0jG0DcpZZsngVEmVlFERFUM4N7s6iqioHWBu1Tn/rUm2588qc+9eFR7Q4E1KCi8uAzs80WyyICgA2zXrMNANgQTjjcPx8TNe3WvuNz1z/9+pwiawI0RdUERJ5xUU3dtRYck2TGXQ1ORMTBp6QZBBAAQQAwsHRiGK0KIBBEVRVAZo7k6B/zOCYpkUEMpKbTybjmATNny56IAoRuwszKwGLZB6OA4iL8BlmL1o2RhWwKWXmXKqSqKhiIElHA4ohgnQAedL4ZwOTUESgE7bIk7jhl4CzPwNyb6QI4ehmHcajON1N1vB8DmqqalokSU8Qyyy01V7e68M/pU1MwjBwUjIKLDrP/CQQqyw9y3WjfsAMgFaKLGRMJaK+A4SCyoiy+6+/2GyYAqEIZ+bZtm/I0hMAxALCYgBaVPv8c2mqR0CqLaQam5ObPgcu6RbFNyVWUseg/g7NusGPBYudkINnMrKpiYCo2iE4b67uOupQy3otoN5FDsEOzx/yapJSkTaYaQlgKGyml2cEgjJxl1GY2ABDNAWkwGASOk7ZpUhtCqGMVY+0ltttGmZmB5pzBMFtBkEE3xgJRgTJNAHIbTfRpT1Ac1sM4ihjY1+/FENPPXZeD+4a1xGlkLDCQ8qUuONchBnqhUwplXOFvMufsSB80ICKRDpLGiLXxiBty1WJbvH7uC5/2FePt9WZ7e7y1/f0/+Lp3/9Lvv/hFX/DMZzz7T//0z4+dPHHw8AEK4cCBI4cOHTh06MDCwsLs7OyRI8diGMwuzs/sHdz5sQ/PxKC0cPn+41vDtW9/1f+U9W9/0fNv+vIX/dnWZOuBz9z1jp/72SMYH7kzn7995bq93/CSr3zFL//Cjxutfvrjn+Cm3rOvueqapZtuPXjiqv1rV5r3vfuvpMWmSfeHz73wJc+5/84H/+t/+PHl1eVLF05vbixrbup6OL9ndnHxyDOufc6B/Yf7K3Pw8PEjR44ePHhwad/S4uLicHFmcX6BiMho0jaImLIvmtSyqdgcAY+i06yT5LDbPanLqZ5ivW52IGQPHlQoCrvU7YMQUZO3oeUBAAN1lXcikCxg2DcoBoroZwA7EGk39yt7sXIGaCfamJlK98R1ay/ufMQpMAPmnFNy75BABsphmm0umE4aHNSJdY6qlckla+OV97137Q9+a0V1Zv5AY0jNtgVD0ewKa668L6INkBEyXzu39VsvuvvYzPQNH7nlrx8/YoYASS0NBwORjAErCikpI7XThusqqdahBoAw4OXLZ2bn4o/86Pd957d/1/6DC5cvbyWV4XDYNmkymdShVgQX7SjzeSZSVzgiICBERTAynzsW7oWIC4sWkggYIWa/zR16lLkAy4v9QAG+dQbzu1S7S6Pc6QXVVZVFcs44bZi5pgAGMm0BwG0gds+WygADduidnVBUEpGZmbmmmRikyaSd37fnliff+KF/ee+e+T2holYA1ACF2jD8syObP/kgJKT1CqLiNNBGBYpzf3YCDTTLvn375ufnjRAD+66q9zB263dMCNTNFM058ghEgoBMCiBmHn362ZhHXehWkF2v6whwA4RQV+ozMUJVQzFjQKKk0koLjlbpuC1EhICKBlhgQNxNCDR3NsySyce8ItJb1nsIY2Lqzjnu8mBq27YOEXwfI4JcbhUWc1bPOLt4uj7k7fYQPfrLed4FfiXIzEbEiNwlwl6hsJ8kJBUiQgrcP4EgAODtpjfrSuq4nlbyMETFEiz8WoJXR26b1UV5v/q9bw/0DF2nJAIyEDujCXa4ZQTIITBz+aDdvSQDJasHlWk5vl6yNHkS6qqHTfmtFQDqsEV+F3bGDKq9Nqm/Vf/3IoTWA+79yibXuOds6mS43LaICEyBgy/picjMeVYAAM1k6tIZo0GtWaqqqqpqPJlyFZF0c219c7MZDEYYuK4iAlmWRid1XQPANLX+0WIVYozaipcAO0e5GzwAo2ueBuCAIIAJNBg5fptiiHVFMYCK5oKMMDMfJrv8IXbTQneNKlWRmuPdiDmbEw3R98pm1k6nfn0cHx45KIKqRop+bdziFAkFzNBiNkVUsZWVNQohVLMyCot7D/3Jn//9H7z7D44fPvKi57/g9KkL//aRD58/f7ZpmtFodro9reKQKIzHWzPzoyzTffuW9swc/NSnPzYazYUQnv705336jgf/6S/e0LT8zd/6nL85+8epXWu3m4OH9t1258rSwSOi6bf+z9tvuGbpt37vt/ftP/ToQ//62GOnttP5jc0Hua0vLm+eOVvTYLixdTnWsLV58c//5M+n2/nktftPHjvwzFuvv+bqk1dfdf3Cwp650fFqP+7fd3A4mN0pTZSmk7ZN05xbIxtvNzmtgZqbZIcQQqgoBo6sYuozgrJWFyqjPOjCkeu7+XKiiOL55RVTMwvEAgrq82j0+Ye5ioAVAbuerVtietYSoHsHCVEEF/3dEQDo15Md8KdslLQbpWYRLHFyV5RRxUEsfmIA2VREQLIBwESRhg0lipUJxKV45o57qn+9bf3JV69/8qOLPE31gohAkBSAUzQsDDRHBZCb/xm84tpzb3vug6e26q/8h2c8tDVHBMyhqqqkqZ8JuX+sgQ0HM0nb
YV2jwdb6xtralS/+khf/2E+88RnPvHlleXxpeaOPq2S4MDvXtm2/WfF04pnMsTjuS1Qs1YpIvgGAdbsz6PgUTsDzR6DbJBY8lK/DsKtcfXOZXSCiy/q77Whtl7lQzhm8j8TOrdMxVo4TKhAwl3AvxTSVo2XMPJlsM0ekVHGdWrjhxquXlg5OJtthMOM334wAtP6b/Vs//AhsAl2uyp5qO8z+n2voM7PCSZrpgb37qqqajKeGRIWEEsjUwGWKzQwsF/eBlHNRIzHHbGegQETkEAX0nVuhZiiUYXUHM98Ja7u83WpvsnzxRAhmxUjeg7a3TwGDgBJ0I1WkrlYAIgCV1sfA3jH0N6+/8QRuxhds1xd2EhBQ8E07jkC9Lhd0fLLSZXesmLJnBShTVSOi4CmsvPtd9VdfSuAulpTfUX+rFNh9h4DMOfvO2epPQAxRy2zDGyJXqjJvE6kjN3MnT+9SGL6WLi1UJyEGJaMUVy4iilUN7qjcna3yJkXBjANjwJxEVQOSoKmqU4am02lKaTgcOvSpWEg+kdQLAMxccegdav07QNiktgqxvyBdIWLmuw99gqEYYM9cLdc/Etd1LTk3zVSVFubnNWV3v84q1ajOIggwOztbVVWM9aRt2rZ1R+nxeDKZTHowc4isSuvr6/6QM2FZuxSWAlLRPiVEjEAebRmVkEXEL2vO2VIKoapCdHKXIXmjXBCqouLLoI5MZZJL8SRKnbd0CAGZrXANHbmjvQ+xGyg1Ofmxge5PUwfjRAciIKuoTBIZjdfGgwq/9/v/87iZnj179kff8rPfcN+9jz360MMPPPjvn/jIzExtxuPt6eFjRwlDPRw0TXPPQw9ddd1TOK63m/HuB27fc3D/eHt9jui9//gX0+0cY20GC/sWEq3v2bfnNd/6xl/7X7+wZ/91w2q0cmYF45P/8M/+7tzdt28nm2YOZsG2DhxceMrTbxCsHnv4zA1PGrzu9T/6wCOf/aqvedl0Yoyjtm0ppO3JGmq9sTVZ35zCrq/JZBsRYxUYsAojAmLyUUpxZTCznHeYAo6XAAAGzB3Gx6OrP9A7XYKv9a1jUXhha2AA7velXccqxRxGyyt3HAdmdl4hEQCiYzIAICD2eM9esDqlFMuyzLMr9D7c3eHu8M/UaY+3Sa1o7xihbyvNrGUaaZJ2ilJtzw6qRx5f//Xfm7/p0NK1L1hL201sRxuTYT2HW5OKqjZPOFQEoilLsUMLNcFbn/vIt9904c8f2v+jH7+usQoBJGWuKISQk6q29WAIotmwirVp1iQBK20nFy6efu7znv6d3/lTX/M1L51kvXBpk5DNNSAUpMmhrtu2tW5BBk8k0YKz+TtoIXaR0FxHoRM2KI+GKnEQyeWaY+lzAcAfrK4TKL9uKh4grAvm2u31+5EndvtU6Pa+vWONETpOXcFUtNsJuoiCeuByXmKMESBU0aYT2d5uvvVbX/XQA2ff+c5fnR8MEZERVQXUdNb0yHT089fxuQEsCl8a1J/eiy0bgiJIykuLCwyYJIdQOSSNwXoUkUPHCzMasWUIxVahMJAYzCFm3pgCgJKDaQTAPG2bmfvwlfNutisVFHFpAJ/CFvi9ZZE2cYUhRjU1E+uQKyklN6FFX9Zo0dlFtZBSKi1Fl438cdJdYiJ9XgwhSJsEzFfuO7fcjDomch8F+mTg0y2xMlbyl3ITAjd3o268/HmpaPef6F+5ixkFheIDInCjCIOcMyAMYiWe+MGgQLuFAAOSMLnSWL/thm5Rj443Uf8soN357osP6sXG0OHcuPN+PD0gBkIRMS2wulKOMAUDMrAsuWk1ROgAzH1o669z/9lV1Re+paryJ7JNjh/IZuU2kGPWyKuywMx1TUSCKiL+84GLv7oBWECITIht29Z1TVlSSsBkIhi4DhFC8R5AIKbAhGa2WEdvNIlobmZkZg5s0Y5bXOCavTIRIpmxESEpggAwQgU8RSOm0DHQsqik7NdGwWyX97uVIQcXjVozF4MsF83ARBFNJJsJc5RyT4OZIUbPzSkl5zthDEWT0MHtxACQkyaQGGOWHDkwo6YcAyVVSfVjj6/EulpYOppzfuozv+DFL/7ynNN47eJkMtnY2PjUpz711//3L8+ePb19bmswqJ/1wmffc8dDYYBjOjcazaT21HAUMlabkzUcDaY4Tu3kusOHDu178gfe/7F/HP1JPTf3oY+896lPeR7Nx7i1Ftr1TR3P82Q2DAdh7vylrWxz1934lOc+9yWf+ui///Lb/9t11161uOe6P3jX773sq79++cpp5IA0O7uImNV39P0XMw6G86rQNokwiCTmCggV1QIQsiIkyQOKu88edpgrF6Xvzvwu2roBdBv6EBgRsxqIdsJ6XaHcg31UVJw7lM2sR1wSldGXOzC5rj0TZREmAupdwLsJs/Sfy9ECZbfnDl3WNb7U7XEEjHyv3xkyeo07wLqBjQghWMxKyx+7bfDw/fz8q9fuv2gbk1msj1T2kVYWh1HHgDF4NMg5V1UgxKP11ju/9KHrFidv/Ldr//CBA27b5p1CznmyPRYmDrUIIkQiMMPhcAYHunL+dIjylrf+9Ld966sGo8GV1a0maawHLmUjItVwIG1yA9DdebcPOB7BA5IDOhF81+vXJFpOPvB0ahaq+dbNg7n7mvcQdOy0d6xDtFUcjBi9DXTxV799nccAdSsAIwQfojo0MWsxKCQqSn6E3sOUrgnRUDuCJVShmk7bEDqtCKBzFy7feednXLavrC9UDTE/cxMY6n/dGx6ZKYUFuTSmsalIWlhYIMLOYA3LNIsc11PqclUt+dwxpbsknkAVlPoe3cwUi7egI4L671uHO+n0nE1hVx7GYG7jVLrwYr0KUAbvBR/atUlgvi4oOc49YIKZqYiqVhzMTDoneQHph6IMBUhMRBRjSklAGRhpZ+TdN2R9WgK3ESya0GhSqgif42NhxCc3NXOCTVZxF53d1RZ2JZuIYDdSFxF18gyq+UxbzSs4cqhXt3cyw96VQU29uNPOvAJ7sKKBj+OsKynKgx1DObkAAObQLTCgXeq7ffkJAA6ntiLyZy7aFZitU7EIIUynUyi6MEX+uifI+5eIiAozByqmImZGgMRB2oREzMyAZSq46wtt5/24Hm7/zphZEXLObc4+dnHBZDbHxEtPOfODht1CDhEjcT2ovEll92xXHQwGOhhOxtOUks/oXA/b1wcKSNrtlQitHGGqIjka0B+VumZNOaeG60okO53REE3VpUBp59oqAe0+FWYWmZVLXgVVRhTRpmmIaDAYDQYDSa3PV1TNOVqMkFKiUJlZVVVZk5d3WcXIlFUDKlFluncwlJxB2wC6uXZ5bc0oIIcRz45mBgtf861PuuX5L3jokbtn5uPq6uXB3PDE82+bwBrik5eGB2KYcn00hcGiEuGwbXTvgXDXx87AFhy47+FP3XYHJ7n6Va+cmeWNjXN33vnoiRMnbrrp0GZKp9fPbmzfd9UN+8ZXmk/e9g8ffM/ftA3O7wm/8Stvv/Upz7rx5meN1y9/wXOffnl1KwOub0wENCBJbvrTmFKOkat
QcUUxRn9McpacxczNwYm99JSi9NNPejzKgD+mZruVZb39ol2iQog7nFQ/dn3MQiY1AzY0QzUwEC0ubAUj4lAvQ8SCPgUmBXAfce0EXzsCAnbt7y6wj3g1H1z+EDtKPZo5rY8Qs2rTNIbASKMYrR5By+OBjO//rH34Qzxehi3Ye+uTL199aPxvn7tRImobUs5VTMqo7p2FAPTS48tvf9GDy5P4VX/35M8tz3JAhyAoGHMEwFYa5IHlXAXmgI4429pc3riyeuuTTv7sm3/m1qfcvLm5ubE5qQbDEJQINBsQhhCm03YwGFhuTcssjXEnxfZFkvVKQf1OsDAPyLMc9EzojkgJALvQyOrPpphQB5vq7eOIUbJC1x0Z7VJ1eGLv6yGVYiAF6BSKUASYGCJi8ZP1xOzOxB4WJ5NJVQ0ADCAgGoKZhu3JdnCFJS04PjRrn72Gy7F6eMCmiCiAZIQIbMqAqrqwsIBum9umEMjn0CFUIGomiLsaEpEKg5moCRJhKFQ3M4NUKjuXQ/Zoj4jFKnHXlSdAQxYRIKTO0x072Hn5Qylrl4YFzLd+4CQ04hAq7ugiHLh0pF7LVlWlVHQtRATVHAHnPDkiAlFRsZwphuClb0/MMCOfaQDmJ0BkAByNBYzd/IqIfGisqmoCyGgasFR1JpolS07DwUypAUWlo79Q57+oWrB8fQjw0wBgkoXRpxzQ+frl0sIymbe0WrbiHpTNrMxUJXsTUUBM/mcJVXXSTL2I3sW9Iyyyq5pNzdBnEX2tGmOk7n9ShwxSAFMNMWBg9wkIRR5PJpMJFbt4bNtERBUHCaWess6BrjwrMQCTZ0rrVmAV8g4hUk1Mc84uDloPBv3lYoAMgAaRYxW4DtHMmmlLBrGuU851iKZaygc1DsxYNkO9V52ZDQYD7ZAdNij/FQAioxEWdTBRIFJEclt2V50lqpkd65banB2ojEDMWZIk8TPkgR8R2ZN42bvvyOD1RR52FlKoQISaBQvMxEzUpX+sEM0pt0kkBaTA7Gx4EbE6pJTrWJkouMFUBsuqzG3ObgiKSHOzi01ugJBaQ9QAsH358vGlQzccv3Z1bQuuomh466EvzmnCwBDi2mRDdStmk3gF7UJVjds8fPeHPv7wXY895eanLdf3P+VFNzfbW//6qX/4p//3dzecOD4zC9vr9oYf/OZ/fv+/jScvvPex269+8bFnPO0lPp2641Ofu/jgpeUrp8+cX/vxn/jZA/uPHjx49fW3Hnj2c583isNub7jzNZ2kqgYAhSyM0czMoQbMqkqKHh0cMd6nWI9cHT0PGbCHTxoCGKoqd3My7ebIZcTc1aiFEJn7vpWIGREYECRnsEgRCANxWUh5x6YW4xNSe9cla00V7JIZ6e++Vw9e8/kszetCRSrcwxC4qE8IAWzzeGFrsLJ1cfv+26d//jdzZ+8fwySfPZ8IdWEPSj4ZZS600zxneZMDWDYGZIb/9syHvufWC3/7yNIPfeTazZaJCF3lAgxVjYo4lLaTKsSACAJb62sbm6tXnTjy/T/2fa9+9TcMBoOVKxuBIwQaT1OsQ5smwzCcTCZxMKyHg+3xeDQYmqVS0yCygWMXDJSIyUmynRp8CTVqBMQxqJZS3J8ztxjPKSOit2I796sUPN2IHjqvdyDoh5q8cyMAwNt0j5beKogKJPm8CShkH3QUNC6RayqwSIsGSBRjLSIiKYQqSzs3Nzx/buXcuXMhhCQZlL1cN7P0nPXwyTmVbMiEwY+fyx8iGgVeWJzzE2U9BcbbNgRG17TvFlWEDOyIHDVTEacvIiKVJhWLokgWz6ymamo9+6IETyDXX1LNqm51795xpYjt5xZUmOrU75UBkGPgbpvQN8peZYZyoc1yzqaKRBWzUZFkMrPQPXLZtGkaF6Yog1/fWID7+ILPcPsyDTkgYuiYwX2VHQgjV40U8A6WuZJyoFiN2mlpTFUVuibMD5An1/5wlGNB5DMuVTEinwwX0a/AERAQxY21waiofpWlFHQgTwdbWqeK5dAtQjIBx4ihy3sSIpGVGQIiYkSH9pCb7BhoIN+aQwgF/iMikH2aBEmye7UCQBJxXJUj4Nq29Q/oN96lcFwOhXBnxyOm4KKsTGSAav6R+lEwmBXEuKt0ERKR5AwupeJzZSIwdQpHDKFpGmdxSM5kwI677kB5YhlEAQkJVSHllFQilcRGyHVdV1VlZsRIMbQpbU8n5O1LYDQIhoQEjIJQgRkgE2NEkyymCsZMjIRAYt7KKhEgkpogBNg1BenPki8CRCQgMWAMgZmbppEso6oOIZhpO5kSQRVjNQwOQsEYfPIxTS1yCCFgq6zIACJmxACYAQSBARQhg0IkQBVpgi+0B6SqplDxjKqub29N8vZkMhnLeH64F2NYnyw341RJtWe+SimNp7a+ubKRP/7gfdPnPf/IV7xs69KlC8PzNz/z+c/VNP83f/Qn199w7Ve/rr3/c/e95Ycu3X7b+48ePqSTmZtuvnZhe9+f/cYfnb947sabDnzPDx1qxxvN+JrV1ZXl1dUL5z87Wb945oHD15+YgUOHEOpBtdDnp8AzORkCq+Zs2kapQzTkDFAFZgyOFQgBVXYJxnVp2Le5pipmvkNlZkMgMRFx9Ywe3O9EPlXrlem8iDcTiKXoR8cCeHMmKpZKNejSA93sOqfUl9TYNXlmlqYpIu8eIfrPcCh5oj+o3XHVEINlzEkMtaoqNmiaBpivbF+59Ce/u/DxT4zSZCs2IbV2+nOrd/373M1Pnrx/z4G08szN9Y8MB/PEY4VIcrDe/u0vfeAp+8Y//tGr3nXPQTEBVlLLOQsIBcYOUoCIdYSZYdxYWd1cW73hxmve9N/e+OVf8UX7l/Ysr463tyd1XWdFpgDIKU1DCG3T+GfMOccYp23DSJaz82eclMjM7qWNzuwMzFRK/2lqzXAwGAAhgKfVoqbo00SvknNH+eqWSsiGfadkvSoTMnLwZoI7K3u/vFkFCQMFaZO0CVkRIISgJp7tuBf3VZMdhlnxFEpNDoGY2U1hQwgqZGaA8P73fejSpQsHDi2kVhHRee0whPy0rZn/cbUEZCRDEp9voonPH9GqujaDwA5w1pQzonAYECKo7FSTCAiQQJm95jbfeZEBE2UquKJg3D8FTGRAqpnM3O+u64xZLVnBHzAAojpssNwd6+zDzUwNOQYTsEAsZMXYEL1U1Y7e4h5WQaatN09uU1NCm/ibRuuWr4IAiEqUswYkss78oYrOXzK1GKvUtGaWkgxiQF/kOM6oH6QgqFFKCQODgUFBNolBISlXoZTVZUomFQdEalVcmy2EIIwAECkGQA2Yc5Zc+rM+PYdYAULyqUk3KJDOuhhEo98Ss8BshGRWNKtEzNTndYhYU5Bp620BmTFAMlPGULoDRVVCBCTPSUWbDSGXsb/59YlAmhMDDAYDh4a1OYkBItezs2iqKft6xndgNVQMaAKxqia59fxtWS2LYfZN2yBWPtZmDIjGzKgGndoAGUQgC8HEa2EoWGV0mgOZqLrSfV0BAIKGECIU5dFi/2cGWKmqH5KKA3DozzcRBpe86X
TdUK3CEOqZNmQRoVxUBn3rw2rAqGyZDJlxKhUwMJmYd1fEQVUZ2Rc8ZlaY4pIRg9f+/v0QQoxR2qTlq8VQc8UYaoisbc7TFqoQKVgWGlRg6v1fDANXuzTJqW3DcKiIhuL7alWBrGRGgCISmKVtfXfeIpCSQwNiXQGT5lxTNYjVJNT1mCxPEXlhtGTDgmNhsoN7ji/uefrW5suff+vStF3d2Fwhok9++o7xhfb6G66FaviX//f/3Xvvk2ZGCz/9ppfefc99mxcGew9UgeYo0oH9h06cPHjfXY89/Ohnplv3zi/M4pCOL/G1t6rBlGG6vX5ubXMf6dO26qrPQDPzBwfxOoU5g8iMczyc2jaSgDJAQCSzpsaENDNNLRFlSISBqTJrIDMACeTAmJNGqqTNxhpjSJIjESFlUQsUYkxNZgUDYUQIVDo2LN4v2qQyLuJOk847D0QAdBKUgGFwCX5gjNqR3bvWAYgCDE0NLRuq1vVQtVE1jkwK6ivnGETV3HA654HRBGwwRAwVNNC0W8AhzNWA4cLf/VX84IdlMAUxbIwjrD3+4Ik2rDz7uZP9J/eevvKsAB+2JDoE3nrJsSv/+wsf3kr0sr+78bblWY5IObBhS9OKw56ZeUDcbqfMCJaHodpe3zx34cyxY4d/+Id+5Ju+5Ztn5+uV5e1T51cJmWLIAAqiORNRHSsxNSZJGR0nyBFC0XBgAhFJOScVKo2tP4YJjRwzKyIBIwEjYk5TIuIQNBfIMSCJWWO53xrs9C3MioE6YLlZLhsuKlBQBmxFiP4/tv483LbtqgtFf6213seYcxW7OvuUOTmpE07gJCEhBSQkGAyFFAooUUQFLvDgGr0q3/X54IqgiHoFr6gooj7EJ09BEQllqIKEBEhCKpKQOqcu9j5nF2uvteaco/fW2v2j9T7m3Fz39xHO2WfvucYco49W/grOOSdiMHkD2WAYGjsDAKkaUykRl1xyomGotRKJbwoRWehGAYOwsLC5JYWRV3ZaA3S8OX3Lr/66SBFVMw03eFPUl6yw8PyOvWwEOIkLta0CCVXHvBYp5JITuSaDT2rTxokoiUNjA8JtSewW0nhEkoexg16Te1UXkYDXA3CYOQdIIsrGREwsKu1tDhf3LIGQDiVUuDslFmGde2sis0oMMq+k3tmWOSjIwpIYTuQQ4rRFyu3yL4Wd4H3h6u6BXEzCknmz2ZSupaxTIaJhGJy8TmW1WsX8PaVFhIPJmu4E+kaZmTPYHHNx57NzZ9OggHfpFgBhw5fHwd2iBg9dJSGJfcOQBiEJIREmbjbRtXGX551x9Khx4Ig67UdYVa2YSJPKRF+ltKsN4H7bLjc4GM+DMrg5RDizmNk0TdjZDc9lJvqyZLPZxCcEi1rVWKQxzYhAnodcphpD6YODA1utzGwYhlprYpn6+N2qIqeT9SqzlFJKOeXcTZo7cbmN06dNy6OBsgleSSeTtGDXtoMeu393D4W57eiirWLbeiTqjLZiAouDdrYmDih8QApqejxEhTuchREgA7CwcM7SG6BQMI7cT6Hm1xpfJBbtM8gZDRAfyzmNLLXW9Xo9y5iTg1LiEc6k7lUrT2DmTsJCEyGfCmeeaiEiApl6lNWcMplPOiEWB0xqmiXVUmqXJS+bKeaiSp6Ix8UC/cAQN/ZLDDZKXZP4uVsG1c1mZVYWe4f7X/j6L12OuHr9+Bu++ZvKZvPIpx746Ic++PD9j156/KmHP3by3vc98dKXfq5zec/73v7qV3/h3/mnr/7oQz+0PLj9+FiI7PRGRV0AWC4vn5xevfTIF50ePwk+M5+3c7du9g8eW+6XWy4+LfHh9c3ehduetUjngclwtdZ9Ei6Wa1UWrsaoPOyl05NVSsllI5YIqU4rFjP1lAfVul6fGqfMgTuCmpZSBCCGz3xKcw81PuZWDbtzR2JyjEP6qi/eCMwOPFHL7vDv58zBJEGrkyS1rr1WDVSqMIXSELWzF8O9lRaxvDm9WsfloR5UZnFfLg4/+hu/vv8rP0d7q+vr4/1JMRyeVl9ef+z08ccuft4Lbtxx8b9/UH5x/xBJUK9/90sf/asvfviX7z/7prc978lTZJbk4uyQRAWmlGRg5tVqtVmd1rI5LvV5n/Gcb/2Kb/6ar/mqO+66eP366cOPXkuSJQ8hfxWthse7lmlIudZKIl1kLXZSzCybmAkPuc3du1IVO6nqVBu2ObEQBaiSyahONZQbiMihQh72ZfNodDtdCMQW4GwMNkL0cfHepfB8VC2lcHPYbeseZyIWaowjC00FIiql+FRSSika8SCIb0Mfu3s1A4OcIbBCh+cWH/7QJz/20Q8sFsvNujqHqAGnlKZXPUlHIn+4X0VEsgMcdApnQlOUvHbtqL3mDf7HJE3goe7AeJk5EZMZNUp6LwSZmBrBnHcwtvG/zYO1o/8jkILA6O5DQCkFWiFtNhw/a4uEdMBjno45i3V6AMSJFWpqTCQdtRUmNjNYLm5xgNPUTEDRxtVaPfX69OZcZaYOj8GpldpMMHLuIHYCYKrcxBnZXRtyakcWnN2dQMRGsCah6iCKaYCIGDWhRFerwYjLyX3H4rdU7ziyeUJFfZmBDskGg7tZYUzanck8VMMUwpwkjmnRmjh5G/G03arBKzVtz0Bjlq7/MrnCbkKD94fSXqRYAKxWKyKqpjJyrSYiaZB6uoFWIh/HsZgeHx+nlKpbIolNTGYxUDTK5jrk0d1FsoJK2fgOl4yIoCBhLTWgFk1GNnBh3eaTFETNAZqCU0KNGxe7qDkORoDz1p20Y9YDJ5i4aaE7BZaNm4qCc0dUVlNVW7Bw5OO42kB7wNM4lFJoC3c3dwez1YYSmre5RM0xKa6qujlTWNlG+BZzJEnjIKDqYcLBVurmdJXGYb6TeSFm1gARqsGIk4CTMriTVBnCZomDTS9hsmRmXo2I1EppWrvZLFywOM8kequbWpiyqpvhlosXmEN+0Y9PNovF4s7Fcxbp7Otf+2WifHy8Oj596skrn7rtlnve8pZf+h+/9ZsXbz3z1re+5ZZ79u951vMOF5u7bue4yOP1kZa8WiH52dvvemjv4m8eX3nJfNIuf/RrVk8cafrER8v9L7nvS/LiqQc+/Vi2e87t3Xr2wuGZswvVnIQqjqfCJ+unJC9JHanUyUeROtiQM0o1VHguRYbFAFV4NSELBwJvJEs3iwaXQyC4cwfMXfqNtaqI9VuEC++FGlOIQsRlC9pj3QmIzRUgL0Zyq9MGAEuCeuyVW9B0JGoiuO7O5HUP5zZnS6H1crW0wXl88PfeNfzov78x3S+yHE+WRNfs2lPr5eEtvKA3/9xbfvwnfubBJx7df9p1Ts8fy794w0c/5/br3/t7z/hn77/TGUkoEUEtDcPaleHnz59V2zz86EOlTM9//vNf9+rP+6I//obP+pzPPn9+cXRUHn3iqogwSSw9yKuAHeSEqE7IPDATkMYdUDgcxdXMkMPJu0mNzvVKHPhQATSNabMW0MBEzEGcr6WICIxI2Nx2o1Db68EbwqfddP4j7YIHDTzm1eYEaIdDUsfioIPPKYACwavRA
GM8TVkqEKqvFjP0MpHURDNfugau+ILA5ouaxMAvd354fHB81ZNxg9czs8de7uy5hdbejEDAAFz1ifSKWAdeJr2TRCbiIMcBBuFiAwFG5vqGG/kxGrQAkabqEwi0xIErHkONClMWTTJYFtCGFK0VB+EQFmgBaRInYWtzLTzaRIJ90+7toPWyJBVjt2Rp3SeSollEqt6D6OmGyyVS2XkL99dW/BSWDt5hGk+BxEUc62CdwPVie+rGmZUuqAt6bKhr+KR9W0YNV8bhztcpqk1qDIC42F110pn2a1hKqVIFMjGQABxlnGiFSXqo5yH+dqAK94WlYjIQ0c6V8ljGtXXW5AZcVylSWkfpQeDHh0XefqYbo9UILn3amPSrF33b66rG5+j8zAQsjekwCdYFSOWcvVAN1tpaDObcU6DbRXV8ON8FpUplLsm70XMlZsn2PqPMwkTHVOyaxaKflYue4bTCiBXBXdKsxlTQarpnPIyti1Kljt9s031zI256DeohCBvne4GQ6Bqk9U4jzcAMw11woa5pzXnh+IuZMVesvqqvKqzcriE1RYWyegTgLBzIAGcaodRphVOSEACtCrgoMVaRhj2KpXh8HMZgTMEhn1GRoP1QpkAREqeQth0chrTQh4XBcZuDRYEeX7A4JgiLVBJGFuXGuCtiJcSFJHG0xcouQJ63BHJhOFT+naptXdpxBtrLDciraRuGpk1Fck1NHQqA9AYMF0IiLpDGkWCqSkUkaNBq89r4Spojhdf6KumRxmqkONia2CBDmcClg0hA1NTt7X6/amVODbXyJQt4fNvGZMu/UVAJiIKhDIBieY1HS7KhMXbi7hnYgA0w0RFhHX9AtgroyAnW58Bap2odtYj9WfNrq4Aur8RSx3FIUtNR6nsrJdSASoiKZgO7VIB7zI84Gm58hQh1UkpwWqH89wG0rKomDLBq5FXFdMJqaLjuMK842X9F7oV1hVwoUE4eC4klxu1rqWZJfvwZYFBKvTq5T7oretVjyk3CqBJaENKMSIQF6BAjNTH31GuFt2AQ9EXqvACkPQFKksVJ1IpPVOd8xWamgRsdYctpV32E23LVArGRLdPSEUbcsrZdTGKZ7CbQqB6AAwUPPFuKOXemk9NM0LWSVgxlW5mJ4UFW5n1Mnf4Z/YXC+Rj25KNVRa1xyHFWL1qIxcssNZToFoLZrMdj1NF8oYYYsIq7Ys1y7GVomouDfamFAmuiqJai3MwEIBEn17u9B7G+YoBFBrh5rZ+kPqgYq3FxZVyyxYoxLM269ranljU/YypUSxiXaRWTXlOtwZvWPVIG0bu95GE0QD6EbKUaqp1xgzqHbh7A6HXQBvgCgXOmmTs0YHZr2VQCBHq1jWkUMkOBEsrRfWGCgCsOzCNtUkQESXDGiIDPMBYKqZ0Gwdx+l0musgrD/AdamOTFuSfmSBF5lQ88dLa62LX2GbUwkaEkFzo401c7ONHWJlBYAZSgki4oa6EdhiC6iIWqGLpJ0I9myYmjwzO8pyM/nxREA83RIBsxsGDpG5Vg4Po81VV0+6ZthhMcr/R8HHpBmq81CyHqBkPlVKWGkyIGYemYZZ0EdVIyPh3OOSCp+7iZ5vAvjKEkEaWBHxmmHWaGkJoJpML0r2DofoPoouwBYn71huT2Wqe/VSFrsrOB2Rc1OnsiqBQDXm2K3PzqrgJHv4Z9c/u6P86s9V7uyIuE22WK6hzLtobrFKx4ZW4m7Fa4zOjHYqUS+25TbIVd5SUE1FjFHWBwKGtxBh4Wfev91nSbGdoFzJviZ4zGnuWEvWMLBQfelMM1mNykpTdSszclZdoOB3rb3yH5rFmlCw0hMzM9v/qDKVqgp3ytfBi+mFmvfbebygQ9CWkndjJND6A2ulDa2XDB8buRV/dNawp1SX+CKjG/avELTeRi8YoHVDsc6wwJz6CWFlaRozXD2COhTFkdbCy/r0mUNlqblpEVUKRabV93epTPevFMh4jakUDgBEWImgFbzDMiR68IIkaJkpy9iHdhXgZCZhQkRRx4t6DIpBGg3rERQaYXRzxkyDwblyRfXLhspA3T8go9tnmUA0I7I+hZqlQtrPgVxWV7X4YGFgBpmAuKE7e0YnHyqR16BgFVoFBjbwcVPmrZ0tA92OJ2JVgKJ7kOrCDYg6WxLbCsaxAXWUebnqAzQ2QaC7J9BgTGRE1hXSS9I6mld1Cr1hWmE0ZliDOFW/zFgOmttaQRHE+rh2+Kc31adW5h8stqBWwhTzqddyIkVSWJltKDBRYzoL96smwJRBr9KKws+KRRkIGh8vFz2EdIJoyFh7smHnJ3UNTmLtCkLsJNCkUdMpSiNa2uzYnduyP5mYir2twmFUvxINwBizOVCW0qiiBPcNLS8IvalsFoXnuYl83s8GCK0VteaIMAPYUwpLa1QRE3Z8EVLqMVfcb2nbU5kxZkAkIIP6CEnkDHeb2XRP84qojRkLTbdlD78OwMvMWjLKXgiZq9kbzAxToKz4lzoH+tAWCUOaD4mHlMKfUO2ydabumsw8wSKjuGodh3UpqqJ5kMVqUmIOMX3r7qOQR+ykioDBrhBqZyClBHAlVZT2i8oQfYz6VJGiiK9ye2g4Q4+gxdV1jlGup7BDZA3+ESLbuj+NqVg1aFFUtTaMwoTBFPPfh69IE4dHD1Oi1sqdK+2N5pfpyleCbZaJuWKMMdfiqhDOKXXIZYSZrxlsrsM2LHNOWfAqK2xPrIDyZIRhIhFwpxmwruUppEnCIhGBseOZst0hEd8ARl/GakNSN6rgHxOkA3UlMgPn03h4uCgPiChljWE+92QMRFnhLCUwKIUARZnOpLsHokDE5oCWXG+nGBVcZ4TmWlQQN2t4hw1MFg/NfKnNgLVwOhqmDImkXxuqQRX2k5mZpsnwgZoeS0f3kBqqpBNqOK4SS/Oeh1oHwMfLxWlYLGnXAICVS9tUdbAo4p+VtMjw4RExjwPeEu3lm1lXP1T+CSSGrH4B3ZFmcVQPt5tx5hIjD8BVJS97NULJk4Icp3GmucFyrrABSw26CNVMpgq5gcxwd1MOgSlHOHxA7cdV+FdRqEDIJspl0SYEXiUHPFbp17rXSUwJBIZmxAqOvpFlecmhsINK7sqCidkr9t5cBFdMQF2z5o5YoKWile24pYMjl5uKJGwi3IkDOZaJDLcgtYxIiH8g+2mJmAE1BlRDWRs20av6rpLdXS1EYsZUKBby9gKV2r52T0Fa1yK2CJQsawDSLKtGBtrlODJy2BDtI9iktcxtNzvVz537WpmOuQjCHcxEMMQWSjccMRMYlvVQWYIlsjqpavlWAam8Rq5ok2aHLpT5bA5ExCKoxZFUgsh+trkZRqTLxxrVm8lCxK1UVtDwNcibwL201CMzudYKsyF8x9JAOEZasGsJC2lmCHHQu8ga6TbEvY2MCRjNc0sJSEKnMgKV4Ws95WaZ0xABS9cTAopHYhS1J0QpchpBM1zW4d33H+yecpSHUuNKzkp9loZWoV1XN0Io7eyqYdKRq0KWbIwwMt09lXpRRIfEsQiRcjICAxYRJ7M6qLDhDBaHfZhlRV1qTE
5kkCMi3AcsVkiIz5UTzDWVDlam5z3ZPa8dF8NGrCCvemcowURDBtXMZiX3a7BoCmSoepGJhKPq+ZT4azZsZpYDl4gBpeMzQ/3BcTLPzAwLxBqZU+9uwnekxkODZZEQprgzsYFPJHIU9hDHAck+FApyBK1JEmlgG2wLIAcK/KkeDFbS1oBDdqU0UzX/q6ZB1HcUBpWSC0TBKA7znBObj65EwMAonqNILpF5dz5H5FoZcRDeBWxKaacKj8L3ncKxUeTyataPrc8SAYsOCdOFxUtAh14BVRVOKyok4Vn52gQ0jiKAkRitXLlWtBNrkVBBaFeguDxDdF3RWIKuTbVucl4Hm5WLo0ifgUTMuERxXAvJUrlts6aL5agwtm+8loPH40UdxUmsuXwnQNKjKSWgK7wTAXObK1wiOCAWxMsOZf1KbSM3zoMufBW/0cdcE1jH6qJuyulk2dYC/QKZM2Miz1AtM7KbBgJJYhV5AxaRm5iJVhHesEXn3/trwy3KfqMi05hzCUWc4rtmqfWJHKxs3qxoLMMMRJAjM4GLmweRNo/DzRcncy6cNWr8ZgOrYlZujazIUfe9PlMT4lR4FihdyZaCN/YVQyJK7KBzARqcNsiHuQTV1luj/pyVuN4kRMaknIZAtqITZqvm4P/XVwllyW9NwFQowZx0YyKXEmBW9M4m0/boKMUNM2OYYIPmX3XLpgFzzs5vRcDOXKHJNXMWCh5AanQebrLbypNYF/D2DLz8g/uH9EcZQ/l0klDHhy0U15KFYmUANGdVd8qFBSqCqjOTmR33Lr2fRZh5SEJRQoRAJldpRCoHyybE0ejATYeZcUVYYgJmQ1hjZMYMd9G3q9InZyztd6+yRvtflITAldPQy5TdjWNmK5ecSlWQ9AAz4aYu0EBiLaVjsc816jgrHair1Ite1t2YE8NBQdl6cas40s3J/TsC6jclY4PbGappoklcaO9rKM23rKdaEtAIuNk1g2e9loKVVYUH8gblNoC0tSaQwz26rIlMDrNVAOUqaWuweKmmSluIlYVszoTcvD5AkUcRNoDlVy0kTyximCTwypTpY41N7dETyPpFCIssB8su0Pr1FMrxMiIippmtOZt3lhDtu7OWanfbQLl6k4T/u0mbQapMFPcv86oJoNy3gtqEcu5sOEKIYx+BzNBtA9NszGNWvVSQy7VJlNlqcIUT1weqS0ygmt6bbpFKWWhAehNoAaC7fm/LewgxeLPRtKyLIQXlLoPV7QPnpklnZuRiJjlQePuy2BM0AS7GdcyI/Fgy1RakCVG7nlKkJ6scuNpWgQ1IKUqobDFTd1IJzP7YeihCYmhlTtvrxGq4OHNfJIPwhIScXVYIysyTscqwqKE7FT+y1zKxAlhh5T6y4e6uuVU0eG32fYnEkOHK2CO4YrhfjoMr3EdFfcIOhA4XqYdD4DCJyAuAjNMFhyHyUey56TEwxqwVzECkyB1o41+IqdCq3h61sDZFsfaiiii7VN7eiMBiAu7IK86RkRMi6qgip06JAOA+1ppNSauLRlQXXcYs3qkuLErnMSuAziv6eA3iYviIJcTLbLMdxXMnxB6aanu1DtAUtGtGQtaNiooutEPWETwzlguXAnz4cRyoOYMVGNOt1k0yNCrsVKipk75PBItIK49zjSt6YgqyeQAoqmz3rV2JcnoCBeSqKmdmhncIGJFj+IoAKMboDncEKsgjaixiESgS1TJuBpU83TWBBATdaMyV2452RGzmnMdxHqeIjJwa6ldxdzsXK3OjOgSye24aa7ZsUpciMiVUMGYwM6ret2MFs8w1aCox1Nkza7AqMtXWXGd0bXQL7b+zXPDJfA3CrM2OEAkFnnD3uSah2gfozLg+CzrCEEWkr4N2qokB9YNloXWuFeu0vbMOkFLqtpuCBDElEhIcCiA11ipl6qJE5eY04F3vfOenP/0ZuCWYsZiQjFrUcWPbeYot3ziNplInEmcgJQuq985cRCKnlF7kLrKqqUNHDv1oIuPBrLgO2eM/OkqISLfdCApzI3LFUoy8VCGW9aBu1qp70WQp9cvQPXJlJGiXOU/DCUbM6DEzrFuMZKqWrafBlV2sgKagYNkUlqJstPZNm2YU/r7U7FFdipUtJ5nGQRuodFWbvFrmTi9jhQgULJBdmG73QRY3rWR9geIXim9SVL59TAQttlepcacm0QOV9LPaEdtnG26oObWX7OE+NnxEzFzB4Sf3NSe6zRxXsZ7sfpkgqYpmaKKczNiMGEzN9663SgvMDKOt6vmpX+8wtgh6KAYWgVCEDsPKLKBYD7jNcj9GFEfP2qyFROBJCy5z189eve9N+NKkOvkxpTEprseCdrmcvK51JCLXDavfZNS8mLy0AMjHUak0sS50mwxLuMV6wfSynFHxVVV3XlLm0nmrFhWd4go1BB/fuCeIXJqmuGqu2cUsG+0hEvCqklf9hT2DWkvfx7zA+jIo3d/aAZt8kmp6JZC0VZIgupohJZXQUSiMQTCq5wMqlwDV+L+JC7l5XmFso0hsx9gUg2J/A0U3rE2UFyk8uzVVFLVls5lKGiGtQOKbtb6a6+t/6FbpygEwFQ6EBBYVILrsy+52zIrEbuiTQNFwWLMBaus2+jtW5gqjwE8DlqCmGdXfAUTkMliCM2Jsi5QVGhFUu5rD1GCfsIhwswUk0hrxCkBDx6Ouckew25Kg1IVuQAIShKUsum3KGBMIVuRBo4xfhSO0TjoUZ9QBuh6X+gNRqigygxFVQm7jBLFiLZBwt0SuiOtHw45B9b3dYVzVJW4LThpWglI9ReZwQ5Uvbw5Cg8LZnO0COEAwQ8Lmxb7SaMlqBSClXB0f/8Qnx/kU3cxD6qN0xIrNB8oDiUjLBgGIxSpwJrPFzcLTNDljXWv1ay61BRRzyqKE7VJXMnM5vdICSsdP8wVY6CGr6RgRS7n4SjMz80Y8yjJaZYkhuqHQggxgFGXaaAGjVA+y7qddqSsV2un2laY2KlbaW3jzL1PBIFAhi+6q05tfHnX0K2dphgxNwUJBLzQ3AzPWggYLZhk+8e+3sAPZ955sDKRC29J4UUQjFUiU5+lLTJVpvfKGaqW4kZZCn/OyLLehwPXxEzR78fBimJ3GuBxHzGVE1qie2BmAma1rx0JGqAcX5tBjTksHRrJnUeUKpWaNfxIk11qEiWKWOx4gTS3HiwurGqfKlZXNLRLETRhBFYn7QSLh0JzUqJPJ5ubVhaB11/+NEa50Z0UkYiFWxpxLWcBlzeLi6jgxI9Kvi6fH0twKWuQJufLIIxFhIz09kQsPJ/NYfar3iWsCaNUTZMWbC9Ya9zp6kcUK7t/rfd7mf4BBCsJTywBRdZsxxpwx11QeM2PKEGem19lgwfZVgDJI23VHiqpRtwPtyLs+qfqXdPjnmoNF9SwSZQeObnT6SgAtB3hNplU5vaq57a+SvOiMPyMAJ9ol09w9o4IvRYjobLWyhratumFfAKUn8tqpqOSrDgvAJpvU7ZdMV7vRjow67scqsPc6rHAHG4Q1ebvOb0ZEW2ArB8XoiWYqy+bWowASmoiliyNfWmxeJt0sM
9aqICAiODSfRAbgZatw8+xIw/UivvR3isOQGOM057zpHi60RrGzmeWK6sFDSKSqfFc5wcpAK8GuhoWiTWYiV5iZ9zSC7PoMW/VCgYLaNkvPvCGMBiYrnNX8DyGGddM0q55lT2rd0TLvRIdQG9RJbyOtbIbXmymrYmCuWAJes44I7DxCxQgggcBysUUig9miRzESMFvIVOexckOouy5sSUKaME6kJ1ddk7zVUwhgoJLxKEJJZkITCSNVqGhLl6kPme4nAGsuIlwthAKwru25gSYPiXq3je72RxmLu45rnLGIcD8xaor4rlfqpOh3LSsY17oK0pAsf8OtAmyqvBSBzDS7duJaWjB3yUThtdXH7aiFpZY2yEkLCrBSuqPA3FqaYZuLSlPJHSKxnO4+BLschGbTRLRrKp995Vq098pKj/XAyiolzYHbvHDO+fTpE0Qej4/nuxMy1wzAIpNWAgi5wn1kHnEtHVbrF1INdjngRLkxQI2QsIAE4tGiclYATOH1GRvRFjEt1lDX6jLQUGcxUvJZGRqvygTgaGGaWhDh36EbtjKoqKtqATIZ1+h+uxCtEEXCWmEa7VchZ+33hn9Ulo6w0/BlEYAHPC0tL5Y2Vy67wI6nOF1iHuv+dJ4x52Tl21fXlW0Xr9qbVXjorvSC1BSXZUZHJDum3n4rIipMsPYV4JaR2k79al0JGZ9syyu8suxOJw9FNbg6rSaC7qLmjXPSSg6z7CjCIL2O9KRqEU2S7p/P2+AQxI33ze6MUFlrTXePCJqZ2ZyH1VtHliGs5uyo5Arbql4DzsjuzhCElEaLNbMiFbQBkVguaZniA3VR19SuTG3dDpXqxTIWhpvZnEsWkbSAVO2xl1sYYjbZR6jSLtKzTRBrP21GmHE0By3zqqVUAUcbrmNN0nz4movqSCaSsDAdXFQxA6nPEiylzk4GrCyNoHEzk7561Q0bR+8yGkUVXMiCs2YAlkOLp2eTQavTvl2yYI6KSUqCzMAlG1mJkNuQoVurPkPdiYRy6+6oZzelXHsZOs5NA8OAzJEMMLojdJjP3YyD2HVPIHff+fWrKq8lFY0+ElU0TISCiBRsqUMoOmFVetlhRewrlgX/CMeTTmHfPSJD3snTjs7utA/qPx+17rzmSab12iVy4HQaEVmjgsFYE1KPQaWFPpxml8vBTJJN9a62zw0xVWCkADQ0qzEyl/G0Eb0hhnbbyE1U0CxJPTu6qmmq5GUWSaucWgYy5gSKa6fEqEpw15K/3oK7A76D2gpsJ0IDOjOvsUBfsKxgJfUDveXlpFRcLnhJZY2Gu4pAVWBDhvTq+tZanaOrkKoafa1ve+6aNCBetlxL0rki4pggTuO05oy+cr0TiQSOiw44Nnkn95QqAzgzbLXMSCKL7atZOEFwJTRIJ/T2REq3HuXlMgKIkSMTUc8n+quldHRBCT8UHuLWSJNCxJB+ceiS9N+xy0GtrmyqBfAlpCCDWKKmsvPvYsPJZ15/WlVvTM6TjQI6S43x2Qnn+yfnyFc/9tF48vSNtz998fzBeQfOjIyMmutULVftwXYm2N9JoIaqSpJVnCg9hWJgMUFWYLcuCaa6EkaYyCPl+iZLKaIzt46cdVokqcuEa8sr5gTRLoLsQ7OR871yCSTMlV6MMeaaytXYZlDYYiAZ08wrQcy+G9q+uqJZ2L/k3VP+HxpxJqe1YpoN3A5ZAc1FW02gX6Dxcx2O8iFlxmpHi7ZSbTjIDopS7jOqoq5MqJPCUBsAAE3XMYGpCBpC/pwEKJVc96b/AmCRIYMJmPfyzgUrj5IpuV8p9cYELA2BxJpG9mCauA2GDDXzRHMwI3E6jbVWnZaX8lurZj0IpfTEVXz4GqFulSoz1+ATQa8sBi1hQFhijBFrwhCZK1ZxgLGLmK39qSBOIZT0cSJE3AE00Suw1Jli5iNyIYqHWOWyQmEREa4G2QJ5EzvNkA/RkVaz57WHs85Glb1akyHbl+8osE0Nek6YflWBAtKYbb5kNmCKc4ZEi2bdjUhUd4xyM/ESIxmWFlXIiT6T8tElIhPdq04sqn5hUO3covLb5DCkn9zpDw+Pque5jYfLwzBnTRYKjjMxA4ow+9rt66iqkWbUDo9YQsl0qsqwJbBPUKrtjak8wKra0Him1rJO/+4Gaz66Th8aQItWJtsmQKUsG2ZTk+RA7ga+hAGeyCpVcZTVrYtugEYJKtoLBqNuFOlm4qdUTl8fNcCtkiESPyrPdVCjsiv67pxoOwBB93ntjWubKEqLFjAyGC1b6TOqRihnU6eNMHDFIuG0Fy+em9kY47KWZVkHurvZvFyGD0QVAVM18arA0mixNPeyOsJT4iqavhyQRvhcSawMuHsiSommHyAzpBYFWhpgHrlyzbOZ70CjQvUUrhfiP5o4z8GE2YBhRgwfEaFVNha7AoW3pRoS6mIKawuwyvNL8JWqoZVxbiCi5FNxmAxMTmRYBHl3me95EU8/8cOv/Vf/5eW9X3xcHn7BO7/kQ7/mV+PypvmTY12iHE3euvTsTrlEVPCsw4FiIkTCCRS0L39jRJjbXFNzMKUiGvroAMzcZMIzjVU0qbeXQ7fM5fBqWMoCnoOMWIirEH9Xq4ulVf6rc6DKureF7xurn1wRwxjIyAWaJxIMpqt4SKWE1UYvo6lZKzWdUJJ7w+cRSYxhl8vSiVJrQK4YpxOBFSsLe8c1uGKjnrjyi0iLGRpsw5KaK1C0Nqby2mobdHp0HyGYgaAjMscWAmgqT8CAKYhY5su2XQWsqZGFlKHkAySnJXiyIgHQjCu7njXGZU2iqsiszWchKEslwFhSYlC12uw41slsoR6nSF6Skdiq4NkFMlnaMlEFyepC1sNpDBqWACWJn2TiTDuP8/M5YxPlpaY5FE9vpI1aEEcRjkjJxuTUO6dlhFeJDQiJeEo9vc4Y1XdULUZZxDHhwhXL1WXBvrAGgZyskBkZMaoQuS4A0bRE9aFn8WbU2xaylNVuQDBWzOyG4FR5kYboqgFtlRsiwcExExUXG3OPEUqJ3gnKrti7xNKResgay0A6sNDHkqCZj1LUGIAdK2dOTQcx5LwsmJV0htl5jGNelMxxiaKgZAlBwkJII8zcUoV+o2MnjZmZRayjYG/FFRp8WiTnWmlFUq6WZLFR2rE6dKZaQwAacKaE0qgouI6eEVgxT+fTcVnulC9UV6S2NlCdwFEaNEUfbBWEMquDAL3Ya9c0gzWMVu9eE3WEdfO4rGZi5TJxwdngleDQMNoY4/FyABDRhkAyC4ToiUZVb1IHGQMJj6AXPAJgxooG6WYcd/BlFhN+4onjMo/h50mcAzMuaVhzALEADZK9qhV2/hkrxrAVK1Y150WsyEyar6nihArq4tvLZLiNWAuI0zhn5hGZBR7qepbIkSqyZsTO77fpJJIm6pDD3O2YM9KTyOPwnv1pw1WMMXAVQ/9ac6+00xAzzGwlUPos4XpxqyEjouxGhI2TLSaaS3bE6f7p2z70/i/9Bz/4/PLGs6/9JvzO3/OZD/3QO/7O958RzxdzzeTVrQvHliMEOyeQ+2JdtjRnhg43iVSY
ERkR7ozE5eEimyt+w8ywZIBmxsCKpcvT03/q88tMlouK7EZ7ENXQxoBbns+n5w8PNtTun5aMVBP/iOZzoWJ8VWYlACmdABCwQFiFE6pXuQ9zu8xZEDY7cdmgjcIkWvWUIGkMo0SWL9WauDMMc4UgtUCE9HiJMCnFlcSHC+w1m027aYFCmyuSGsoYdhrzWK451xE2LKZFgtzMOrZHWxKlVD1JrjJyk6SqplXNWLPy3iKYEhFVel8m5BuOxrgTwZhrmXtGOJlrnsGIDDVoa2Bz0N1WxJMn94+XR8C4IiPvzndu9vzhuZktZQ2KvXI5zUb5G4n1FEwi9yScyTIi3LxLv2OtKVxyapp4Apn3fnqYj8PHmnj+8FxgCqLr46VgzeEu0Kdq2CtnqiOua7ZCwQR1dQ4b4JwXMyMq0kEsB4bbMQXSmHKfGUG42RLi6SMPhaxKm51JzJjDTCxEs3HgCMMKUNwV2f+6Atdyub7nUu8EkupCLiswAxp53jifhGdTI9uxlsBThTSKPKpxJkMclIbzUCx4JzIsE/Qs9E+gdeO11SkOI2EOQ2QMWYUCgBSA0KyiwQZSYdIVS1JwjU4/EWpM2hjqTuEU4Bs593d3CUrAR3YLkQiy5QyCLYiQjUuZGZzzcpjMtZgyNLETK26vigo7OQFpa60xao4VDNI3YcN03WLWX+ao7qKdKMHM5mUqsqifsmEGUayVi9w2o4RkSZAS3GAUfroz9PJziDmniq8z0jS6GKCpDcwjVmHHqQBD8EEhOInUaJHiRIEGnDCAGQEMy4UcjzZeRTw/c8457s0vR4Iv0kfMh8h7871N+3OJESL6M2YsNFiZEYcg4EQCk0kmIxFxOp+XKKNpx7yQLP1FIxOG9LLCOo1SduziqPAAQUGRmRBNcal9LmhAmh1zupkNj6ojECrqM2/PlagNKiqsuSKWhiUCGG5FHDMjuWIRNNfU99TU1zmnGV/w8ReO1yLWw+/61jd++de/Eo/nOfPpU5zO9vji4IVhEGC8lmQvVW7jbpQUSEQStMSKVAk7opj58p00myvVF9ERtHT1opY8lyG3NEd2gVCNgqKvk5kro9XoVYFZpJFOAnnMkBkycpMPgkAsKuYwo/laS8zKQUdgYuXMkxFmU/YtiFgwpnEiMeewsVbj/IkGPxQEoYHL1MNZJGZle9adf+z7R7NcS1x9KhqTLk3SdA1UBFdwUyWRKzSNfefr+qu+Lr9l9ao7SdmfFBxgJFY1elQlz9qgV76Dyn0TMcaYc7qNRKxqc2dsaSJZgMJfZM/MAgtYoCfADMNAzoA7hJatFeZYcyJyqTROHvMym+p0LRQX7ipyFswKJszmHOjeqR5ktBXLzZO5akJ5zIDBjgxNRbjEMvqU8P5eQnWiqrquiEw8rF68balv0J8KO9UHoixCAOj5dIqM4zJPMycwLQeNyKXDDCPdaCvCqg7BiB67bDSj9G3EOsyi0EciPTHKTyAYjTMrRoKBDs6ScIM1Mi0YeHOgClrt/4CEPqpII1YCosuIQt7Coo+ZYKmWrkTmkafhdJutceCslnPpdtViKVpoJE6ibNeFbDgVw8d23/ICqaTHSoC8apOq01WnY7YMogBGBFbdKEB9k3K7hfK2w8fNV2SsKDRRpyIiYsYYCkzLbURES1uE2EOlSxAELEtaSq+DMRywiGXm5lK2oLRUMluJO8TZ2pG5/jvGcOTGYzMyjmPtj6bPbrunvkOtRAaSSCvHWahnFCgmUwvASOaKmEv8MkQg0lUXLVxn747ORdH1xEu37jE3NeVamE3kXCtmvsjAczyxt73+4tXT5W2vTifgk+fTaExP6VoTJaMWNqPuuZK4sigunl4mM1j9RjbnLJkOg7kVJMk0MIlluFgchjCEFMJNuVtd6azYTSVkZCACK2KF0sScUZWGOWeUSF4gY8VcsVasiFj6hUh9X6dckutaornmzEWvnN9Z1MHI9GVIPC5EMAP+1uNPf+m7n/+iL7//2CdeHX4gf/a/+HvzK77qrcfnmJlKBokJiKN5ohlM3xYqaQEPY1rSZsFeVdo10orclEW8Amqps6iwERYwcjAdGEiLsEyLjNPpfHd/l4k5g4Q5V6w5sN3RMqZhJH0BmSsxY9XcW+k+ElmdHn2gonL1rvVmML0yegtZyYRBl4ixhGRyLeUSBpr4BEGqeEGqgRVVpq/9RWW4Cp/Ue9pXL0UuzGyJF9lEaS9fIVBjV5/E7hefNCOrCYpIxkxK/0hmdzOesvKZBrHQTKZuGAOat6/sI8qkqV5jXECQC5J2YQocHn717Mqbsyg4g2Op3hg5WU2fCxJaiIrWkJl5zEPIpN49QqQxZNJuRqQL548MMAK5UJOSk+08b0xp0f5VpyBapEH9aVlHThcEHCh7WM7VkI4h86KrLdyqqSRarWgLAlwpGgYq1szIOS8xp5s9Imz4wOnh8RIaE5CNXZUJT9X+11qCiwTZlmdNAJaxmGFrT0UQ1NYeJNv0Vgoueq+xnDKi9lRvFb1jiRSv10Cr0prRxkjdjy4wiwUV6ufupVakop945ekTwuZcnjjR7sdJAa4Zl24gq0KvpzOD0UZvXPaB7JCWBa4WfU48ryzeLDelUAgzCeROaIr2Lh6iAbwpXGEn26wkr7NQefQuJaL4AqHDGmZ+66mHO8jjmCUSWbhDEet1JHRe3QjQnaHYpeKDxu4lsmnSB+NWF9A4+Awhw7OXpj6BjzHXVOof2QEFJEMoJ9vMoewHrdyg9mDzthYlzRQrc0Wcz+e5VpFNcgOroOh4hGWaIaqRo5pDScvgPJ1tLcwDp9OEjWXr6elLXjy+60d+5PzWxy+vf9GHvvTLngXvLZhYO6kXIJC41lMTVZzrLF8eMygdqLRMZTJh9KsmBrs4raJ7oWTbLBQuYdbPlNXZASOxtvZyS9Ao+y6zEREZw9SAVIJW9aaoLLROkdmcK2KNMVZwzQXBoUxlLcziWqu6vNJsHu6ZZi8QT4IPkx/4xq//ir/21+eHPmDf+u13v+bXfuSdX8Tj8chgcIoyEGnAypTX2NXTQE/KCGybpzNvZnGTNdSHrS5tM7PzGJM1uDQrvlQnDMw8Eg8PD2qBdfe1ZswJcPSYdQAWScrEtkEq8EpLWSU9V51YyXqGhai2WFnlSUWpK0NVUnM7DqVPcHh1UrHYEqVZlCAai6Aqk4Ei0SuJTESYV2UjmT6GbMWaRQ4ScI92YOUIY8rWyHGLFpI15rQHBqDaU6guqxQs17kaKyFnI81gEDZVHOkNKXIckqXM3UV+pbokA4OGgEOmPlXAXHsN5BawC4j0zGVCrDORs4pWnlPJg5mHCrTuHiu2woEZVqRlSpA9kde2TLL5D/3GRPPGC4mDkn6ZD0KTVpGYxFAAQmPmYnIt5SHZOIacaQLFNVQx8YpfbrtxJcMJe64trhq28F4gwwg/j3mZA76nD5AU14H1vt25VCMVPbG6N0dUDwAcLLr3jExDZjroKD5shbVKVGRom4fV5RFEhb9asStaXY4vUoRtqxZWme+CNdQ3bA1OsNVQjRXTrJhN44j7p08vx/G4Do2
c21FC71zZw0EgjZ5EYiGAEGN9rZXFNirebT2aoF7BZSiJnqjDLzaA8hrF/GUGhMhVk09WILpNEfsC7ORctTUWg4/QWLrUXJTUUXfz02msVfwdqcjq88mInM/ny+USsdTuXJ1KNMFLdb+vUh1aoHKTbMgUiPP5bs6lmxorzOxyzN1n4e4ZNaWrzmWqOs2sQJzsWl3W52jAXb1Y4iqRhA1Y5FScWcyuJtXq1mt3lLPaaj6QlKzXYUGMkStPyPXk6de8//3n9/8IPM/n+/GR7//Gn/nIP/j1v+mtNU/sndvPfbV40sMyyJT1X4qtUIhh++hEUqWmuoa10dqja6ZV55VJLHWa7uYG3VpW5oDMQLJQay0WIpYCyYjoiobee4dkjUcCgRjul8h5rKr6RALpw+BeZdJGVhIZvKTjtOY0H7Fmnnh5/nD39BPf9Gvf+eZnHl49x7N3redvAX4yzMq/0aYKUYhoBS6KKDK5feA+XlGcadS5qhFSEspgxLoIozb5tDQzo2Y/VP7nwwHUsHYzqJktw25QSmWEZmYYiZmBjDQzMASGKwZguctuq2e1q3iAUOGZDDg4EWOGuwkduT/fqWXosmbNaswbsDQzq9ZVZ117JPswrGVkKlKvLg43amCGHkuYTGQOmHeEnoV1iRZb31ROHN3Z34p7yixLYd6QhCX7Rl9tT7mv7acLIkeJ0IUiSQVPYp106rd7mmk8YotUXBOLRFoiudgdhKjocpsdRGAMK95TZMFNUoDbpXUiYhUHrDCiOvZRU5gAiGxUDf16PIIr1hinjFw9CeZ8HiuxHi80ozPmch+ILDU1WtagITDAzJVFI0ftpKxlJ2DllYtmK66Te5MtkAm62guJcQlBcwwYbEI6oA5kRrptY6f4wuZcJIPAUg99RgTdJsolqToQ5FVwsXHm/UVQdSV3zxWojC40QGMHw3opI7P60a0JeaIG+ywpbaUpxc/unxBSj2H+/OFh+LBEGCLw7Pnz0MAB5e5kiWogsvQYEjUCsb1/N6tkhbDV9ZRtQ2jCs/sgZ/m0dumCz7PCH7Dm6Vinu3blAAtMutrOPqXYsBjSKrvMBPD0yVP3Zh+YJXDMOWfJQVSWUZlo0hAZj5dHFHs4MuP+7l7v2o9QD1DKhdvmtyXrS6WCHZS9265SRYhZ7pSphGxszQBCxU9R7xJXk51qu1b4IhXJnGp6HfZiXgJSPQB7yazVTIXWKVQ0bKMi+3WMjBwxMc+Rbz05felHf/qdH/iRV1+/H3dPnv8z//hP/Jrf8AxmJx+4m0rTvvDIsiNqnbCoa6a/M1imJ0bqsBCBU2JFt9wDPtzMit6fABDGZTmtxNgqhqwF6qhCEW5ubKijvIBlUfr1CxEx1xIGVBWCqENbSwzQbGUjihU3gIYpBUX9OBAZc86Yi8ZcBj9jxgKXSLCPx0e+6D2f+82/dbzz1fM9HzHv7KzfVXDBMSLS6RYwsyBVv6hCjZ4dNUVzw0GFUkOFuoJG2XBJBoweBSzasLPZIM3SIhKCAFdD/8X4zbSSUFNaqNJmz11S/hmNv+AlVkiGgQP1USXBoKrU1dQzDTlRRcphDrMj1oo1oIYCcTFU7wdRylzyXuX2dk2o2Cjl98sCaiyGNeWhrYNsSDZg1wel740OCRqwTiQ0YEM5jO4mGwa4tWHXqM3k6rtVdGWUxoBICDcXQ2UzRhqJFYiwhFet0Upuo9yAyltmSvHaoOnMS9GvBW0iYnb51mZpCLL4a7py9apbRl65b11cEl69txUsGIEro9WAJOleCf1ci2vR3d21lRU/9mVhKVq0ZVYDUMUXXNUqGnsR+wjuLBzJqAJcJlDGITImwmwgQjVTp3nlQPtyV7s7E6bRXOxwzSCXhEgLFPF61zGBhdT/9RiaUWdAxDydhjrfqPJFpt3Uv9Bem711ulu16TphUsFqq1XfL/KDSExK48yHaus4n07n0ykTYwy1hrNmIlWHOm9O4VDABr1B5+Jl3NUZr7+kuqpp5Ooga4tZsB3wtaNVHD3EqMIA0JIPdQ7LKfdKJNQ3RZa4thKqXYV8/tZb5j7GkEameMaR1WRWL918XgKmZrV2+XPNUvlJFeRUHaljDRWRCigqypOMFaH+cW7kZq1w964+FbOmlnW3E5VCcvnwBNw4126lquCGTIZ0ViyUygMawEIW5rztUv1DAqs6/SVpBZDmIy5ID4Y9MJ+u03s/9uE3fH5kji//db8mXsTT19/9ka+wF5a+Mof6pDohqYOrjCxV2rDbcAQAmVbD0opBUcuUEhxAaKZCLCGNgCFZCt5N19WF66wwqgcU2x0TuxlGjYoEOE4jIiKWAlg1dW5VBB2iAMSbMEiqjGvORI7hNoZKbmstJM0sVrj7+XSas+bmxULyPAIHJk93Mdf94+OnPvkJfHzeTbvzV494sQKwKaEQTo2ZqkFoah6sjrtOsgrQzD4BXbWRyhHMRYoQypdCDyTnIGGFdRS2XGUf/bZVrhSpsVGnYEfR8EASq2sXJqQhoWYYI27ktHUNI2pegmH3oSJZNV0k80TOVNzNFWs+PCRi2MAV82RzHzu7ZJUD65kAEUfnPMY4qUaYpoTe5pzVcVdiBH09wKWFgp5FFU155gIcahnYiyT1KKj4DINJWg4V9XX0X8iD7Ao6YlYmmQAmNY4zbsJTBkg3c4tcUeLSuTLNnLkU6PTStm2mWWAiEzHCFjJMqGU5yOwk5yq5oJsdbRNld6rQCOXF1V6KluYtzKl0Wpy2DMgQxxDIMTyOAG2uZQlzi1gzVgJrBX0P1at3ECagRnyXUa0BX0k1P9luA6ijVA1ytIhwKWZnmFuKKgi7DERczuZJroxXnzx5/vz5bQN4r5uCY5irFwheICqF2Wy6QbA6l1oFt/xnOY9eS8V5a80aIFRqBKngvmOLLLuqv6gmLgIri8tmAIr5zMpblGWWvIzVrMwV83S+rz6dzBkL7pl5rLmLktf4D6DZQEuuZaWqTNQpES9dsYHMFpDHnClXhUTC3UprIhStmgxhVoJuJbRWoxn1GZQLhV2PHRIpbqwi2thaCr3BHXKIDccC5Td+VChMEswAEz78Nsdykelb9LH/SjFcpacKPCI1KZZChlKD6DIpwaZqo8yqi+7iYwdFReZHQW9SuxABppXdtocJNcmphyIZGWnuSxOOo2IuFVJAI6VW1a9AtWEXqJoMunHGeYwHw6sPb96/8WB5f//s0/GD/+D+81/18LYnn3n3e+3hcvFkeBT6sWURlY2go0jcnm2wr3vdM5FqUmZsxXKJC6Q8HFauk5Iy9q+r5CXeDRVsX6PoonFlR6OZABdiwQAcczJzuAWqniIdAXmOKiakXDZmFO9mx/4xpw3PBcALUOnjlIkVYaeJwOB9Ance69GnEeMEPv3Ef/mffcUXvefNr/7ah2fzdL7LWeV7kZtKNicSbiW6lZIHgJ599x93BpMChG2MY0YDXYic8r0RQfg+tVEBOwurbai1SqJgRD5gWlQ1TnJ3VrV8QmCwsTRqYlVHS3Wmi4pKI2PBWPJsgVxUpz6YGU4LJjkzIuHk4JgzzG0nBW
XHuggj6Zma6uQsSAMwkyTF0F/Pecj8dlmg6h3VsYrw1SQpAKV9ARAuHcEqI6ranABsMQlzNxvzckEj3vo1LeEVnqq9YFFL1PFhXJr5yGtFUYYbBGJl9B9j7bZ1kzBtahhOZwAJSMRBEkLDrPksQHkPdxczvzNkXb3SpLOtEFwfMErhSB02Uh7OIOlGRtFO3eDGDMtWQ58zI8EMafrkimJCZoabaL6yKSxQIK3LE9ceGpaXdDMx64PciWQ7N0jgSZBmFJa+IvjURticsZSevvHWWzS63fI9Kp4vQnauRL2aCSGYYSdfiYVFwmCjdnEZ7AqhZHd7w/xkj5cHurn60RXDlpZL0a7qzSiUQ0P8OpA0O41xzItAdQMT1AjIEOJmGD5S5AMlFrQjJiIj0txjrTGG7ofTZkoyQSQIAWTG3/Wtf9ZAM1xiLeSAZZNNsb+sYLIEVswBzy6nutHgKwLcYgDXpFaPR1A9wyiag7KbghYNVc2vu6F+iehpRf0VKA6G6hn78iEhSpL70JmPIgLEGGNGcFhGrLVOPoSVRxvoirBR8ryKjPdlyPJEtSGyMdGlK+nBd30kxZImiNY6T+WKrPqG0WCumhwMGXX4FXC0oksAUu9KQVQqtwJAVeuyyuPlJ6vuCnBiukY/5DDaw8leifiST3z6VffPfeLDr33Zlzz7Be/+wBsvTrw7/IJIM+PKhPRn1WCmhyqD1ah63T+CM0qFeJjrxgU0k1jK9URA86ccRclDpPfgM6BVnFTJo7CWHbGpe2TqYXv1PSM5ECk2LyMwHCumc+TLflyhb6iNp/rFYe7INHCuVmMFAFQnkqkdUYGz6tdcEZmBZbgfePaZ97zvx+y3/o4PfvIzT+9tHkYPRpiNNOQKcbqzRkqJmaM2VTFZsms81o8foqGttRqvvYbwSDWhRW8rmCnimo41Urr8UWOr5TgapE0k6Wo0tXZdevGIIOgiG7LqyNx92IgVKe6pgBClkqUyCAslNKVbhFyVvUhmUoecpXGIaFntSiIbLXSjlTojaBrIQ/G8Mqulcjcf1iFBnRKVaPQjoDuLeinvLqJZsyWohtfs8Hx3oYgfNGO1I6sLC6XeLRAhzUh9zjFOM1YGzFwavdb7GAK9ZQFgqpsoAxP9SZ+YbaxQRM/CISLSfEQ27KcnruXqwFf5cIabsafbqSQlWi5bw7Wg/uEEck7wmmbcdo0SNKspPejKRP07CxLcJSWaOarTLWSXAKxl5ql8pdSWxNQq1AQrnJBuIt2rraTLCL0V2JciSvENEUERFQ3iRe8ErWgjpRyyjKYJqpKQI2HmbP23KJNc/Zu8SnwIlmDXwMKcM5alIWNwLOUkUpwurYBciDFOug9WJ6lZIInTaUCa81bKowV3miHW3ktzrw5jqMJbhzWSA5qlHBg18CRhcLOlAlKVnjUlDkjATLPKFJatyMRMNAq2Ma4rrU62bceSZWpYCTH3OVsaf5EwOryDxU6RbFS/pNgQlScVeKBYKeqIy6e7r/bEmRg+VmwbsWMloXsVtdXz7hOSmUgPTAOZ1qOoE1B/RmEmFH2qiqaK4FmhnK6BgUizymkLpFZvMqFQjxzD51oJiSFUchexKlZL/WehPld0uxK5PGME5vRhCVsPT6etMd7/pe8YuJ/vevWXvfcd73zXF332pz74+c88Ow3PkrUi0Lcx02hDT1kY2hWCJhCI+/v7mDNA9Ug04w5BDNCAZUbECVyi02Yrn4BAjjHO5/OzZ8/MmrovFLqUDrS8kpfZMIFAVJVvQdBKiKXEEphpPZ2ol6R8sWZ9MAOpUYq6wtVmpvIKSY2j0KvdsM6RI9bDW+PVd7/x+EO/8KMfe/Xtr7y4XE4+iAyzABhQT63Kr9vGXFlpCTn1FUEKg0kYC7/ScNDmi+q72ogKeOrVKEhGaRfMQrS9ki7fqJVW2USJhNDOvCpd6aVAQFebUEksKv+sZkTlwImkGRExJTS4uZsp2rZZASKprFLHuq97i7QIB8ASV0Lgeb3INb4vFhVJ3hw4xZpLLKxKcHmdQ67ZpQAEhokMphyGeg/RrVVYLRsTxZ5Qs01khOSuMiJiyPQLQjAzTanLkP5ue4o6lAV0V7SNsNBwcS/MSka8SDtZt7UuUpXFU5+od1a9rWB0u3dm0YMtCfMCF2NfiwDgVeVuJVmA1c7TyU+DE2XOrIkWTevT2rhkhRp8UPkdRa3qKhMCK8zMxumIxWrjVDyHxI6csoB8My2sNv+6AmgDJicpZJQk0r19hHT3FJvGlhGUdke6eRKHmptrxkxERBNKNw0dm+izAahaCHXtEwho1mvSIjfbsauaNBs2bMzj2OUjqPLKVBPKrHmg9G4arUQglkycykrV/oRFoBRhq2M3jUuYXTZsZwA4lygGVym7UrGr9ZMSbGQ/JK5x6zXaqb/SqSKuHCe1BbDvvSUs4AmLzDU1TC6W0uqy9SXpXOV9tew220Wg/OxQsPIB85QqJ2yUjZeHlf4AswQTKtOIjClebBMVtaBhGtnIyej+cSTD3CjooPNYGmBUITeAUPMrsNQ0EUg1t67Z2rMlFFw6VKkYmpZGIJc4UE3ZQMRaTRCoT3ctoKdcGWzN08L0cTGcZzx9se4enp+Tn/rU8+Oty9OH5zyd11I8V77cEmrnTBFqb5EP2ZiKIxMhqdrl7j5c5foSJQeXAqHMrCiusltZc4CXy+WNN94wc1TVpSIJXZDNKmcTvzIRuWRGFVStaJG4AAoj1Snt7oOVGdXBLDu3Vqwl7kjsXIEdYYhjfP2SQyqcZBLmIy9f/hXrR3/ktfMTA09YrMJT41S8boOiznKwfReTOJ1PFdoraKmej0zk1UpLxqiQxvo0OrMbdzTokoBJC/MwiUVt0Kwg/KxW/gJmhS1uluA1RKmjHxtjYQ13U02RMD8NdlpMo5mtWHfnc1fs9fAF+8V1Gfro7KHiiYwcVhTGSC75w9jjIpUHdt0c1aUazbRD29MQgEbYGKyBKxGxGgepDY0o1anM2nFe+ydptGaNkVVozN2vZbCunVjbAxSmYxZd7BUKnnu5wSTSxLULK/aooDXRmUT+TONAO1T1SwCRmWHaKXjCQYkIROtrRktMyASZjAxyMVbBUakhYNnk2comuleJwDBzd7ehYqqDA16YH5tg2r63Ppber7pEaf4F3hS1eVn+xt2ze7RQzIXugkLKqffxyY6plsQNxLhUmFCZptsYThCxWtpBwWeVgzNvipvKLxFufnc+lceQRWMzoLJqMxYsTcCUSmJKTVQ1DOUEERFrtRvMrE6GtJK9TENkRhOl0OYlb+1DMXR1M6vDSzmXOFZFys1CKORgqiWkFBrqAArnJ1estWbF0MioPof+odbnU1hUrKSyfmVZCGQya4Jr1tYEEf7SvqJZxPWJsDsE92XX9l+9Rt3DnEvjGqslIkLEX9t4nQxKLaltdvf1ddgXdUHDpK5NO5ZYa9Ylb8PRNeniAuwDomKlmjqGu5MWIf+kCday3FPFdfRgWDIDXdsIsxEZEstVd40SEKvbSPJk4GvnQc4B8zxfzGz4sjGSb8z5kz/6Q8ff+
O53ffZTON/ZrBtIwHrURLx0N0gzkJspTWDOWQdcucyKJINh6l1Brlh1StnHUehiBIxjjDFOiSLaWWUaRiomVZRv7DqFfMTKQ/lWVGE+Y3XRkEgiMjV/NWsYnUL4rJeGXpbWAxmvO6x9UybY+3Ql445B+Hr+nF/6Cz//5vOnH/rA+f7Vx3nImpgybIKhHpdN1d3IzQ5dsR+yjm4UB6I+BAzY1wCC1xTriq/EducuHQbEjAVDGKaJ+RzSi6ao4Zpav4+xCl5qu4pAkRhqh1jEQW2zmpVj07wQ6YVe0XqUb+xUMdtOo45/O36w9MCQRUbTFMcy6LKM9UcThT3aGbMTalJjGSObHivNnBTeG9IEMW78QA/sbgUGRkUatba9uXUK5HQBBxzGqH5KghGxVqlVsx1e/WJUOclMuFTZdvWYgA1JtaxExRZV4g5uH8T04eZom2MkdggTZrGdoSLtOjaRuefTVNxmADMQteZVpNshBJAC+pTyRaRY3OR5DAEGoQ+EYr3DxN4PD1hS2jgKYI+5nl8uI4srINmWsovaXrVpIEDuy50Jle1ub1/Z7c6vsk10xAQyYunnnebmqnfY+QTXgGRY6TJjpEXLLZiZ4gRUj2GHf7yGUYqwJjOYvvIUVO++8nDldmgZxv7xKhi70eq6IptQbFs0RAIj1vm9TGUzUmwTdKNygXrk6ZbVG0smYq5Exskq3705B9WCoi3vhNYqBygGoD6Z3Hd/BnTcBwONA1fiYV1C1uvIE6n8720R0CaR2f73hqB/BRaG6DA6qQGsSOTwEYlZ9aW9dtdXun4V4LSyZJUC2+Pt3IJWcQDoAr/KC0izAxuPuy6cCm3N/DLU9MoMZNZIoowreqBwPgOkzbWm6iKFBRkgsYLue9ABTrO0ZY+ZfH7gQqSTGZqsIO7/CZxPX7ev/qq3feoT57MfdlhWBWgCq+G1inTbZGiLpLzjZjCb/x+y/m3Jsiy7DsTGHGvtfY4f9/C4ZGRkVlZmVqKQqCoULgRBCqRIGsWWqDbKpBb7TQ96kOldX6MvkFmbyWTqNumh2SY11WQ3mwSbgIEgCBJAXRNZWZlReY2M8HA/fs7ea42phznX8SgyjCxExsXj+N5rzcuYY4ypXmuNXB36LnaJkER5i22u7nfWQpYls3ps5/WBLkfl6wYrJFlsIAF399QGiVHdzWPsZ4ZaK+BisJzQXE3eQ+ViRlqpLCUXjkTl3vvoYkYIsFGBRlBzv8Os4lT3BhZar6jl6ptv60/+9U69TWfy8COJucKoBEfTmmhblCzMCmldVu9ioCWKraNIzHnQFUEL6wo4kCuxs5KLz9bCfityh4FAhdeRJE5t/S/9SFqKnxRA+avjuHH8rUhRXaGlD+72EMkG8k+o9fhUy3Iczn+4+z93/6g48n8AAvEmDeip9Y0aDnetpJ0MwR0+RD0Dks9ReIy7FD93yNQEeQ+zTyPiQiUErVrCUR5mCAXzHazlgfzGv6HQBTnR0XGyrzbBPEecOaMcvqanbzSaDaTHmBmKW/GgRqZl7wC3FD6AkZSkdkrOuovmJUh5qew4Je+7Fv6Xrkb+j79SVdwdx3ysWSbFd5x/BihFQO+Bekol0ZH/4K9jEGTiy4dAlgMOiQIaDo3zGbivTjLRUwOIAcLg7qNGkeXutBLlV0lrXCMCwB64i8fAQTGVSG1kOFMG+8EiXlGOroAcrMuX1hOp8qSIjxbQEGQXRzM0go75l6/PqQk5PQvEtxF47VCBxTHy4VuCBFV1GvuMJCfAoq7NbzFfvSP474MA4DLl1Yy0H865qZ3IlkGuWkqtBQmXmZH9NCG++xGpl2PUGj6GJ4ISsswFg3PnTiDX0HepRas3pGillld67FNpcrpW+dUMPvztnKWYI/YoBON/hJpXzmICK1HinwyKT8N2GF1MFWDECxu8DrKGBi/fWD6xcRkG/nyqLMLleFFvkhudJqCruVSKkSXGHoGYONR6nzczLKAnpjPpXc19F/mi2i6+uptYJ8ylGWjqq1cutGpwb1br83ffXb948eiDj8vuvBFRvTLrxjtaYIItr7xNt8DOW5QgURjFw55TkWKVZSJRrRPFgmyOMG+LV2ChVkvDxSgcRsILqWLEp2IMI+PBRim1xL+WIQkdJjrQZHAKBbEyCHS01ntraZDp2f+WUjKan/CPrAA0Ok8koycgN7KiyuDWtHT71W9psfuff1rmiVm7xYE2B07r4vyu1ucgs42OLzrjyGAZ1xNvtJMsLlhhTIFAHNZA8OJr9UFUqjBIJqfHbkaLw+kAPOaRd6dcSQsdQXbweE/TJQy8zGg+DOHgsVKOyAgIwAtTBT2ILoir5yO1cdyrfoLTPEv5pEgzS5/TDQz4nCy1njrZ2DfTXzmAisk/MJjbHMVo6K+j1cku0hO8G5VQLSXnxydkIOuPPOxxDEuOUhOHSJOyUWEFpzfu/OjxYn4JGxhllEejeM1gkMIhH2HC4mV5a+vQXNFDvDsAgzjJQcbQacJqUZKenokFIaAH5oQeCTV6n1OFDkSBHyczQVW4CllZYtuWjeJ7ZJjEdTpPpDLPaaxUhH5KZZ6ZLO7MVOugZQXqkbN7vxtz4JVPn7asHoO2GCmWkgGdBFBYDHZaQK210RNQiU8sqGGs5sjlFNlSD0HHqdk79bRyQEwrtyI3sxaUthPhlAkkOwKUIcbCpfy+g3rOkiMGjPY+KsjRe73yI0mGedEy6hoMLB2psYucnf4rgWgJgTRGXQ7v5oB1eWvZ8rYWYusMNjnXjVZ8YLHjEVAZizLv5mMxxPRCYDewljyIQ0o4WugeFLE4ktIYpNEGSj66CUMP1BrpsxGBZ2TP/Dj8JWQpqvV84omueAdQAdHdvBrqaJwLuLTWTq4IKZyHJbxsJliu9UNJ+UtHySFYoDqJ0hhqqbUSwUdgjOEJ9+12O5XplKsC/TOMUZ5FM+My75DzrNSidhCWlerwYsXF2inICrXcHub55q/8+u7nHz16/gyVAz2PPb3E4GIgi2un20B04pTZZt6ciu7gSjQTuiRvMadqObXoPb0mMr2552wkw7eHL3fcpsG0CBF40DwDDoJZcSmWL/h4MRrFZ1hARDUaUbewjk7ekDsTmryXk5XpAL6Yj/8/ahzjT9EA2URbVju72L/1Fj/42Xk/slSBwsgi2QflsCw49Om1Mtgw6b8IwCw+hmdQy048/nZ8d0kPgQaiHudZRXkZHN7MF+pYsFQqePXjOzv1Ma7Bo4tTaGbBDE987K7Cin/CgQJMpWabTsq9jcVDae3Zo5aKXBZHkJEFT/V+HIw7TGEUzNGNh1Z6DE1BWkziNdpbjPtp+WjGYx5T1PiiPUAqd7MgY/jdBwvD7fwV2iiTT3jUqzxLSxoTDV5ZKBXlB1SirOOjWliGWFbIiVY4kJacAnoyUcxgdJPU1SP/BSLcpaaQKqAPZ5b4p+QqYPCAnEm4oaHancY37rxGAKjG4jwBpPG/U9SyPvxWhp9gRlLWYiU2EqzHBa3HbBXmd7Dk+N8TSMiUG5MsU6n99IpPRRgAoRaSVO/uMmNhxQCYENPG0/5U
OOA9lTTIN2VovZ+kj/EBEiIDSpTFU8DmicNLgnlXc3Xkkr7Aee+Agqxb8qcAQLfiZgbRw2YsC0dzpp1blhVRI3d1ue60BlE9hk4np3Y2nncWMz5wJgt31KgOPdnxA5QCBLoFO9cm2BRALECW2D8a1TKj3nfEsuUogaOMNtJY7FV9fPaEJoe8h9TYYw2qinuk4VGVmHkm5ixYFHr16JGVZnBqgjtho5RFpKtaR10f6EeWo3SguQBsNxtz99xPbJzH8ObV6t+HGihoFHfVed4zi82+hSxReMNxCgfZjKbzl3s2NfTocTVqF5Lp6+SRedwQzpqIdq01waIhgKBSyvXV9fF4rLVG1xiaTN599lG4mivlfqqkixSp3qxDDusNZFODTZ37h0+W978xP/1ky1oRTHtvnqtGoShQQuhgxU5lUGaT9XDU2rt7c3d3OpqL5ER2aTFU2BYFtDLVUxUeQuq8FDnKNam1XEGKMSDJOz16pOAohH1Q4g5A+NqgEyrstMXj/qmRLdzL/cS7wKDk9Cal167B7IQkjHnowMZOw9OCY/XqjlareX/5/lvt4493z154TXP8gJKJsF/27JmQagSckBmLoSlKrcEsIlNPF8dv/EpOy5e2yn1tDQOIq7VGUYgg0ovootM6pgaAhaWa1eGvmoPwE7h9OucDxoo7FYBztEtJPJR6CxIWT0NQJWmRa1vjHGJgCYmiRykxAvZg4TqGP1HC9cwxV/CCGRWHPNr9GLvK5YoR0OgYT58/uc+e/xE8bSKQALgszM7ymeYkqLcecyWc+L3jSxrZiQZvtFWtw1e1QCsMp/0KlhwiS3PA3nsfHyOGeRkks3PNVbRRKAIxNMmqEqNM70LcLVpxKcBgy43Lv9SMIlhPo4SwOyQaAOLYwwhaED9BoJghiUCn7zZasm6Q+Zowrjm8TqXWKXpGTzVdzlfcskmqyDFuj9/1tALt5lFzxAWzruPxaHlobXAaWIwZVP2Oq+N3+T1jv5KZ7KBVM7hY2YAmmZVwnWuutfXTvTAlH7KEGkwwiIMCctIow+/QofjvUAcEoYGenNnojpjZEtk3+gCUDdE0DSe2WJTQPFui+OJ3o8bECHygp/GbDhvtaBQwMd5QlMsr0Ib6E6nsROimgxmUI+HcyJoVdOwxgImM/brRzdQSxCZVCEQlCgsFmSd704yK2ORexUks0XBn98/42nI1l1iE0i0XuvcQwMFbaPVKjWbLXOHDV40VheDtcWk0N1pHZZ4IBTLqCoU6S8BjsccG4TJRjBi/sS3VhSY0OcFCNnSDQ50ArZLcbjcFRiuOEpuiCN/OU0krN2ve4aAVEVILdKc4N5wVnqWVAOghlC8ArLLUqiBIyPPQexbVEQfiDlYEIa+H1QLQETSMMABE68BWk0PXN8+Xb/1ancvZzz/Fbra1FeuzhdmhYDnDNcDg6j0G72Q9KY5QrNaS+cZBqzLrFgtBKbKbhRI4R5y04CvBnDRWJBvVIWBi+Ld50ZhAF4NZKaVWOjoJd6xNvYezXcsI1wR1uhcXvRegqhcpV9n2sNwyhWkHLAcCwwsuGPItbcwYSAcgs2KsAhqm2NJWnb7fl9fevX790b1PPgKsaakGqAQ2SxQqafrI9tFG1vBgmbMQLhqGGmbQjWO8Cjk6wu8dp/UWojnQ27oGs9m6o3tHbAFToYzKEefQxMeLi7GPw0uGSwe8ACG2CShb7k52ububVFwNArS4KD8v07bWCpdls1nAWsjK5pIJpBu6urlqYOkpkwmlrw3YBmbWtKYnLSEGeWdsihzwGMDELGGnxjm0SQKNkRSdMemLIJWLSTxGJDmgyaaUA7HN8oqgYM09cSv5hDpxgrw6K4o7rLCW+INA1OMJ7Fo0/+pewjLFPSEJjZFkVw5K8vOxQ52IuiFqi+qFzmpUbBaBGMOnICRmjjIoV3naK51voHlJ+48JRRBLBcKqsRAmtOMqu+vgYWZWHAWq1YG09I9wySasCtFFwFj5yAtQ3UC0OBVjXujqMLDSXZQIlVAns/SJyu2FicwB7uiLtVEd5Nw36hqzohwWeIDZLWApmMGK0RwzOLNGSSBvmWuTWkCHm5NBpGNFOXFlQJjJiXRGEcJquAQi1Kloim3sxGGAVGADetBzLRQyaWNzUlpIidh1ScHdgmWHonZ2NkstyNQ9ZiPmQoc7eu9MlZk7ZHQWBypeoWcgkZZ8R6diMfHxU0k/GnDLeXhIavxsLlpE4givqISrepMCeVXLpdAyC0qNBQDho3pAOMtHMx1CuxqAj+Rk9ewPaTT3nlWHJ8k//lhzR2wO8qjjknrW4BKiJ3T1qU69Bzmj1c3G3a15Mv7h8bdqPi01WUgu89YjRz8l34raqtbl6oWFjHVj3dLIJiuxnZcFavDY/CVDg+CoaURDKW3bLOe+QiqFR6cbIcpAUt3NrPeWAq3/CFuLYaRBQoEA2tLXqVrZ7r66uvnm4yfnH/5sffLgZpoLcCy9wM2LvHoW+4HwmlzdfbyohEsxdD5Jo5fIgvA3yNou6rN0aBrQlvkg8Q9HGLTenKR5qSU8nKPH9xAk1Jy3RRCyvMgkKY9yI3wJ0GMMn4RdFNbelSJjd1gOxySRNU5w4qNRent4Lyi09IUALV6iQ6ujFNc33/Uf/OjsV6/7vG2I7dRGq4LPxiZZMUqAqcX6LA/ERPIaW8CBAOdpBViC2TvVWdLaWjSUaq1W0qp6kwOMQ1yFPg6CmSMbpqiM78BkeQCGSYO9GxhmxgKQA1mvLKH6gAGu7jJDL5wEmS9q1tEMZgjHkFNLShAdvTenGa2kpcZdvJjq1NUHuORAGAYF/p0XuPdO2tJaLfTeY96czN0TBJKNi3JSHOBTfFdkZCPBa63BcIluJii6+V29EtYi0DBH0IZsu4YmCx5AFoAaVmsKXwGfphpenh1uhU2dhkBGi7G15l0Y+GLabwmAQuuYuux4/uaCh/lJGyvuw7sRJ9Myd5i5Yej6elQCgYj6iVhC6xgrPQYKCIOhBDOGNJfB0Hqa3gTRJtcwxbugj9MTkSpHkfJQ2L5ikZWXTnJh+BRE03N39MLJ09B7C7soJkMqKWVmhkQNzMGKkg03ks0aX66HO45bM4eFVRmbd8aqXIMD3U4oL0yp3HMfpYIppMSBP1XGBtheaK47Hvmo9HJB+KAdjxl+4jhuA9My2FRLhBG4jBm0pfxOb25uaTWaGJJ29+ZdNrzBUz0/xpc+SsSgEpjTnESFFaVCl2CF01ArK1HdC1BN1bwKFVYJok7LAsF6pbA612aUT3TSC3olLwp30gRN4AxUWjVMQHVU+QTMjhle4DRM8GooZLX4MEFeTHog4bVY0ja6NAayZiWQ8iEkTs52HGiptbYuBlvX3rsIkrUtq/dx2k80BM+hDtLZihj8REVPlfLLfMbhFYMYqbLG52y9hSjQodVCsGYqIT5LXK6ZRGZZe/cNp0Ynv34KZLwOUngseQ0SL0sCJyeU5RXskRYueDArEWZV5rNfoPD2ePHsc5sp5xkcQTIpciQN3YZlRCmFg+qlQfkBLMSaHd3hTQ0GFgoo29l5+gw
cIE7iYAPM9lMqyRA7IB4EAwVmidj7HTs6xxvh3+Qh6Mw7YineS64EzcO7G6fNV6SVlNjnBzKDlaRX9oCEVlenq0CuRghu6huRh+P+nXf2aJtPPprqFp5ynA7LgjabEofLwMpQnwORfcfoBwCthnJPEmDLsrSWsTh6tiEMpEDJWnSsY1Al04nEy9gbbScidJyPYWgUegfFxYm6KPyh3MBSTo1rPG9mVwfIEDBrz1yHmB+3mIbB3dDHYCV4R6GfJgYnKztZOwHycRXi7cgVRVydavIYgj0ZzU2ODTA0r2M8kNsdShJkYgn1nX7cwhQlml9ZLmyuEfHiyaekJNLCIHtIg/0NuS9NAKTO4XPXlbC67p33TAABAABJREFUyeBeS01oEeiu0+YPwYPy6WPynqzBcE2JKtAgV1OzofgirPWWfv/xOiwATyIEbg6Mlbd3VKZYCeqnaU609YHIjsFX5nzYCQ43i+Y4Qdk4uVkfj3l03FRIQmqhHDmXs2iMQqx1x9cc/0cRsnJkUUpcNw/ClGcVZchGwcNJ21hBc7XwPQilDBg01UjYjuEUGA/BB8I+8laqfU4fJ54M4BzbMFEkVwDTBoWhr5AatMF9Hz9i9O+BM5shNo9DssE/OlGd40mZxbwVPSStQcW6e5ocY22esHCTirxG5s7/Nz5/T3y8RDMYLoFxdOXFjGBBzn0pD6/i6mIzimwLajuzPodtPW27Sp2Uqyl0ReWinsMLvDoKbHbMjiJM7hNQ3OleQigcjC6O0j8GlINiagCz0z8R/puC/RRh22NpeySO+O7NlKGhnoipMIuVMTHuiH/AB6c5n2psbKfHtY+DGyVNGIMZYxqVlKIEPj228bgbi4NSeIoVY4Wxqcg5pCkI71WzKJ4BOMONpYTZcosQD0vhrAOW5o7IMfjgoeUrB+FNUsDUBpB9WfjoweH1Jxcf/eLBeiB97UDdVIi9UT5FMpFXo8GtB+IYBUHWyNGy1MJqNXX9YMhyU09wRzKND5JR8jTE9qja3b1L3Zd1OTEdFCQ7JET1SoV4+r6cRpPBVVEKCA8z7OguKPUYGQcmllrHeJtSC8DiTioYSlNsSi1AgaHDXVWwFrbJ1NKX893y7ltnH300tVsvEwOKeWU04CGId5h5GHqfiBvrsm7PthfnF8FNE6RuloVaPCCv1eAtavKmaAuY9g2JJHOwQ06olaXkFH6X9oAYcfldwI5jHMuFLGwzuhpcva+9t6jwJiuz8p7H8GVqQMuy1+zuWMGy9YnE6e4nIRaSGgNgyGlgcoTP0e5sG/ErAnGYKRrJ0xrTLCF99K9KiYdlTYZIyX5HBmm9pbjJ4YaejYePXX1YDY3WzJuhGVZDg69IOftoelQCfRpnJphULJCae89X0FMeIqBJS2uO0fsCiK2pp7ibZlAKdn+8lHgyuUIt4MtEfm1wl+9Ao1BcvUoU9Oy/8idJsn/lPXvKxog0blQpJf4hndoxZN7yhJbjK4s46TgMJ56nIcuAnpZlxN0kWin6jAYl358Ze2sxc+Ev8Z/k8JIq0/A/sQa5OYPEEHlTHkh79eiR3Rwl06xbGgeNWsqh/A+i0JnkJyW8pWjeBC8swQ8BgNMGKwS0Zga6ldMSkYgJsTO11FJC1RZ75+C11M08YwCsyaoIPNhVxoEQFGo2gkFoTRlpSZKd0mYuaeuO3ETtAqux2EyrtGKsxuqsQ5RcDUH7qDFaktOpqSwAd8XmeRW1LTSrosyK5QCuGDbEtD/cwCZ4Ma9ABWe36h7dMGETQLMJPrtK0jjc5Bbcrt5kRrU7iUshC2ses2E9lnl0/DzbyXhD7lIDTV2J3SSAAjc0806Yo0a2HU1U3lQAoZUkfUCyGI8mNrLUwlQzWwEKvJSoypmV7JF+oHfCaT7wF8jNne4cZlIZlizrB6TM3lO3VIKI4sPMD4OmMTDrOIpk9FuxzM6gdTkevvv+8cXV5U8+ZLVSWc0hdhrNWmuBMEsqxlqqayzmiQuYpwWAGyko7GFb6wTXtY38EPMgAMx8G92vBt6V8JQDKbNMjMs95rxu6G5Ip0tYmmAE4jDKykF48YQKMv0AaR4UJyRyuY2QYoMb1YFm3oEmrLBuENCBCpZIvSiLOWhs6/6db+Hmevf5J17nLgUMkEZflgSLmHY5IHhXbsYslfub/fX1dWFp6vGRs7EtJbCspCONSGqWjI9gyLfgtNiJ9RqleJO9mo4By+XSKByNX+rsA7jkmAtI/U6tkk0QAHTCjd19NQDmpAC31CgasjoueezdXadO1OMo+shq8bUjU+a06fRjEMYC2hpdmUtSU5jGIQU08bgGipx+H0F5FYDworBsEhwpRdIo/uJXGX/CQckjYLkh7OaSnoqAX+Sa5tmD0GmcpikZcoRMy7ootqRLk7Hgzi1krvX0arp7k4cgZFVT77WUWqimWkstxWMXuFRLiRtC74iFuaBBQYUWkv0fN86DcSM0YdgVyxgSTPco9zNuhJUOE883t9QXJcJnDipxj0CnMhFHfo4qdLRyMd62kSGgJC/GENoAgpbk3m6mk+VT1GdMetM4JEpAIOeaQY6Q5A2g4ozFOzUUgf10xH2ElLtPW4eTYkEG6yDhFGNlhTsdc62baSogZGSxk/zBOch5jsj7iLxWgbxBykrQBe9S6IxbckuMLD0MDWmpix5dSny/NioeAnQv8HgF0WPX6FTCKMVPAKNZa8oiAGVshqUDYZQImvKxRtRzQ5GqqX54o93Z/fvz/NGzr7739rlaud7va5l9haE2WDPWcjGKxrv6K24yfMq7DJh3VzMr5j0AIKATReylUJZlHGFdnQVghSkD3OiX481xgLrLusTgMk6Zx4wkYgQZTvcNoEQwaxsaTvy2DF4K+RNhgSNFcw0MwCXxH+dQEA5TuySCUorGnj3+XUTWSvTPvYY3hSc8Fv2QacwnLNWiclWWLD1HqMPoGGGCEZKxNrRtnVeHW6td1+T867/64E9/8OitN764fDgdW7OpU0GjKyxG9tazv20Jg8fljqcSLaSojIOBqeaTjCdyasZdHuM2DHcMOmBSqIXINBHDGPcF5uUJvseBHtuZzFyxXt4w7G9ZC4Ae0ddidCpIpZRsTHND+11KiCBVZBabClGaVphE0GqYk8hAQRSs8HDbHr12vLjcffJ0fvu9hQiWR+QNs2AhJJjFkogus3uDI6SUoyaoamszKz5Qi9a81hLkBotP6d2TVSfmTTHFEH1QvU5kzxE7/QRnMbfIYWwFcuSoLjFsASVG3VYCJEixObIVCgFyZQRNuGXjMPKhjbsV/zQllUjYLrIiix7L6TvK4fbI8QeQHUNCIXls7no9hbx2QJw+dEoeoLK8D7whhn8hObbc95dSxlDT2i/1iFGt0ZkcxdQYj4vttKJkPTptdKhIsVP8hWA1h0L1jiyTPWu8ltir0eUKTYFa8FEJj/DtSDdvi1PEwLZOypyMKQTaqKDs7kbF0kjPTzP6/4ycUdRGjOlqduoccHoj8UfjMuYDHzS+0+/lDYhHaAMSVFRPngXfkHxHiRXT6gDY4zHE+hAPBkkBXRC6mT
X0imKgS909JHqEWn6CAVkxx3hhzjG4+wNxiVl3hmnQzL2HG05Xr3UCumRqWpYFjEoxuvqo5CPsjMY3s+FprYADNTkuaJEGQiDemxui6kHsGpN64fD/ufO4OZU01ix5S5ZFC8y9evR+OSQfOxU8hawcGTq+V4uRj8hcAcbYdyI0+nHtux+9nJ+h2vP1V588+vZ7lxvTF1e3V+0+Uc/n44ZAr+pVbkSxSGEhsYCMQTpshHWtgBzdUQskXw3uaC7Igv9sUKwBeNUK0GGgmcyLU8PXOE51utmaFVqptZC3xwMHHjKUA2aOMoqsgC+Rt9Hy4UVoM4MPSAwAvKs7UFE9p0dCDB4Jd/VK9TzvhayCoOa55OVu0KVI96bghKUt5QBOpLCy7a2bWeutsngsonnlJOUrNDNjkwi6ukzrsnZOgGop2+Ph+t33+fOnlz/96fPf+73lYHVasUAlWC3uXUaTtD+sJEt4BbgioJwcLRhiFfUAoSGlBp6wnF9gKGI9M0NWbXl8Y9E01FlZSvHWo1c+EXGipSVozijXOxCfzchCE3K0XMjgOtgw/fB0oPHY+RS2OBEjC82llUa4FxDm8mLRb0UUUOnNAaM1eJFvbLp99/3thz/ePf/iePkaW5tgi7NbSzMxG2tqMjhZlyOyUXzdZKeBrlpq62KhuQtWS1CTIhpnfurqIUmqkMPEUAAiYIEMPyN4IHaaumeNNHLoXVxOmJM5XowkwJJaaka9lpv7EBZpco7QbkkAzndShlNUhFofVSiNUv4xG4simJGvdM/ZfCmlt15LGRauYwpzRwvKY5P4bDCa3KdSe4hIkVdTDrgInvwFQsvgwUMKfeIQwmVIVExQE4YxFpq5YmENeusW5FND6y0edCygqLWuXXKZsXsv8VV8VMo4BQlIndmzRoT1GE1K6q6SBetYhHBHGcunMVoEV4jiB7Q15rFJZ0RiOaNyQotRRVSzuPM+yjHBL0ey+LiUB21KUDGau7JzyW4ibFyB5HDwVAeYRwwAzLx3S2V/D2pSYZnnqS3LgKhz2tDkhNUSRHo5UMw2pcq9BYErQQUAQUdwomaEjLDDqpQ42wkRCTp3cOLhqvMkeRMAI6sNLRnjkIaPt1U/VS6eXyjIavnvYDiUDy8OMd9LoPMulVK658ruVObYaIXiPhIOL+KJkRc/c3hV/mNkTrrjb4cenZGDI3bCCdQgEsVg3mEF5t6EejM9+dSneT6+xX5Y/ME8f3lV/vCza+L+/ma52re6vXj3wfzdx3XmcsudKQqHmF5puPqrsJVKtEW+yBuwoMB7pS2G4mhR06o3WonihxZsSwMEGSELPMTC+NzCaWXtzTtKcs3Um8rJRVsWVJzYbVEBxsajDFGR2c1PFr7RjkolAu442KejHFS3QIPI0tdGkzGldgJadCUNxhrjQ4xJPMla6u3hINJibusWFW8PYh9QKtV7PXkqRYWBqMkiHrsneaw4vcob5KwACmsnSu8w3vzO7+7+9E9e+8sPvnrv2zocrFYzb61VFiPVZVGzY8wYs60GT88hOPlj4wcLmvd0VB+dbizwTmw5z7rDrBT6aSErXK2tcAwjheAORXdg6b0VGsrsDTgq8dDrB1gUiuPeesyfJIXdR2+dpBVE2USzUtjUikzeomO30W/BRaux3Aq0CVZ6b7W2dji+9+7mB392+cEn17/7WMAtZdIEhIOtBZWaDPMQdcHM3VKk7hELrPfuVjy6GHkPhqc3K4bYRwskRRl0gZWtrwYSBZ41uDPcixN6zLeREIIHJgtGoYMTITkqA7j1CKjyeZra2uC5Kps52XUAxczMOnq4BUAZfErQW3KDOKR0kQsykbsGxQzTPC3Lks+zaWDdbixyL6WcRgk5SHNIni4qo7XycbBjYNG9AynELIwylV3KDji8Azg6aEjR7AKepOGY6ytiadAtJHU3gxuLneaftNabkTFLNIIsXT3hmRjQkN6jj0BXNuVRUrs7zRLkiVYQgHthabmfkD2brzAzHwbTScIyc6d3WXoqZ5AJjGfU+FFq5/AVTlBpCmagCSgs0TRGPiswT4JaHIYsTOpU16UJThR5S2mAEbAU6wEG61KpZbAOQjBiYYcTZXpTo5VSKxzr2oIkUQvllnSBBOfQkGYRhdbkh3W1mNR63mIZ4GGqUlDMWzeGyRNrmVrvmXpZUoMOIJluBnRrvTeUQgddOZwLCQSGitLu6sdXaAXo8JptaqInwZlNHaOEBllgAumRHkOqxhK8EJOnaX98DRM6Q7KdYSB6+4qcS8bHIDyMCWNnWY1D415oNU06rS7E5GSRNazErb356e38iy9e+u3LptL89o1LfvVs+bOnV9xePpjK5aNHr7+1/fpZ++Dz9tlh91feuni03e9lrOEs0DZO9YqODqxyyY3VZHRzs947jYLB1/AGNw+lbFg3NplHKavQ5HmVy4rTraK4+rytt8djteoytyZ4H/ovRKhDBEjRES2UA2ZsHt2uADdyqiVMM8zgaAP0PwH9mWByIjtKzNW7KiOJRg0uRFcKkCUGZgyGQWxRaeu6WK0WSCM4aHoOWLXQX5g8uNAOd7OShobeI4lXFqmjd5AurQ6iehLF4E1rZV2v8PDhfntx/4c/Or725rOz7W5ZG0tjdXS4G00+wY6wUtNqFZvNfFyXiEi1mjoMpp48XjNWY+zpjIAhV1oHNofBaRBI856rSEJggPg2BWarPdCx7vHXW5i/hF2ilBUSiik4adFfK0IjLYoht/hfmCoTdwTcfHX1RdVitWLu4pDyw7rlmok4Jt3NjQZv62rnl4e33z7/xcfbq/dvdjObVSav0mC9hYeURqnuF+fnt7eH1lrQcJhjtQBz3YzeRSvzVFtvGUGyHSTMzD06dRsdTbYDlQphaNiUxtPKERgVoqVYXegWYt9oMAUICrcwAYSta/MwawvC2gjNHl0Zzax406ayzPX29oBS2phHjAQQDmCg0YXw1KdAcj0sMXte1w7aRHpTgupRE8STTxgsYLgBh1oxi9zpnq6c6YhoZFRqAcGiRHfTY5Sm6LERXSMNuas06AvhhhLgrcbIgzDnqeFM48zCCvraOoIELkcMvoSpFg+g0uVWpAazs3mj1iJn95BmWdgtmpsJnaDMltbCi0WuSvbezWwmJRgtTkGhh71aN5bUCiXyYCwhm4kCYpTukDcrtcndUcdxaYCIQjur88u20MNxg2ZxfgJLojmXpTkctO5LZfHuwWCfptrRpSiGfJ5ntWaJ75gnncl6a5gYxIhaixm6mlG99wJrvaWNjINpSmL0LjcYc3gW/mvZAQNk74oXSpd1gtXGgpulr2R1dfMCQezBUK4QfFI5bHXmU4OziShrIbxXQzeUo2mDeeZ0XFafIMyQ3I4FxcCgTyI93c1dRFGiQYhNBojd2WJDh5kTpqi3wm7A583m9nAI1FBSiX3AQ2cND0i0uFCTezUWRiUtK8Kh0a26FfciK3QSxpV169Zmb6vq+TM9+PjZ+sXN14CXzbnRuPTHF/UX16uVbV25yJ/9Yn92f/urb799dvbVJ4v/0Uv93pOL1+bDzbpfe
WYsqwNNFGqz3g2LYZldMAS+uCCDf/BKJV/GdfViJWZMQRiV2gDfw/2rG9FaMzPLPccnkDauaJRuqkCagCCbPE8Gp2qlO3rvrUWlzuguaeYxrxE6dbIcjxB0qhmjwo3HGyhgCC9GyUpl1rTAsqLpPdEphQ4VEOodtCYEow+5Ot5pRa6SJUB2cLEgKIjACXzEbwy70Bo18XF98Zu/dvFPPr33lx9c/fZfPS6Lmdg6po3Y2AzWSULeEDNXb62FSBcurR1Ea63UE60x6o+sRgmMRbAxWnGKgnfALHxVXCxyVI5qM0AHy5ofhTY2wmLQTWupcg/BSbymAekoKA6GaMtOcgGvIoyy5OdEY94MSnjfJUUdEzRKWQ8oIsjEYzQDLLe3776zfPiXF19+evur35lwPDSfK9gZ1uiMgwQ0CcDV1ctsUFy0cIofEmSz4Zij1ls0lIIPg+E4bD6cXk/gG/NkWJLLxvKkQAjCNq+UFIAN6l5CwckrG3U9MECJ6OEsNn+kPtjC4GOuU2dzmNqgbTIWwXJ8gfiZ0QiKY3jfII/aJjS18NbbVGuTEv0yjzsbE+6xHA1uSAa7nSZomXzN4BmGo5OPKaXTT7YbA2sYU4+wCjoNZwN/jxs5ZNBJcQg1QsCqw+tCKaQxnCifcatj9VeAtNElL63VYG2ShPUwJLdTKIkBRCrQetTjClgmB8kun+c5Z0yhb5GH5CKq1/iuDQgPJ3cEilBY4Fhbq7XGgY3vg4lJWOuawVDSAGDEqIDkTsQFMgVy7qT1wD3boKmG6UmLUmb04EO0XWqQhNyBde2Dm2A0imaxFXvYNcQko8ELOZ4JlEBRBJCaNyG70upCqoKiwrLJBXLqshno3onSa4PXGav6enu4vdlv759v5wlq07LWmV0+N9rUBeL6sNRpUidM3SrMSiwy5wZw804WGntvgFwCa+Ll5oKpd4Q5TBKrQ16f05PjcozDQ+NUKPXKqgEY2kgVsBCdupHVpWKT5LQKD2+fCsyGAhRiDg4dWNRmtus6v/Wz6+1ffHklWK2P6gxb1tv9YVsvNyjP90fvXGnres5z3Zp+8MXHb7338OGyfq3DH+3b//xR3RpRjUY1oZtLWr30qqP8FjhWLBWQqwKLDf6BmYZpcYOhAEjdfLQHCtYnQTNJnUZpaCWjyD2xJu5+WLP/6BfH2ovTdDVy5th/lgYFKObm1Rl8iqF48uSHIAnSyn47G8OEIwe6jBNzJlEPDEw7YblwbqFZQw7F6JZ+pDbAqDEgGpyWvKwMN6lg1LhDaUagwrYuvLj/5fvvvfmDHzx6650vn7xWrq6xnZpYxQbQOmQ9XI0s7QYKS3BTDFBXISM4BTA1TTWoLDEYG+0EYCaJVHQ4xjAGTkcOgA65bDR/iVYN8DaDoku9t1duZkxcUIzmahkbskHz6JhDduOe/3RAfGlpMFidARhWqCM0+QHzNl+SIqAe9V9rx/7ocXvy+OyDH89vv3dTrajB0b1V1nVdYCylrq25j49JIh2ycz/8uMU0MyP9FUnUEIJEAE2uRgwgEBMQxUsIctigqJ3ISwmYeGDZuDtJ+SOO4p15x+mwA4VV/kqOMdBq7721xR0mimFzgWInaeO4Haf/cKze5zr11g2oha23ELKG9YfFLvEQ2bt5Vxos4LQIaWgOEBh6WFTEOBWl0jsincddHksP4qDc/c28Pkjng/i+7z4pNML5XXl695SyHFSgCwaDY57rsiwhP19aI2ya5h5sAVoHpNZIuas1jTp+wJYcnKwsTJGAQQLMcSd7oL3yJoX/JCmgpI4W3eFEydIjUhIskOE4n5KSNHXyInGH+VhtMSq1RIJtnC8vZI+5iZAzLbjDVogGOgtM5s1VBrwXQSaQkhCuG6wEJ9TdwkA3tMmSeiPTY0xmbqDKOISubOQ5TgB7PPjRM8dljW/WQcMUVGpHF+YyqWDX2vXViz2mzeeHJzT84jDbYfu93fNH8/6iujSJFVadXLFwM3c1w2zO4sdq22aeCmBAwavJtrwazdWjQm69mWF7drEsB6DCh196sGiBecy/PdWPlDC+g4IR/uVutAoLWDB8LCvgwMbdZBUo5jNtxhAIOWoAr9vNN354fPgXz17M9eFKtvW4W7mrbWV568m9l37QcYHtTFvV2c8X1Obz9PHh6zff3rV9v5n1E86/9XhX1KxKnbVTDb6qrQ0LsYXfEitwoB8qUQyMUglYgHqyppIaAFppfQVA1tZaEERjDndXKhu62ol0PiIWxh+I8HFH58Johl29h3YlBzwex7Q62tiGGyTGLG7yr+cpHHnVE/g3nDgQNLwiLB9+C4ZE5mKQZjGU88KgNRps+B7EDFs9SsuALhxh/Zb5O1igARURFs2ZBYnapsrWDkv77vdufv7RvX//b/Z/73+5lDnUL2wgvbit0bUPBT6GPD63tQ2BUvxOqTX8TwqsJbU92ZcchZMBafIX9mdmDFKrDQgHGiqGqB6ioIHBs55IWooH24lZGGTSDZq00dy9nPC6eA2hGI0KlMECjNccAS3CQ8b0zPBKFofLWxMB3+L52+9u/vhfnn326f6b7xY/HHop4LIsU60OX9cVjNQICRF6gijnIytEtmtSPEEz1lJq5XFZ8UryGKPRPKhB5k8SswOGudS1te4jGCKyq534QKdRCCPh+Z2GzKKviYIAopPGrgYP5+ogdtraOkOTZRaEm+bhLsi4Cz4aaslpKLWsba2F6motKEQhTTWRS2u5Kmtkv2DJamiiQnEYzyjKbSi5pcjohSF9hiX44ULYn4amNK4pzGC5wCmyXraZ+VhSMwoE+pTFtgNZp8llCcm7C+od4aoROD+89R5/mmMxNJLM53CEqVX6f79CnFBTMO0s6JlBrHSEOKi1Nu7TGAp4G3BdQVKFTgyPmCnkeY661TwOeWz3VYznOnLwXczieMgSROT45km21lhrVww+It0FBOYNQLLIkoOcUAAS6EtoMKDbfAfp5OdBJA50I2Y0FpI6DYqCG6agUCBminID6TXCu51GtvmPFjNrvdc6OevN1fVXz37y9Oc/myGevfF0/w6fPLi4nAn82xeXZ7ad69l7l/3JfNUOQi3EOZoVbMHFjcB5G9yyCBNmNtavxaeWcQ6CNllBLcvRE1sMNnHAEsaAvnjilgOOOVCrmLQFZdqjDEZV3jqStYm0Kq9AJQjMskm2ldM0G8yqYeMo7QfH8qPn1/MlxT7JsNbrXg8ibK5L+cWzK/gGZaMZ3DbOtc2wCzn5eWuvv31vbfuvJqyPZuooOrmxo7CiLLUdG1agGibHMSWgXM7Q3XLzW4Ml0BO0YNfavMOdLGSFqUs1NGTDb8gDGk1hSN6wcayj/DcXNdBaWSbp4nmIYswfOENGg6nMslXdw9jPwJ4HbtAu46cBrwknJ9BRiEc6GFHkFD0R1D4i7X6yGEsx3oDoToHhJHHOWRAspH6J30mijVbl1CnJCe822dRbLxe3v/Gb87/6lw8++Mln739H17eFrRUyqzubnW2oLXI8xzLgFPqQxDgQ5veJLCFRiFEqycxOvSSA4giZhxeaW52qmS3L
6iFPikZIDniJL95WwMASyV4Rbu/w3hNU+0pxdUpkPjSfQgEZ+8tDsxLJxHIn2qig/BW7n6xsLMwaD4f+5rt992fbn3+4+davCKrcSq3Ewu6c08bn91P7Q0PrnqBFzjL95J05iFdACgnB0c354JAjJDKGYiWqMUm9NQwWvSwgDg+630mqhMzjltS8ccizWjyRz8wAL3Vy9dYCTWDvKjVmY/FCU3h1ekqFVZkqzNExZuCty2i1FrU0nFrkrDUSfg6YgUou6lFVWR4hD2i0dM8awRiEJuR6GMtv2UyGHtkPIQEMmOeV/t6j3MtrGQciOfhjUBxXKzU+QyznyVvOJxoejFE2Rzo2D9YKMCzXLchx49CMri52wQY+G9i7LLBNY6DpGEscWJO8Rhse3GFXknK2IDfnYjGD9d5LLXDEDGhp63azQevNe++5mLlY6d6VBrun1mPcEjeUGGW73MnYKaLcMnBqcj0WOolmsTw01UB5ueKxp59r3jpP/kzPxp3Bu7ewNAmqD5m4rxW5txZNBSUniOj1YV1Wai0tXxRYejeH17KB9OzTZ8fleW/13ffen1/7tQ++5uXhuLx8eXi5nx/eO7vctGWDpfzgZd2L7+32y+Kx0I68lGZphRUrFrQCeQcC919cACfXajbF0EXZEkiOOoQzbidqOsxRhSABKY4EgMImTYR7H1eNLLGvPdSMRoCGAtsAxTEBBpvpW/hsRtsaqvkEsNw7m+vXzS+4nBU6m9bpsDjKgrnu1TgtbUcSW2IrbRu2hoveqrjZtHN83Q/zm/NhuX2x88cXZW1eCRzcF+hg01r8Fqrme0eFBQMDwH6LGPOlIWgAwFGnFKCDRWrL0hXVthxmyUGH885R0gdMNdrEcYbG2NGBnDoG5prs8LEfNKx44ULrjQkGCU63Mfu1hLBsOK4IZFWXqyWubfQmd4Hh4pSbMTwhVRfQm9JBIria2Xm7u4c5S1tXRgMYjaQ7zDpOALdlgXsXi5Iu6w4avTTIrBTsr27efnfzzqf3/uIvzt948vX5Bfd7n+cu0Es1V3fOtbdkZrnC3sPglMkK3fNbUe9hbH5ngpHZ1taoYE023CXdLIyOGPVRlw35acAE0UW4o9SoTmvc7R69QLyiV7Itx/B+OKeEniQitgx0i41yGJhHFCsG8+iVAunohuBme3L1Q4vtZuaC+truXVx/453HP/5J/fKz/aN75aaRPm+2t4dbg5U6rW2tqJF5AOSI9xWVVDbIdprBph0jXukmkXWb7jJKJLpcpgUAvXcfjQjTjjHMKLzWEBnHvG7wZE/QfQI/afYCWPfOUjIxMr8Ia7HezbwAlnJnRMfYkI43GaYtaxfWqsNtDTBgaZVjRFtrSrHlm+1ksONykBrDrjzTQiqekDPHeKvx8QPVDOophrQ+3KfcDB1uv3Qc7k5FcMpeKWsSJgHvtK1DWzNkan76xpCD6VyHDADBTY4+OD5bXKukZZxAhvyuDOmWgzFWyKw2fu427j/uasGMM5YbK6Qx2Q29aJShnplSwUoZbKlTkwxZMEVQnQ25XIE5tkd39NZpNk+b43IAbFmWUkrUCXKPwiEGvAIUg6SohNwtiHJGo63LitPSz7sfioGFvMO9GB1eSuk97BMyW0eXQysOGKu7hRey3EIrb9oUmItRP4fHZZ13V8+vXlwfHj/6rnSr7fynH52tm81ZWeujh02HdkC/7mcXm1lnB7cf7S+49Lfuv1SvjnlVK5wnr603VdEL4AYJCyDX4oPEBjR4R2q7F8Dm+QzrCnSNsxP6FodQS1iZ4uQ40EWP/iuORUB/kKMSFWCxWYJxIye5Qa+OAmzN57KlbxxbagYms3rcb6YvO32aWRZgQdu2ZUu2vl/OL+c6l+X5S1+23E7awXYb7VZubXc5Hc66znSst21GO5+ena9PnsBacbg12BE4Om4dt7Br+Ay7BSaLCgENWOZ4owYHGiBz1mmzttW9uHeixu92eVbJSegBo0gd/q5Brxq4qcGxnopnBG0z0JHQJiM7ztS/pMdgU/gglSzqDdXYFYCaEssegTJvt3GEghFpNXreu6uKQOyEZpFSIIiBspKQGBCAIhCNARIwhqeR9gAEIMcwbfeonSM7dvhG1smqkGTr5a9/Z/rF0wc//NHhr/7OUqZZfgQLQjYf2mgbZFcgRnOGMNYvtTY1cppmrsvR5WCBh5w3UWfSBJ10EIh9ODRzqMMMrQkmGivDgS7EZYSrDaIvYjgsKOLl+OqM+GCJZ8YfOxmkJHAVfbAgQ0/CDTrzRRhUYkIjWVTsLBhGloEmSJ116rB5ORzeeef2w7989OnT69e+X/1YbG7LyjQtVy0ViLSORAAMJbFyeWZEwGWwxAnSNvkEhPjATzNB0sxKNXVJDTLSSsnWaSQADJA7ki2GecnI4JHWxg7zHNrmyFzuva2JzXrShQaOk3ZF0erlE4n27iQk8hAuytWmeepN4Z+qHkAqgKxCvHpTLzH2KmCa6HqgDAEGREeLsEs68X4AlhIn3BmGNiNrWoh2TphFFmGe/bSf8K4A58nx4vMqDkb36RdtjDfzWqL3xlLgStMIeOut1Bq4QmVRb8UQ6CpCujTyNQadLDkiXXDvCpFZEtJg3rqfSvfgPssFjrFBrmIKxmx3lFQ9xbRVqLX2tZFEaGrkFrasIYjyUwKI8tYC3QvR0rIs8UDqVMOvNSFT5bxJgdMEKTE0gGYBzgkJXKQSOum+8bxtU+raWjaJTPzOE9vP0ChHZSWppuawXLIc6P5sqNIMg6u6z5YPiUs7u9q3i4tv3Bx8Umn1MfyS6yJoyyKYn8F6bdelk128PNv9pJ053njjwbPaWebtaovowBbW1AERLU5eoy2w7lqAY++tsMFMauQG6MvSKk0xPrIYnRGUjblaplkDzVrs1YXFhp4o7WKhUqVVdxqKsQCFtoFPsInaoM5+z/s5sDGcATMwi/PcN7hWrZ0AZWe19X4U12oXeLm2ui72+J43+WTcdFwc7ZLcQRewB5t61nB+tp0XAtvN/vG8X2oVTar7i0l91gq9BHZm18De/Qoe0ECHrgrbHIkR3qL2U5d5n7dn6/FwahOKmXvLLcZhmt+7w2jljnLh42IEWtITGI66ePQc47y+QnLJP2QhXaAlLJxGyTZAmCCFBP0UYek+4E3FkQudmJAraSKjpQgjXTaRq6YoVyzwCs1lU2fzEEEFhbikZg9egnOUQ80gjyUxCch8DVX37lZLXbXCJ+z3fnFx9f6vPP7xDx+89e0v33ri+2uiNC4wUsUXhXMX4G4ELf2A1Ad5skhdSxvrR1wIFkWgzihWIHeTwSL/JV2oRv0YBU7qUDOgI2dTo2eNmuKUy0cNM7hUcpTcqztgOo518V0gZIGEWRiYic7U3UoSw4GE8HDCS7NJE7oJpXCe56v9oZK+HNprD67fuP/6x08v3nn/+RnOjhJy17G8uxuBUii51EkzlN5bTNyiEhpU4uhulDouuFuSui1pBAFRmtyhTomlMtYhBDlh9J75B7PP9bW3OBMETs5BkhieGXZKzAZ30YoHJxnex9Ygh7uUd6S
H9cQ0zUrBlDFkM8xZa6H1hhAsrmjwEuhMtXQyoqP1ZrWsrTmqlNQVnthwnviMcpoywNagg8tZzIuhK4efsQMxthwou113HxOkRH3uOvVRkkUNPchvDpzmwfk0M135oHg5YGy9F2OhwTty1UQaOZKwBlYqJg0KWUQO/m284vh8rbVc6hCKhtFY5zvqie7GF1f3UZg3ZiVMEl3xzKlwteSY/gBIQTBKKa3lBGXJdbnpKBJWJoXsPc6eIylDMLAEJZm5oJSAOUgu3oYHzymODm4Dy4lFmbiCZOSi1npjLd4VxEvrqtEBVXZ35R1kzptUWdgVTcRsmonZxU6a7WhzYNjGelyq6Cwbrn3a1A+uLtu83drcWJsEFT9SDNzFWXfXh7PpXD8um1LuvfPwpnufdrX3pYjutRehA7eOlVgqlmrovRlrIY8OwkFOwhHywklYYEYU8wDscweme2cJpn2UTaox9U2DCB9tmJGsAsDSje4TsDHsHFtD7RP8QvUic2ffdttamX3d9stHD2fjtQ6VEFxH1FvrHZu23D5fvrpt9YlwNaMa77HdK7hY7N68LwfMx419dfHlh0/0ObfY1t988GTd8XjQ3MADd3vsDmV7vd1pA2zBK6BALIBjMTbT3tBm92ZW3KsLwkJWtUbW3lt8Ww5ZzA29J5+GNEBoPCHDCPlhZyksbL3F+QMZNAczuzP6c1ePeegw4AzHYnM31FIUJnZBq2J6sjOTu8Wk/hWwCsgo66pBe+zJSfCWvqmJRpY7CSNi4W6vrBA6vHCCnFG0WTeHdRtUECC+l4GwxdDL3dUbgGqEo61HI0ytF2rp/p3fWD/94vwn//7w+G9fk+aaMTc0Z6LkOdyWYCgBmNHUey21jy3qlQwihQ10jyi4683MXWWMIQlR4ZwSchQTkYJfFEbl6EonBbJF00NH7N5R8kQ7crLbvSMQA7Pee2VtrgKSk48+8CQ84jCUKE6SS4vW0BiPHeaIFj4yka/74wyiWy9u7sdvvrc+/YPLz55ev/eusFghFrA2Uc0nM6tNDlTS3bqjcHJrrvTsCZhVyV8tkBcLL1C4g0jPbcFhHtm6hkxo2AQZWAVZEGowyDdsamRlwVzKclicwYBCd5HWilWhuElALQC8tUI2ryXQjQKHF3g3KWgNrsBYaMV7LBs1Rx/ItofAOsZ669rcrGJGwq1yC72LKStUJyhvThbWWITjaRR1YjS5meW6ZhtIUGCVcthofSOxgYSVAne01koppRZ1772VUqMnCzRBlnZ+rSlwzyDu+auYcVJqEgSD5UpOOsLJxeFWUvLkjgJIWBbB0FvY6riZGS2XBAdVwnNhcJNVlpT1GnvvBjhh4RrhKrWEb8yqrt4NdO8xnQlbRPcUQ8ODE4MT2yB0MkFSDM9jea+luqP05ASENOoEzaVnAAiE5yLW1shSi7c4nJWAdbm3VmJCHCN5OVLMjw6VHNkPFGJQQDoMNPReyPCFXXsHDGwmGoxW3L17b2ZAAWEWa8Kre4XNsg1tMmDBPKFanVBXVtzq9uLyqnHWQQvb4XaBtkubC3iwFb4ha+SAWs4wt3rWfTttz44/nDb1vD+6r8lhuli5L9ZKn1yuvfut20o/QAeaiuPgZu7VjESLPWzw1bhxNPcWFtCAF+DEE7IMNyagR+3lUgtktIcWS4ZayywRPhkmYIY2jombonPZznXpunBelrIjdvCtaXO4/97F4+n62RfCFrNBK2zfsRyO1zPOLurTZ+3zmx2Oh/11Y+GZdlovt+3i9e27l+2N7dU5rn7j7Yt62P83f/Qvn7zz3pP6bIFd8WzVvRVnV9je8PLZg0fX261mWKwTc1gbleTLCZqBOco8dwnLQMFgCB8UR85pk590N0RLvC04/rmuJJahnn6Yjcv9CkgVQVO5xyVZLw4fG0V/ieCUBbUH14+VXNYGuzv5qdWLwjXmfeYc1KpQlLryhg+STKQw62qAqTvHlgWWILbkXgcgsc3T0K7EzF9alsXlQdlQArZpUVrgMDz77vuv/8G/3n3w0+vvfY/X12tltPkCspXEaJ0Yg1oLosGpDXMY6MXtDvwdYIIHOtMBoJYaZXzM4pMAEk85lgOOyseKFVpvuZFGJX1a82W5eBc98wPFZzRD720zbdbeSmVbvVaurQ+pE08NUPqG3GFi8fXSRwE5/XWDtQrKq2zZL4fXX192l+XjD+d331qACm8TzapJG6Gz+ZCYRCI5qXpbVy30k7/x6QWfjhsQMKkCw0FE2EA+oa4wKKSVqdbDejgBqfM8H24PdZpbV+W0X1qtk7lpXUGjkw2qlm8ulwl6KbU1zNW9LwaiY9rM3lvEi9iiyhxA2PBkDjQmLktiGMHwspInY7Bl853kbRpzTAzmnnJ2a5kn5SGYhsXuhBjgu6iwImjD1Cnf1emcGN01z3PvPSDped4sy2Isp3TwysPO6YDdnem83qUUjTfjuLOI7IkUGORM+bHT2KK38SQxTEkgV+sqRp3mxsbWu7lz0KFLYW8NZC3VAIR9B+DqbkGH9uisBsodI+vE32qAImGpMb4pD2jKVVl61+16W0pprcUIIzFtGIuFRU+TKi08hXqXhTUGMfpw9+RR2sDt87adYoEHomDFTzTSPI0WJJowxgzuQbg+hQdoz+8ECDMQK/IwXKoSpUJuzGbY1n3rNldUbYvNZlVlrm27sV07HGjF+s2hbi947958nOseyzL5Ymfz1NRAsRQvDQV+Dm1d2z6d1Z/wwV+999mm+u2MuekWdWqybtiaHRwL7AC/dhw5H3fb+fJweNFlZgugWktrQF9LYfBhIuF4+CIOb1C5mMvH4PB5moPi7gCQO5pqZRWKVIUKn8HJKnUmbMEL8wvvD8AHsHP4OXFB1Hmdrv76d+pnL55tzmbry6HUtptvZLZs8cEH3/nGZ/e//Xh3qOd4G9+cLt59/Q//8l/89u9997J99biuvHrxt37ze9urp/U+fnrxg91a39xda8VzbRdcH1gvdPnCD4Qw33/54CL0DtaAI2wNMT/8JpRRHWDxQGjpcLA6BDRDGV6JMSWV+UiRzOuaIoXojEP/jbsfMf4ZfUoOarO2c3hTS5VniYv7yoG0QfUyAOrpaprezp7khcC/YQEwneZPYCb0HFmfJnhxnH1YUiR3FrAEyhLHHlNPj9MAA3qMHtGWJf6im6k1kLEYj4w5Kzo6XHrn3f0HHzz42Yf7t755tZ2nJg9GVU7RR+waZKkOJcuFyWCLHaMjCWb+dQwCd9fEktTQniaeHtrqfDED0TKz05Ql0Gw/iRszBLuNziz/tV5LFRwN7oi+vPeVQFtTQjBVtCb1Nm+3Ll/7aqMZ4N18Ig8B4yTE24ppYpNqdbE0rQ/u3Xzzm69/8KOzZ88Oj5/wcKhkE2A8VhGqPT0NEu9EEk3tjmEa/06ycThORkLPaWnijuQfrW2Nw1DGfqe1Hc2sJFAKuE11nuZZx4Nan0p1l1ozxl4jdwfUo4N0X0uETLHOM7zRYNXUe1sOAGuZurtaCxoF3E9T6eCNe04RM4ieKLaBagPeY6Wop72gWbjI3x2OoA7l8H
vwGXI8NLj9yCVO8JSKB4rl7lDaVboc5j1yMFlaW8kC9FyLPirwE2np7n2bM20ugt9gsdzGggsND9p+aHeTJRD3GAG3ynw8lBO/XTDjXNlbh5lZ6erpl2JpkuuAdyPD6TUF3Zn2FYCQs1ihrSfml2dzkBP1aE9jaJ+3x6GUy4/YBHnk21RTDp/XnEkDCPONkwdqxMxXGM7ZMhjQ5YYWhgYe2wMtF2MjiKKpssz3NwilqfEdZIB44DGxKWaxeTbEsiSqY5ImV5XN8C1sa7bVbO2iYDbN4kXXbAc1zleFatrjpbS7bC92tXXfH7E3O+DAA83qll680yn0qfW51YvSt33ZTj+fX/utx8+KlqZyJmt0b/Br+a3jOFyvDlgnYUWzbUcnZKhLW4Kd6+gJlhjNe6o80zwRUVXAe6xLU++hSTNjHEtzVaE6KlihSTazTL4TdqjnwAPTI5QHxkdmF1a260W5uVyfffXZ8nd/6/1nD59+erNu2Q+8eCkc/eJmu/ZHL3/3b/2tRxffOny6LF+2i3d3Hz/7+PIbb63Pvvj46Z/Nl+sb09UP/sUffOex6dkv3jJdvOQ7j/TVoW94f4/Dje6dYXHp1stKeS3X93bWYEfqRrh1m2AF7hUwD4oAg7zdbOQo6CT4jhylu1UHZup+Un3koTaCbs6sVDLW+4AQRmJA+C7FXk9niW5bQBB2Qp4kKIwJkQHFw1m3Bb8fp9RiQRs294ie2TD7yNyDCnz345T0ggqRNpo9vlunDIhQEWxeD5pROIfFDDqu6SmABplFsAq4obtT3tp6+I3fPvzzf/bgxz/e/87vaFk0y5a0JniVNz4cYJD8rrvZ+n/IRA16uZFwLxaTUcUIOT58RoOkvSC96OAxNwnGI3It+d24LCFtT3PnrIPUs9tiWjOoa7PduPtyXPpd71jb2uQaTfern9cxhDI40aI8s8aGZVnUawEaDjcvv/nNy7/84Pzp0+snT5pThKQ5VkSB0bqF5AgEPTVIya6KpjBkLoDfhbJEUzxqkZw1gidKj1xpZwUlzSgeOZdlBbDe7j22jYd+tUI+1qqRIRw3g+TretzUSaA3oFYrbGns5ADUVtUM2B60Ig77l6D6weHZE6eAx/OYBcHxJKqGy8MwFRH+LW8VDK5CIj2uO2JTvZUY6riBGEwIC5lPHuNU075CLwrBMAtYK9zbaf8VBvkHQ7ae2QI4ZfPYvGUspp6UDZThfzl2eIyhPYfa+E5rl+EnPIGGaUxnLWSubYqqWB4Gsd7U5zqH53FvK2sJtkclW3g3DAlkIB8eBZy9evoHoSSEuA6arcqIF38kONVdKQiOnRGWBVNcrQh9v3TqHMNWEacRevxggHbw8aZHxtUA5U+S7azXGGOCXN0ZDEohtlGdJlMFqFKJYysVeDHfCjtOs50TZ9CuTxe0Ldp572fbz6/s4rxpM1dyo83nL5fGG1+bbWfs6FeNu9q98Sxc0b2ylM7j1HnRuJun7dc/a/Oj892TzfVxsVYlgQJ2hmvgAGyB2X3vVnG42tdpKr6jqRb2JhaskKMH1hhXJPxFcqnDaHxjemIhzU/tqCfx1lDJIkxmc6lnwLlvzS/M7lm9sPZAegx75OWxXeDqnl3d8+dvX1z/6ZdX5Vr/p99e/8vf/0tW3rbdntujrvb1+oU+bvaNn3zZnn/w8YUuf/KDD7/ze+++9xj15bP3vnnvdfvqra0OP396sWB7Uefl8PQXX/T739gtz7BdS+UZlj3OZFxtBouwW853bSH24Ba+gc/A7FYNrRp4WgHp4Z0dmzPyuzYLB9bR89456rySQ5KQOtLHiTcBhhBlWG8AYT/rqUwJYCgHvVAuBotn/kpnYyFviuiFBKYVJaS8o+fdPO00S5f0UdtqBFyDCbLQn8NP9enAAFPqE4ZLgMVIDemZADPUWqX4RkP7hNEcJOPMCII6Hg8PH+/f+ZVHH3/44BtvfvHkiZbbmVOaSZ5K4/wft7RuflXT4a/cyoyWBisYRlB5jw3BbQk5rI+Q7Tkq98zcsO5hbwiz7imMHTCoeTJv8u9GUg/CSGtrmCOv6xqrWFrvpRQDmnophSgjCWR/e3c0AARG7TZgNZNrkVeUJjlZ22oPHixvvb17+vT8V95/drFDU6VTqqAcDWmWEu4Zcg+jZjINMBMVTQ5LekDG5CLx2nyWSJzPTj1fdoY0tKTZIDJrJKJSCrw3NZARQzfTpi3rKBI7UNVQvFC+v7k6u3fRDsv++uX24vz8/N6y9u5OwmLrQ7DHXdQgedLQLRLG8K8Nzm0bUPNpfpNs5LsleBgdfrzCoLLTlMfJzUyI9UYooLua0hCjwJrkSRy2kKHnE4wzk3bnylIwu7GRIP2V9ntwvJRjhvjIOWRKN1UrQ0QfqwmT/xvD6lia9arw1xhQgaSeLm0O9T46e48v2VsP48ygnkxTcad6yxo7OXjZfePUv3ty1gKxTxBo/F6PBTApE+e6HDdn29aV2E/MtBhSdZfSh7vUqob+S3OcDIZZDKapkZuZFWMwRmOV78i0MUrr3tzhA/8bZaWPhwMAsXYmh4luo4qLsGBgASY1AhNsC2xRJ+yIc8PO6yX6vYYz1kte1flqW2fWWuHA9dKmXVM5Fjvz5/LZpplt1whpUuvNC3s/osG2c7vo3DVtbbOzH6zHe2/MRb2Xhl4gs9l8cjsa5hj7o9BM1fZurUjT0htYV7WKaibHKhjQjBa7Fs1hCJwHkCxXDsQ8FIPtFzAA6lw2KFvYBth4mXBufiE7N3sAvCY8MT72x/jygX11yZcP+tfv1Nsn7y4/+cEf/q//zjvl29e//6PP1u29pd+7dlvw5PPd8ki/uF5eHnZ9Pixvf+fRly8/e3z92d/9jbc/+sM/OvinX/sX/+Bv/NqP/8c/f/Lu+3V5/vPPrnTRqpXl3ny2Xabzr2ceYWXlhdnWcd2x/eryPm4dV9QWdnBVswmmAquA4JNyGWbLVoXhONyy0x31SRDV8uolJc3SUCn1xGDIYoLWiIjkMZgdAXH4pjrpPXGGtBIMTxSiZI2a+cAwlAmInJxGTommSiSLxiYZi+SUCHq6FMXM0jJRuKOSCrZnKBCY4nzzX37JBqkxeCi954RpdBCZ7JFFdXHKMGNe1uPtr3//+vOPHvzgL14+un+DqQ93a3sFm4p/Jq9OenGd/gyCwBIV7knaYY6e8qXhSTJ+OwC6PhaPj6bWYo4YXKxBKM2WBaPr8hQBICRrrcldXWKxgPLgyPnyaNfCoaUEbwfDDxoWvLLxITDMcEZkMnT0Wu3+2cXNfln7weRXb76x/fCDs08+nn/j+357qFVLNYi6Yw4FRT6tbuP9nsS9/KWiJAmAyiItQ6zGUw0U8QQkAuqueL9AUDYZDtxBEYjJbVVoRbogl+Za1uMRfmtobVnkOrzcqy+t3+p2ffbs8+XR44vHT9IBLKYCUSTZyR4lIYF4kdE7JpZEutK0AQg/EACKEpijl7qDzAGjrUqh5FSncElKOd8rQ4EYP7v56Sq5w+Wx9sAQROvstq0w7NYtHQFxgpbis+PkoRBU6JjX6LQzKuAz7
73nayp0+bSZeqzLdDf17TRJWlrsUTqB5nlQa9rExReMDdiEoctslGVxTbtUGStMaPCQjSL4i5IQRKaIAyZPhlrzHrBaQMHD1SeKSZVal+MioJSythWp7OpDVmVmBnnz/sorCUbCkCxhsJkzFSMqglNDbDj9izhR1/KiDMEngjEZvJaSxxuOagxDHgPdqkSgGopxIidgJ82yWTvHhXBJXKI+JO6jb1Ef15dLa5jvbet0eAbw6kDb1Oli6kud7i3rl70tzgsKsA2sFE7VVln35uDF9jA3XZT7U9tNWtv+yeW+Lc2nWaxXux2u4Xu3GahADX5ZEWTd634XOqxKNB3N5UHsBYGGYbNgNISLg/yXWj7IPRIEoW5mlWUmN7Azacam8B5x4X4pv2+bh9Ue48H89RO8eA1fP+TV4+nZg+MXv/V4Ot/vP//RT//+Ny83z9qffPJJv3/vBc+POpTKJ+vF01/86OzsV5/X89/9K9//N//T/+fNx3zrbJ1eP/v+69++POqH/+wPHvtrP/uX//bm8y9dr8+P34NZBZYDKV5eHOvuWnZNbLvfrtjfbO4t96qdw/buR+POuAB9K+8O73nDwzmlGWBB8mGlAVQOKtzhXWBvQkBPIZplIECJ1aRuOoJmxLxiLKUEfBNrKd0hr5uptbW1SDOchoNlVAEclIf4O6OZc09eQ1CIA0TOZfJSP91+OZi7wJIrmQkY6K7iqKVqLIax2G0QAxbCQ2YbHZwba1WYd3jSXCKHRe0hgOZGMzEWEwiaOm43m/6d71786V88/OCn/Tvfb8tCyx0AWc8gabiBt5RSowkO767TLDDB9fj/aX86+jp3REGe43cfxDUUWA6ezc1ytevwL7hrm5AIeq7+hQ9rhtgT1WMpqSTUWgXRSotFLoCHh2JvZgRtdJVhD4h4XQM2P9UsEGzmvPZ2c7s30UrVYVlee+3w4LX500+27713XaupO21S+NEh2mipgaCNxcAKz6BT8IqHSXmH2SmdGcDYtR5vM7dvw+FdHVn8xflwxNK3GGpaViXuzQzbeXNc1tbWOlNd62HfDvvnX395uxyk7qsKJ76s3Y/3tuda5GqVRV2DgZ+taxZZbmH9G5R4nMA2mIVFQ2wqg929pdO4NVKbDXkA3GCKfZ6EORV0IkchvXXBnAK8JqcyCfMxV0x7FA/eqRMFctLk2s7b43JUd+8pE4olyZH5xqn0vI3w4Zea8BkAP61IYchT0NUnTMHvCqlcdqJp4WFgiCMSwW1q8cWmqb464zCwpOwiq4veOoqzBlHfWoxoBeSeLbew8MsNzsgbMVYsEp7GGiRZ5S3sYB1Os1ySqNFORyEaDnnImUHM9Bmrm4JqFzxRZYk9wK3A/Vp+BYvqPSCHJOiN1CtPRE4lQmkohofhtwejXrlglZyASV69F9rO/VxG7OSX5o8cD8T7pd9fy+PqF3a5vSoffvre5tE3rv9yN73Yoi6b+WraHuv1FS6fPbo/nZ8vh9XPfZrrjINrmWxdUA6Y3actrt6e1wf16w0P1nx3sO9eXl3R6CZNV3b/6mJ3fXGx3N9o66rSzP6VrACgu/n+zCWYZDJfgGrs5kEGNcAbwsIsFec9t+ehFnSZh40WQrBgtdQtbDabrZzZpfGcuHR7gPWBT4+42+0f48U36/PX9PyxvnrEry+W/l//X//L3/utv/b4m3/l08/2f23+1nTv6R9++PnD167w+GILr18tf++73/nzffsM1//iH/8//zd/+9df/uS////+P/6L3337/Mcffzrvr9ZP2y8+vb3p9w7Lt16///Z82BzXBajby2pH+IT58rDgsGC5xfFayw7Htp1wxrKFn5ndGs/gC20p0sFsld0gJkFYPJKPp1QbGPtwHDG+NRaJ7m0QPGDGEgfcGHAT/e56Vithk1sqLVqNOF8SyWliRPlS0iu/1Ky4y8g+bpAxjISQiHb0y3RPawoBHOi4YA0qSk6Oe0y7LJoRosRGLtqp+uWiFsvoR8obkzbLUtdHuxWmdC4P879YkUUBYSni9LK4aj3s9a3vPvvZ08cffvzyrW/p8owrfKzzBDDAOJTCru4x+IpKhiad5rJRFGbczkZNXkkrBXA1GU/dvY3IoAgDYSkg7zF0iqqhm9NjfJ7/YJCbzMBci23dO2OvOGy73YR1vnq6YuFUxBtfUaKO/t5OP+8nJ4sILiTYAJs7WinNUdmh7Xb/5psXP/p32y+vrt5+gkMvKIp1cvCSntkAYhcThpVCWrW4BMEKg+9qxgBNAlRheoON0P0f/IgJX1MphhxcK1xa5V5hKHXx9rzdznOdZP365X5/db1/dvXsOZpWwWGtrfCX2+0M1C8Ozy8vH212F4f97cQNSHkrdmLXBHCYfi7JGWZxz2oMAEtRGxCjO1J5/wpqghMEnL9YUxAukt5VSHU3BvEaBWzeGpyCPGwNFCln7NwYiIgSc6bZ8XCQFI5PzRvNUq+AhFM1jB6AV217nbBVHmsy4pGmk6RZLTwcDinXLjTHzeFAs1Jrjj3T7vFUmHqpU+yJMbPcy8sQT4RarMO91lprDX32CCxFdw7zHEzDBMzsROXQXQjLRxwnx5GGemGMZbGCKIQVMQzj3bUEitGdCIETfFU3xK4QT8r0AAodyeoHzL1FB0uW+GB3DgsZdBLwcCM6mPkoKkhrEHvuI/GwCwBiEmzcwraYV9wzu4Tfc79veM37E7PL9pq++iY+58XVZx//6fff7NvyEm354kZqb73V1zfnm6uZfznfbs7Pz/Bi224mHbbVqw4L5gXbRZuL8vW3d1fvXzyz/XXXcrW8dqnbR1qMdcXmuV+f495LXDzfPHj++JJh/XwQO7gDjm5Lpc6bRDSieqgCYbCalm22xjWnUbH10eDw2FV6d4etuLFOdRI2ZptSC7amc8MFeAm7BB/0+9P1Y795wpeP8OL1ejNftbMrf+vyr/+3/8Uf/YO/f/He27/2+Yur33zy1uM353/64ceHm8OTx/XDD/58/fTZD2+/sefZbNt/9D/86eN59+TR3/jh1Wdvok5mDx5uCqqez2jPL7g9X9BW2jJPS1uPsAIsON8uM1rRQtOOuJ6IamHbxUsLZN2ualnutXZrZuAtsTp8yHusVEgr8rgE1ziPp9SBOVCRoKSa2YZT8FRdcvUk5sQGvTEChHshB3UoL63cKwoQm0QJA2tJ3yAbF+RE5UpWTfxHwMCDepOjXScwsaJn65upNIMVO0JtHD0Hw1J4y21o2BOB81wHDXjs5bChRWsNXeppa4rR98XcpjdvVQS1qG3QD7/+6+sf/qvXfvLTp7/7m8uqOtNbK6hAW0zk7GhjoI0MLJn4o1qX0WAysxD7T7W01rOej2NbnCXBA2XSOXVJ8D7KCYJmrXeaVYtVcDEmGI5XNJc31+5sc1hXb2CtJNWbvJMe1lkdPbyg5jJJ6l3VwgBcgzGE0EnGlp5haQrAJyuSrGALPwJC3cIau7fD9bfeufjwx9unH27fftLRrRc4S+3eiqzDSFJdhaUHa4gyoCK2LmebzVIlyHuJcajAWvrw0HFTtPqhpIyJSZcK
jETrDiCchGnuJhCr24ReZVPZ+nJz/fwXN88+b+tyfcSzm5t5mnbz9nY5dmizmQ9rM8G5Xt8+56e8ub5+9PjNJ9/89rFXMy+wHEDGbjEa1ECqt9CYxvCzaWEL2SvUUSb2vga2G6bdvJNH5mse/hnMeT8pwIkusQBDPlBZss+W1aTZm7nZYLWQbOiFsaMSXSopHGx1GFCqdzI5gMXyrJ4qAXexMCysu8JI2ejevY27mwB7Ibu7OYpVj248s3+x0PyYkWayUqvaEqWk4D4m1qXUti4GY63uTtZasR7WMrHBhV5YehMLm8K6xQBvrcfDlAUbq6Q2jJYVdVYVCjtAMxRjz+ToNYc5sQvBHBbyYk61L40s6r11gTB4QdieuoVXq2etD8FKcH1rVFZLa8gdrAbBqrsrDnyDgUTvSk6ZsuISiNqDBgFSUCmwyX3DdTJu+9S1kW81XRbct/5a8ye8OD+8jS8v9cWb+KxMFxfnV9vnP7igvcRjtc2Ot+/Xn+957/P12NoFNV/iy4tJWK+2fdlhv8f5tZ0BvvXjm8v14cvl0vYPN/sH7dl/cn3z8cVG3hc7v4eXF7p3g8c73KLq+aPL3oA1dJiyxbB6vy7AbFqFQhbBkf6MMku/jaChaJW7U6BZ3UztuIYGcJWskFBlnYpms41vaTsrW/jOcWHTBddLPvD9I96+tlw/mW+21/v6NfpX82tny9vvfv/3/9F/e/1X+f4bv7p/fnj85qN/+J2Lf/KjD75a1kdv/8rH+Mb2/JuPbD0WbXj59f76fG735odvf/6jw/X58/3GPrmqS8Xt7aP7j19+fawXD3Q4LDOIyrrgOJfzZdva5G1rTb5nfaTacOmobtcFBQTd5DfO2y161N/FSCDcrZshl756zmCUzFsaUYVm0XlFsRa88ezD5MbuDUIp0QBZIAYjj5ocpUZECEeLu+YwoozRLEfIp2pfp6uev5LpNitRnfgKWR9pzGpz8gnAPIxwTrjuaelNMBbHMiYfVr8WbSvM0Ht0qIUepbi/wuVCVgm0Bnpz1M16c1zefPLi7bde++hnl289vnrr7fXmeuLOe0NR7XMAs7H8ONtKD8t1QOgwlprkv9gOEs/ILceZUaePyXa0GwZzdysWAHLu7xvju1F9B1rtQ+edoELUHy1vO3pvACA0CIYaCB5Hg2JhW2OUOe8gk0qGDTKmfOCMHR/xz4S5cuGZpnijHn6559vrJ2++9uWn58+/fn55n4tsbt4IOsHtdns4HGIaWMYHBhBmBAXeIci1toCmEVabzFY4Iqa7q58IpkFMjYbJwUE0lE91CgvDpla7tWo2gYerF5/9fH/zQo79Aniv5t766ktf1zLVvmg5rtvdbJxevrhu+3Uzbz779OnZvfuXD1/v6iYK7urhkOVKNzJOU+9dZnDRMNcqaZWmUmmo4BQK+MII+cPWkBzfFMMn1dGHOCeKyJJqqTHVNQBBCdNpApwEiGh+hZKLmwf+FFu9gFyuyDB3cHXBRbKl2CEneAG1M/1DPbH9WAWhxPxhLGbeW+XU1b1YCGssJqpSESrNvfQud++9l6TgRBRKxp+8I69nByDlotmmHh12V2ctkuY5POTHswqwZ6zxaL2Tpt5YanJAoN5Uaz272F1f3wA43+5u9vtap9ZWIWgDXmpJIzZDby0mYCyFpNF66zQotiUCjjsXPAzGuJAYdFBokHIm5O1rmlgkdPdY3BanOtRMQIE7fTIrbpWocqNTcGNtkG/Ae+TD0i96eWjbt7Zl8/JN++rN/vQRnr2Fp4fjcmmH+rw9vxZQHj92bn5mzud65P1mg8el+K4/v7Qj8fzCly3319q0crb45QM8542E467c2gue1d3hg9//G995+99cvvlGffHwbH/Oy2vdTHx8YJHhxYMHtgAdaIaD4WB+lK2To5hVee5osBiKA0CLGU2Xy4RgK8IpxQKG1jtrgXvrqrCdcQKJiZiBDezMfAdd9Ad9/wDLA39xb3p+drv3K+jFZJ+X5Yft/e3Xf8zv/eS/+cP7f+f8weMn63pTHuB/+/63/39PP/rF888eXW4mPNt56wtvidduav/40+2b1w+ffcLDkxe3j4o261Flz2/cv3j204/ePnuB77+xLFWUboCj1+PVdn6w8Udsx424sPGBcyPM8Coj0BVXspDYbyEUFrKAFSiuxbAWAWgtyc+ykz6IgiaiGDVmOLJqUnet3j2c5BA+bWuzGurAbHoLawjXo8w1BqkvOAgp1LHAZsiMjAA02DY+rBZHIsZIKfbqL5YyIMdf6pstdl+N28z0NXDk9DK+cloiRAaKPpvs7mPrjisChEYmPn2k4hLprVu1acXNd76/fvq0fPjz+cGTOm1blyocREVVA0okICF4cDghLHQMfSTdBFpXzN0jrzk8wtZgd0UyTnvLjK2I0VGEr9PT8RzgKX/xVNk4zJZIYyV87LxOBZ4epKE4Df7tyXC4woJzGdSTCH+jjnCSBvZIPCVsK9SIKgSyyeZaVpvL4a23vvrk6fL555sHDxc0kzEeR3BrSSMK2VobAu7wpSIRWjEnYHWsY83lKFaMpdZlOVqWIKcTMViwr4Baseo5ZKwEVbuzsB2++vTDr5993kiy7l9eq5Yy7273+7X1BrWbQyE287yu61Gt1uK02/W4Pdtev3xx794jmIEFkuXGv/SINoioPQn48TEI1zTNrS20aVlaVoeGoRsl8vif6JBeY2B4N3wdKCdE98BXLdU2GfbNYeaxBhwAc5ctTwMXBwRVctitsNBO98hha8xQDEGE8DjA48dwlco6Nm4IkxigSON5E81YmABtou7xwiEhZVJ3w6lAf0IKJLJGupIckEwEQzVkuTybBpd3sywRWuuxy69UNnWzNHBVb2SROyvLPK+9+/EgGly369ItNhTnj4gkw9zeY6Nr+HVM86a1FvUxe0Ae0cbATVEiVYyZSkScTMOIPYIjNIklnEKCGx1vKoGbvK810naTVUOZuF01y2ZOqPemZdf71HhJXJrPh4f66vXyxT17/sg/el2ff3l1efXl3F5+861PP/tPN//qn3167y8vvjs/ro/PPsamzexnODzQp7vb22mFHdbDTXlt2/rD21scLvbXZT2yVSzorZe5/lQPfuXPPnj43dcPM3Z89trcWKCyu27Xq51fXzzwK/jWvMIm2ARWw1rBaQwxYChphw0wjhJwYqgFALC0Vsw2m3lb6vX+Jggw1XwnEjNZgQrfABvYBn1XduV6Z88f9Jf36jN/SXwx7176Rz++/eM/vlyXR74s7779/n3WP/mffvLdv/7NMy8vj/hPv/Urf7A8/Xe3Tx+cb3i4nasJ5cW9bb93cf/5h7d6fHv55vRsWW8rX7Rf++bjJx0f39r+s492n34w/Wd/53h9qMbDy2P9xkV/+UL9UmUueP4m8Px8t+x2rPQil3tzwErNhQQ8nJGVnICDZ1Qylwk1ODC9xPmT5MWcwVFlBVQqXWptcckHVUFCU+uS5e7GEEtUOlFBFA9bokEIHL7ljrFpxMKeIq0hE4aOVHkSdP7yD7eToHOMNx13+tpx6FML6Zb
DWyRrKzNbfi2PeszD7Ssa/ZivxlMolKTebZC5khoQfAFCrFXLgfcevPjN3/n8q59sD5+/d+/d53Y9i6tVt3W1XgJz9/+wewdC3BRqnZDiaJQiRI5A87NjlAAxxmdB72JI6ow6yRCTJpNuRpbU5UHWtKB0WabbWmJsNof4yjJLpP9yIqD5CYLCygxxcIOAGgCG3MjJqluimjCrctKFXqXGssyioT558uE337rh1986XKPOcGAiCmuXt15js488YDoHnISH4dJQB47n4Ymd5g6mcLKNmSsiW7/inGjAYLHBwNZbenQbINvAP/3kZ19+/hRCePU3almb1iOA1sRat9udSa31zmVbz0KJFh34868+u7h4cO/RG+sSJEfG4vd4gTIo7abzR1N3Q3GQFcbmrZbijt6axUpGgzvdw9ox6rBXxeUBb+TmQTqTKp74jiIL+jgyHAhBPJbkZfvYuwxoKD/y60YbyuKIP3Q3hTlVoNnIFuuSy0vhuvbeWinFBHkD6FBBavrRXBY5keEAFufNc18TYkDN3LQBM1t7Y9ClStEga+oEy+e3zKEPbnEoNboCssKxhmbJ0UMglHEjTUD62sBi9C7BG2Ea4+xAVmK7otG8h4u7G6zUcjweSymlsudUIyJS3vGhvxzj5LEeSmGQAutdrDHLtrW3KJxSGBvlNEIb4gZf1SbSWXtYDImw2jDVGUeuPsN2tHvG+/YQzx/z+QN9/rY+eQNf9k+rnk7t01vcTF9+9uRD3v4f3/z5nzz95//44/eWd157/CvPBL2BZ/al7a/Zluovp+3LZXfvq8v7L8/u7e9/cazrra/07lj50ZUu/85f2//k2a/wi3/7jTdrO148ue4Vi9cL7e5j99LuXZ3f8z0wGyZDdZvNbwM3iUK5AH0MMwIoZaoKg/4bK59rgXxtvTh8GDlUe1C9jl4g8NZc32WUKhpcdc9ZqOX64882P/jxdWtb326l9Y8/2Iv9u6/zT37/g/d/753Xv7u9+eL4t//Kt86/fvrHf/QLHS955suu39veYt73l49+VrcPD5PmFeiXD/13v/NGe9k+//BfX/ze3zz88/8K//XL5e//zbqq+vbqkw+Xx78jHR4cfj7j5iXOtvXxng9fPHptnaq8ozlVzchmFCtqDfA5QMdISrk6i5BAxRNxg0ExIHHA0Qx0b2zu6JIgCr1JkrlpMkpa2lpLDb51aw3WaqlOFVZLcCZyUEJDPv57oKevRJg0gPzlXweAaKWj02F20ohG7i4BhUwqfl0+lqGnBcuAaOMuxl8LeN1g4WUfkUYd4UVqclen1CWT3M2NDpa5oZVq/XZ/8ca7f/7g2c2DL7eHy0vfKr0gaW4qKqcqgOZAUP7IRNZzOjV8NgZS5wMgR7YwyRoLdwgFQzsamqxFHGZg7HkbcMB4iA7zygJzGkupnmByAVBZvNwhmVGGBG8uGHejZrHTDHIUtZB7qawsigqmmpSbqBrUCT/IgBnEsZtVvfeNX+x+dlaff2f+1qHdgmjqCvNIh0xqrU5zTLLdFfM5DzwOHpozY5KNgqXlcvUWPG3P1AzgbrPG3Tg1Tpyj1EKEBnqzP3z1/Osv2eR1asT1Yc+GJrXWCmthJctc522t++XAptnZBW52EtoiYHn+7OW9174hrSwVXg3NSElTqY6mnmoyY471jSXOQO+dLE0ibLvdSq7WDOljZaMQCsOKkTvzKLhFpZlwRXpxDi7FMKjSNG96b2bW1Ush3TAWMtZAvaAmsZSgSseAw7sCLmxx0uR4tSCmUXR5LUWmdW3TVOe53u5vo79nYSkFyQILqkM4LcbVyro7G3kHYYLP83w8LsEiJGtUB5HkgihJlubwFjz8zJSSF9qw7rLe+wm6UQPT+o6xo6JLFgbvvZU6AbFoiz1JVXFvYu4TwD+CRakuFgqQvBY6ssKPBjoKrjhwDGRP8mDzpm1d/m60+vGmYnEqGHCFAWFXzlMMhEcDXg3VWAxVYaoH+MxeG7e1XlLnfZ4Pj/Tl6/ziCb58Z/5KT5f9R+vyk509n+35gj3/h+tf+9NPH/7v3/rk/zL/9L/74Ks/vX3r8tvL7dMdPjvce3nz4Oqr19frXWn29d52rI9q/9L67TyZLTd+xvX9uurrzb36dv1kwQRv6FN59PCg+ep5uTr282e+v9rdw87szLA128CrWxWWCqNZcXSPygmRbQUIKc/J++FI22PBujTN4ayAqkeoFai5KSO0NwKKrSi1dfJ83qgun+IXT+tPn96+Pr14+OUf/2D/QK9/6/Pt2b/8uR599+z7709/+m/+/IV/59f/2oNnL59/j092j+q/+uOPD9yU86LzxasBF/PV8eU1+lG8PvzNv/tdbOteU+tt93j+4rWLdw5f+4P7x28+3L7evvz0y/JH//fv/Obvvnzw5lf6+toePOe6x2Gy9Yvto/3l5Hsv6mioO1K1gjykqZtj6zhA7qzosPTORs/t627uXkNj0aWKEO3MxdvapdYb3Gi1EhabviF175nXYXEDY/smxTL4y4YTEpeZkBETMzfil0bAI08CHu2rhe8rTmOwux+J7kV4GktYI+XlutpcaQbjyNSWS0nNvNAcRjjlysGqW9zwKDQkU+/h44Tqah00ls6VawW/vXv3Xz348Afr53/ji3fdZ7ch4XNz76F/GKkxm/ciMysge2uOyHomR7UhKDmBexGqUokZPCOdGnFkgxtEm5hth7ALpzxcjLUw5bHhOxQrTIAQV8RqtvFiAJiizcqGPPpI3j1l5N/Mn+QHQa1VLdS0bvJ1xKBjX4rVh7sHx9c++8v64t2v3WzuAA18xYi/TjMCnAcAlBgHBMHGPBrE0GnSggANkK3D4BjTjFPZETqYbNMTN8kIGww3buzlp1/1pansluVo6Fx8Jej1wYP7y+E4zzOIw/W1z/PuYtdiECBN88a87KZpPRwPVy9ubvbbzXnYboROlUNcFAYjcdBoKqQJDWAheqoJ5NaWJQpJMzD8sIyFQE4ZIp0Bw6oR8NweOlAjj7cQpMPxdlrvHo4HZOploowKw+pg/bjMy11BnCsNABeHzXbWumbyziGSOVlVtJZbFFsA+65qVWtDrWGO7SUalleKQkfotMjYYzacrUIl1dPYKxJVU6ext3CFRZdKqSlEAzA+JJlMrqhQOXrQgWlFcw8FdtU7S+lq3UUWuVCIlh5b7l5Y4/CWWqKwGLbkGM6RQ8MXoItZmJ9I2XHQM+X6yUolnm5ib6fr48VM7gVFmaLC/xSFNcy4JlS3CqtEqaWoqJzVXqUZ9bJc4Pq+Xb2Gr17D5+3zG/+F6rOL9pTH58vmhbUVFS/31/Z/+/zd333t4T/89U/+Z+0H/+LHF1/89PatZ7ePiF3b+qHusZm5ubl3XG5VPyf2XNrhHi5uDnhW8f73+r0ffv6TN97hs7Wo+OxCv3jy7JFd3pbLXd+XWdqYT2bFvMCLoyDNf/OADsV3BjUMnmzYTdx1W2QxwLvLVWuteCgVoEKRzkfP0dmLLfMO+NEPn+0//vef3D//dNFev/ha33vtyd+4+fDrrz9/d7f755/rB5v3f+
e9t/7e3/7eP/2Tv/gKD//6+fvHL/p76+uXD3f/4198cF3p97eYVs1q+2lu2/b09rfefffJcVq++vInP/33s75sjRO3zw6f1D/47/Wf/efXa//Wd3/nJz/8aPeP/quHf/V33/jbf/ej50/PaS/ZKujmn16+tiyVgDVYJxuw0CaaJrMVrFIttZpVSb2p947YYp6mAiIgbwKaN4O8yPsiN0IVBqyECLlaz5GTq0P9aCwUvcurG1iK00+DvNwDSguJ0B36ZX6HeOZE85Rt+MutMDPLnupye+V3fZST+YITPHslbQwByOlrjuEWfLhYRuBOnwGRWZiJlFyrt9jOa90WWmE9YH2yPnp7f/XR45c/u3327WdP9vCKdmSlMu+fqFBIEN37WP0e+wyKhQjrJGnWq6B17DUGbKzKQEy0B0QY+sexATLGV/FtkpUh1a6M7GsgS5jM0sZ00oxBUjHG8Pk0ID8BqDQGoTQmgRNLlPD5KYsZEC1IA7pc7qKpw7sgrP1wD9s32uNPHn319Ob5t9bH3XpVAzEW1zgLl6Uh6qZB/zGlMdnJnrwEWusQ5LDCNNH1dOKPQTVj9VBQXsdzNLi6uruRbN6//vrLm5c3S6+oflbnmdpUlB3betxWAnrx4roYXTgcG1aTVjQ09tb612Td7S52UPda5qUpkMjuvZYqz7Q0MJ6elikQSxnr9hwYm+3cwgOXhp62EhSk6M9SOIwUDI86IuuT0enLoVRtA0Dra+RtknK3UpPgYN4djm7GmrhC4gPG8BI57VoIFAapjfWc4qXP68lM5gT4O2C2LMvEGm1h6jppjlJZTkUtje3kUGBoa4NR6ERJ96YSnpQGodayNvkrFIRiVBcY1jQxRYzRVdQSY2GYB5/LAKsTY5TO1orReyvhjG3Ybra9tdu+KK+pj249lMYOC8J2Ozs721/fuKOFEZmDNq6bOwOPGHJknMzSc7zuiKeZPjaJPZtVhM4uQO14wlaaWqmVYAGWYSLKQlbXBKvCZKJmLBtbi447HNoB7bZeXN+eHab9XnbkSxZos+MG1B/f9MuPpv/Fr+3/D99Yr744e/p1/fzr9uULXXFFweNpNzcUV7kqum4Hrz86Prvdv/yds/L9P/zXH28315f3N3DBitTDExNcPLVSEU2Vnj1ZpUWA8KTi558aJb0i9cqVzu1SwmmkmVWjpOrnfd24F3AGyfRObai8OL74+It//v9+56N/stnZ5ZP/lc1bff3psxfLH321+97rb53huGB9tDytn13Pl839t//h3//13//pj/7d/+tP3rv32Oqjx237v3vvvX/65599cXW7OeP24ho3/vDw6fn18sYPPvr0z/afX3/20WefXrz19u73/7uLw8flPvnJT/qf//vDvde/mm7O6rcOH39w+PLfvvnGbz/4/hsfXn12RZ85Odm0/fzRa5NKUSle2WlHmMyW4lbgBtSz7a7W2juW43JcjgB9iKANKvDmMQBt7gK1dbZCFfqs3g8wSQ2+TmoWMBNysp58anlhcdE6SZZCOGUqXkSvWY1nSIGd1CwDNMy0M+IOXpkBAq7ByH4lO/u4/6MNusu5Zqe/m73J+L3AVUNql3/GAVqSwwtHPZ1xQ6VzoczaYjg7thV2pMPtOy+ffPTg9oPHV2/tL+e1LpVFLOhhPKhhdxQGky7FepVEmk2BmjpgiikmcRpc5lQg8qHJvZQaYxMpVurIR3leSINVEiyCF7IMQXRM1WrAZCCAWgjLHOGE+Su019N7iazmDiP9TqgnwUpQqOLzyQTK19ah5q350qDWljVxWINty9vH+x/r2acPn//qVw/XakVuqNFHtNbWw2LRT8Dg3kNY7NE4lJAtu0HCBNRaVzUBrNPsUNTMQ25pCGtVCCJL+nKkHGUKu9Rq2zfe/5XDFrqG9sfjsU9le2wv2RsJmlprPmHeblhKOZunWttmts2mcFvnyt1urttHFxf37XVoMhMLuvdqcj9OdW69K1H7Bo8tVxa4pRH91K1LFq8BKaj1tGtQuLGN8Uju2KahEKakTRpyyH06A5EP1VudJvUeVsy1TrQAT4fFRHeYVih23hmDD+Xu3eQFPKq75zboGBFjSM5Gm51MJXUJTqGQHS7SKgMOYUcIYSE1V0WNUa0N9s1mmtd18WxkqcAnPIGPaLZZiJaTqag85b7ZzreHAyzTGVzRlLrckT9nsXmel2VBuGUVK6V0ne6jQDb15ebajOC47aV09WKstfTeSXRXWxrJ/fVtQAk0eE+c6SSZYNqhldQbKo3rYpoVpOogZ5iHqBMsXNOjM5x8SKtwGCuc4CxBoBxW6chF03IvM8MfesZadZytEQceSl3Wm72u99up1RfEy9Z8ua77j783P/v2W+0Z9GflrV9/dvP0Ry92h0e/98I+nl9cd3zYjvv15cWX/eppa/u2qHkt71zWR0/Ot4f27z81/oPfWXjNYz1W26FbJ1rr5KR64qwplsYHrmkW9LmM15YYDgEUhidRuAlJQiUMPnxQeu+1lMA/aru89d3c5rah3e6qLX0Clnn3iw9ffsz6m//5//nwj///VP3rkyTZdR8I/s651z08PCIjIyMflZVVXV1dXWg0GkCj8SBIQiAHgkhK5Oo5K9NqaWv6vrb7r6zZ2uynXdtd08h2bbQajh4zFKWhSIiEQBAEgUaj0Wg0qqurq+uRlZWPyMjICA8P93vP2Q/nelarP1SXVWVlRHq433PO7/weK/7gf9mc3TsdfGHhVlV7tlw8/2XY/FzZjnrF527dHHofLx+efSQX/Rvf+Ppb7UcnP/mzH3/yzgMeDrb6/df6xZcKcqGlHFkgqXVVLS7mOK+bZ21zGQdDPxxd3y1uv1Yfvu2mC/r3f/osXLroCy6zPNOid/r9n107uPVSuT6RZcB8gbLhVeXRjjIfQC1TxZoBLenaqdi+r1hUABqARJx3A6gXZvOGYxGlqKokyNBAG1UNPvPex+hJg2fnWRVR4lo0iLYiIhrUKFlJ89I2MYDIe868VzCLY5eRROdUnJMQcueRWnYlzzGI64S69gu7ZMf8ApxOVpXG8zSuxJXraxLr2O8JafakbghRVTv7DFMCDOUiMdaD8YoVZhtEjqHi2EuIYBUTZjkGC6kEYW85pSJZ5MDrMpSfPR882F3+cnv6xaN9keDpUxKZ1CwY50IcUVCwRTSpWcMTE9iltRyIzKBCVcAIUIcXR4357xCUr6w1jeTGtoVy3jmBZM4RsSNyzpullpGfU9oIpyuUSBGSlCagK/mVIp0WHehvnax0h7whu0apFZEoIhI0SBukjU3bNqEJMULhnc97Huxu6NZodXpcXix67XY7WHiiGEHMITAz5w7RbCoEDqyI1g5b6QE4Khje5wppVfK8R6AogYlDBIMbt5RIHDwjsHeBlRSijaMSNhixU/jMI4uFuMKPm5d2NhS+bYLWoUHtQg2ExrWZKhDyft7EEEJoVHKf5djwyg0zwCGidYMq+JHf9sT9Xqhj8BRENCXOQzwRmw8rE7FTURZImnxC4sZBffJcpMCEpM8jdeQo02i58mKOZMYPtnM4yQack
yhCRk4GBSHnNEbH3lmLRuSIk9GG0VuIbekrIo4doI4pzzJRiRZ8a6biICISCdrp2I0XxmzxyyATaofUKypb9EtwwqYag0YLoo6cFJDmWc3mhwuw90FaMGkM/X6/bVtRUSG2JhVEKgysVitiIseQqMnMjdZNy+xEpEUgMJGHbcu8sHpEk2JrbBsrD6qAaNDAesXJgoj9WJz4UwBUyYyDiIJGgRA8MbFGExebiQcDgQTEUe3yBlVhzkTMBcxGWxjLlAQ5s7CoebQp8rzXNAFJzuU9sUjMszzL+peLVZ73osCz12imfvDMxK5tmfMsaIRH9CCvXoLnNiPJKFLjQ9PmAbNFGS7q/rrqX3yyOVgfHj9ol3X58gHXw8HQn1fzH08v3z1chaOq5bJXFmWZV7ncRLM/dLvsn/fqftZfFeXb04vvPKO72eitN16rq+gHeSiVQ6jAhWHMFIitPxMGHMiMSdlRipIwAIsAsCcSIe84AI69Com5T7C3TQuYmtA679WU+gqo+sVu6/NV3NjIi9CbHfYn2yHsHk7X+9fKm9duH37w79zR6V4+6B+/7W/elInXURUm69Zf5uNx07S7zsFhuZdL83BYyfxo44Mfvvf00emzIOu6pdnZHmeTvDfey3MupBGZh9JNBl+68/yjB8Xpqcv967ffzG4fzG+6cPQwP58Vy3wzFIF6PratD66U9f2L9aM67vDOPq1iveKLy1AOqF6UpSyUvFDG5JFM6pVFeiaNdOxEyPs8NKHDK72Lwo5CWHkCO2k1Yx5IU7PPWDKPoAgRK0UkCawEakgcUytCShzR2jRswJSqtk1apTgGbNbzuQ/BETdxzewZEM9oAvQFZb/LXJCOApTYhkqiwZAaI5R14mO9WnlSB0MZ/0KuOJCfYgu/+La4Wssm9+mu/qdpAuwcLP8mSNIlG0McBOIQonSsl1curx1tfPJ4c3HtcrG3GC2yZS/2lc1bjoBkXKeAOsrEhRDg2TmOIm0I7BjJo5yiiCM2vYdFLbyAzD/djnQr3DSzEpjYMTvnYVbdYMfkvadETmM1VlxXaQlmI5BQyxfoAH1qO9BBj1dOxXixeAZg+jRbZkhoG4kSQggxaBRPzJlzLvMFQ7nU/G69/Xa5/Hhwtr8Y9SQq+6BBMp8rRDU4zgCNUTqbP7a3mVIoyRMT1GXe/EmYmaknqpkTlZBLzyvQQ1Bi8X1oZN9jtNxpkhoOyANkFbhY5JeeLwtkIp4dchYIch8ELHXghWaYnc1Ii2uD69v57kZ/p8h36/rnhX+lkc3j1cWT6n7dy8p8Qpq30ngKkApYCTSEJoMSq1CAc6JiyWzsmdpQ9oumacT0acyROUrI4RNTSZRJRTQGSdpuSkd1B/qklTAoeUlqEGIkODYmEW0M0Vb/NtnGtCs14+sEk0aJGXvDqGGdmArUTOi0yzZIsiLPGSAxCdbR9b2wVB8jDHKCfABEpLik5IaqyayTLNeKmGPqihVK67oVDRZ1TESUOQs6BDFCkGDuhmqbwM4pA1ApXN7N/7aycjDJr3cRSgJ2zhaKEiM5FlVKAUeSCAUGt2gH5yCZzwnSEgMm6HfOExkbSwDPLgjMpDpCmBFCpM6Y82rFe2Wo3S2MOIq0rSYFlhIlqwrOsh7AKRWGmJF1YBgDLEIgr6xZ37cUFEKOPYtH47TOUWeNUE1auXFbffnZ9z44fjba39q5+Rr8wYOPP14ImhYyC7Hy96brcpnPPD+HPqn0vGnq9WKLN/tx9X/ax8FkfF6H/++H5+dBmGY7124thsPmYu1KyZJCQaJjYWfXzXYkyiwOZsMtpJSKa6fEMMGZ9wJlhO6YZv6UPZhnjsauZwoixur1/cGll7B9/uHuo3v9bHT66lsX7dNvv7aF4+nZv/5Jdfhzlw9PZbLXm7vmF8XBK/EjcU2xjlLvRd8wqrgacrnjueD5/Udlv//O278Is3Xo9+dBMl+eUja/XG+Q/72/841e35dup57JX/3wRyfTarPIX335Lc23T54eFz3H+XZzcq+4CIX4OVEDFJGrYWCdymHVRorMW5P1pSw3fXUWqnk+0h6rk7QyMRVgJOJc4ZhdFE+sa8TBeORd4IwuV60nbtqmVw7bOqzbRrTHSuzJSV+pFm4IawdWWYNZiFi9oOl4fmBWQhBWCRFEKhJVYxNdYOfFe2UmNAgEz56JhRswy8rsUQC9IrLClgXa2delU6irkFdG0JaAY2OZWSCZhAMwbwsTC6YhOGkqrG4QotjC1aAu6V6gG5gT5at7N1YJkqNlcr1wIFPut8z9mu5Oxz8+OL2/M9tZ9srWNyw5SMhOyiQDJIJTEojzLBpCA+fZZSwqIUlxkLOVRRIxSmqHnH8KPicoEXnHieZGYGaXNokEJuccETGxdy7xdZCO8k/v7Mxw57+G7dNFIFxdsuRgm6hzXU6OqkA1hqAioQ0hBIltiLIOjYp6Jnbs8xzswD4HNHevyt679eGD4vkXm5f64muAmYjIWSqOXRxiMROoVHSE0sfHxOS6jXZyFLddKRBb55mFo0jwyDNfqF83VVzXrT3muVc0ed97xWQ43Clbff6scgcbIQhEWrRQgVRCS9fzdZWf36++euutQTbpu8mozE8refLo2WB463Iui8XF1sa1r7/xGz+4+I/HRX19uM9N7SsOVQO5cHKZI0RUjVRCYnwH5oQjiEMIjYjYDiiKHbHeDCfsRjc9b+cbQ51GvluH216RDChKrHhKBnC42tMakMTExBRj9Jlvm1ZVo0QFTERkyxUSlk6TpGqLDgEzw4lEUx6qUrCpnROzXzrabjAGPmtas7ASZ3azWiRiTg6MoGr2OeYvE0Jw3l/J3SVE9tzhV05VmDiIxDZ0PM4raV4XqUEg029QtOwhY2+JCFh8locQGCDiGINEgZIDtR0LRDWxq4jUeach7QLMvkBT/CIACd25Y0JlQUJ9SJJozjOz486njrvjKyltTMCRmNVEal0OOSXjcpFjHyHLqoXGLOsrSJRFcmavysy5wqLcRQJkLRzBgdGigV+hWFPRUCklZyNCEd3eZPDawa+ONt+bnjWn87vl6Pr1G9vMyDQWgUu//+rB2dHCfzDbywofV7vgS8eKC9JF3Qzefjp/or3r/fFNNM+a1Z39m0w5DZosz8JGixFrH9KTdSjXvifig+ZCOa+jrlXEswJNUGGFxBC85xDMp927tBd21pcY11MQEpwJgXaMToaZsPjh4qPbsni5mvZ3cbS3t+uevfVaf/Xnf1B//8+38+tT3jwLMedBFobF4uN85yW+lg0LLM7i5TDsMuYVsr5vy4o9D8Sf3X84cZvHGyFAV2EdVXNSHsgytP/2X/67v/33fzcfNe9876/f/fAXd1++sxm0avKLJ09DsVpGfxO75eXGfNYOe+XZahFJgvps7ck34VgkR8zq0cSvji/z7WHpqr40C5dRRuokQMjkC01kl6sT8hHUqNPesP/w6P4P/vI7TWj+ye//s5/f//CHf/EXr9/9/K99479ZzRszTAvBW3a6WJKBOaYSgSDRvHcyspKBGDRtR+TKv0IRgyUzk/ckrYjz
QRtvPldGFTUvmFSASc2YoKuw+BRNLg2mppX79J+AXGfklAIQzQogiTS6TAlA9copia70s1ffpyv2NgSwWtiqXJU+M59LM7cjKJGy5jGsWW/NJ09G9fPR4v7k4o2zSYOrnaTKi/YBEsXyXJiIPYlEpBVQUHh0nFfKnBjd1XAcVe34HWZjwkw+UVuSA6iVJSJ68RX2wwAwYrakgSSdFd2U+yl2c3cJ7M1Kx4y2C5eW1kDauKqIxDZIlBDb0LYxhCgCUe+cd857531GzBnn7DV42dXx3bD3AR49z+afjXtBgmMn0CjCjlkECnjmKOiE4+aPRKCOvccg8kTMzm4gBRrRYa9chwZCPitrwuXzY5k3bsDlcDwuDgD0GqV6wedRZsdy8oFGp7vcvr61zXkltQbnqz5LJsjrxezsF5ff/vrvvLx36z9//7v1/OcD7i1l1O+PpzM/KnaKzRzXqX+j2L9+O2SLbMPHuaAG1z4cD2TmgEWEEsODBbVIw2QyH4H3IYS0sDQmla0kO5eajquvhiGndsnu+k7HewWigqCijo36QEpqIihbJ1twjzTRe2+Wxd1C5IWfMzFMi2oy1C7B0KzUJF4RAJDiIPnTD6N8ijagygzutplMyuRhlmqaaMS+uwdBYHYxmKmAMrPPsyBRRIk1hpagWZYzo7HXEbhk/0JKXYAdw6lvNVJX9QwCUIbChSYA4jMvUZidRGXbEOBqLWBEExYVCUJpajXGUCenB8DkFap8xfJiNValiAoZGxHk2IemYcdX5C0k+z47K5EuIHsyVawKlD2TKLUhMjvmHOAoBPUgr8gEGSMX9aCeae8hkYTIjOMbSHRNVrQoaxRVXvQGC55w1uh0Mtiq6q8VBx8tjsvh5rgoectzVvtJpAJUYevAu3lxOQ/rupYmLClsKr20ufVxlp2J5L1yj/MmLK7f/eyr9w97y+nZb//aenhBm9oOg8/zjVFxoRsXUsyUG851FTRwjIpGOBDAJJHAQC5BPOfE5Dh6M0WEMiLM8jNJRAmicNyd0vYwCBH7b7797yYXT3de/czyy2/63r09hMs/+P7kw1/68d60OSqkel7u5dMTwWTs6yI/5YOsIQQ052XYnYz4dInNmgeQKbch9uL6ZqbzVg/rdQxBiqxuqjLPsFy99NJr8x89+cWT7wzu3n1jd38/1OuNOwsOguDK3q2dreo0zuKg1OPTaq6TEcrRxZOjzdph2cbpQjY3tWizwMePHzw7qkdfuVufnwntgFMOJYkxQIBeAw+wcA/C6I16b3//LxfZXJx88OynH3zyYGN/46h6/KMPvvf1r/2N+ckFeUJ0jRduMg6sqsrOlutONZjKXCOpA2LKYCPLx5NE2bTEZ5EQWgiC907FsW8gufOybgIzWmHHHeSsVjyiyAvrDOrwNyMLqpEQE9Sc6rhVbzZZf/JpBOhqShCBptfQ/7qE23TRSZno6nefYiSl/9uPkmiN9sOLqlJUdi34ldn4pJx/vD2/fjnoh14w2aoSJ/8Ec/dVZg+xykuAc8RKYoorR45MqZI8ciNZxDcxoFdNi3POOZcERc7Z5IsOlDbg8UoplMJXupklnaBy5eL9qQYkwfLpmmunoHhhgZJs9tTQ9xBDiCGGICGGEEJomLiX5eyYPVsVzryH97mxcT1/sb55Lxy933v+xvp6DoDgRMmzAl5UEh8tMWzBFp1inkaU6Lip23DErHAK6rGL69oXZegVl0+fnj9+PL62f+ulXZ23+uGH1dMfALiomh5vrKssH9+Qu7sHv/Nb5dN3j/TR7/3jf/aLn33447OfoVk38zKvBpef8Es3d2/devPw0aPrB9ewv+fFFZt7jQ7W4rJ279SdP5XZrb0Ve0Upw0lRn15iLriECyyr3rqNXkASGC2ByXmXgaAhNCTkORcNVwLT5Gio8N6HIObYJSKdcs0Ssa7KcNrO6hXh6lMNqrEKzO2LVILYjUQhhBCCVWWkSmBhAum5M/PLFzJ9YgJiFLNwT2ws7qr+i23FC6ErM19RNoBPrTASpT25MUtUdJKhzLsQA3uSKOumZe/g4IhDbIi5aRuYtk2EPcNQ4pSSQGbCIgq23YpJ/iw4BUQwAMiF0GrywrTjRZ1nx14EIsHOMUa6wtJRpwGyTgKAKpGtorrjzBJbiNkzNyEwkQrapklsYBaCRQzYt1NoIOaYBmIh8gQBOcvGJSLvchBLZNKMOQdlioy4B8pEM0iOjCgn9IQcwyQpa1BDvMLaFUsqFjyspCgH63xvcPbwg58dvb8j7rO9rYN62HJo9rKmaItcwihmE3YevCm7t7a2L3ljWR3PL/OmvuF7/eFw5UQom4emX9Sbo8lw0dDyaW/abn0YTv/OrxS7mYybxbA9Xvs5ZRWGDY8q5NxyWIu24nJHJcUmuABdOG4KhYIQYxiUw6ZZgALDOUegIDCyG0HJenkDCbz3IUZWBxE/Wh2OBuf9D/7VzXv/PBuMF4vZS8RNf9hK24u56qw5vzzJb8SwUa2f8/GsPNibhXNs8lRis1dzX0I/40FwocnGRXsZtg42v3nc7vdmD9crlowd90OVD3kU6uXxdLe3EZ8939/sVdc+g+H1oa92D3aWQ2maajgsioMbfH40Lgp/2T6ZHvYgHtp4rz4XR7nPLpfzl16+9fRJHiUPsV2tl1hlVKnW4Ia5YSpAJWsOzji4UIyKZ/Xhorzsj8pI+s7Rj7Gd99wwhnBv9sFB/dL4xq5UQaogAVhDK9JVH4pEWYOa9C5oFG2TKZUSkxOIN6qnGmJsW1hEFW1a+EzREDh4bdpWSFJTqd1Nbrhx0iGhWwG/qBbUjW+adpNp4POOCZyeSjU7DpPfRrXFl4qaHOFTR0X6X4d+mzDJ9q+2OkoEWkpDSqevMBqHiiqJ60WpvO4s8juz7Y92Tj4eX7x1vB84efgoyPj6VgFj02TeO5dSgkUlavTsjCrFhBTxK0og79noj5zmQCJig2EpKXvT7+0qWwXu/I66V7ez81MnJF7YiBFw5ZL3qb9/4RKisMEapCpmzisxiEhomhBjaFsRiSHCe2bOfQYm8s5n3pFj9uKkdr6veSOym48nzcZpcXHolvtuVEtggrcf1kMtPCG3DEoFElcOiUPe/Z4sQIsEAHtZRz/cxrJa/sX3NzbHtz771vqXvzj/0Y9VWgdsfuFXASwWbrDz2eEbX3E7fCnHi/35+NqmPMcfv/8vfvNv/t7p4Z2PP3mvPMnacxpdzw/fPTl8cnbj2v5oMbpYzU+a6ezyEqplb4evuclouL8/fj5++nzn47Mnx3kYXn/9perJMp4HtIJKeFrEJjp4Qga0MYQQG2ZlaIQyIwZ1zqwk1XUCODJntmQ+RgREKCfIxrQsCblIcFGa0EyflgJKfOYlBBWw516eN02jkCzLJcQUjWBTNjMBDKcaJbmGp8J89RQwESlrykBU64wlGTYTrlzLTArLTgUgoS5GM/0jUjjqHi9lR7Yv7kSdSkTsyJMnphCjUAATUjueaBMkwmYPrkpRo1NVYUUE2Cw4lTv9HhhkPBSFMHGkmOe9JkTnOEj0hI6xEAHizh/
3066lQCdQBZzFuDJI4RSOXGCYz6VEizK8ajjIObZAEQYY7DzHKBItElmN/KgAk09GYEoEp4AqE+ckXtWJmo9jTpoT55IL96B9IQ/KIR4sLAEUmAPWVK5QLjWv14PxAf/we392+sGDclQ+OTm/sbmZ9bnRpphkWhKK3I25LYNWQXq8LlqpZFTQiLaO1yFyPG7rUPGi8FyMFrJwxdbo0VHjV2ebvdH5R4Mfn/7FzWLn5cn4ldvr7Zdml0XgohLONSyGLQdmZSEh4Ty4EIIEldoDGbuo5OeLCgjeq+FApIiSfENVYyL3AaJqynIQsXc+3N0bfu7N3vFp8/Yvwv17m3kRcnWLS8/Vzohr7DjX+qZ5jFGG7R6aekFne7fKi8drqk62etf2erXEDLkM/WJY51uibt1m/o2LrZsYuwhPfBEuvducnl+MCh9IueeExsVLN0Ybl1Sv2/phgcXy0fNr42v+9hjvURGlVi53x7I7Ch/cL/OJZIhoGuFMqG2wt3/zaL4ebd0+nl64WmgNWoNqZgZyQk4olEsXe+DN5udvvz16dbBYr9u4dhmKopAosZZci7cPf7JXHdy6frvoe5lHJdIWaMF1Bm2VOJBnMa83D2QKEWnSLkkTjVKhph9g4zGCEYWBpm29UtNA2LSvMLlKN5xJYlWlSt/xsF7MYQY2WZopk+lb2QW765m9c51RkCLZyaoVFbPuNyROXjw9HRxna2CiVMMkqlLoRsRugZX8pRPcJYpADQAOqwy3Z7uHo8tHO/ODajSqcu34pxFKmupcf9BvVmtAzDtTYJgtO2IbOkWk84kEoN57dFpqplRtmVlt78v/9bzbaQ47KWYqsZ2qJC0GrmqxmHcikqxXP/13sZNIGeyZ1FiQ2IYYYwhtsBk4ACBH3nvHnn3GzM6zzzLHTsG5kyBQICBucP4Wbn5HfvE+PTnQz3swWAksJEpETHDpTVvuQwcXdp1MgviIKYMau9YXo43VLz5evv3dm1/++jqfPPuPfzLM/OatL/vXrnM5lPF1APjze5ff+U5v9nwxvuyXT+tbdXNn8uv/mzd+/uf/8e37F+OXPz+T4Ld99thrlo9l9zu//PObRzuv7r45zg+IxxiWzDthLzS36nC9vbx+Nsvfv7G13Pz4J4tHj1766j994Plyh8UJArx3cjSUEASRqWHuUSKL2lKCyZm/YDTwQmMkxzFGYtPhqmfqalYCeoioy1pINafbTKQeSxChkGAkIIQQ7NN07JumccQiEkU54cwJbTKLUvu9zSJGQI2peCZrC8ACD4D0JjpdqyHjiSBt96AXRFXHJJ3rs6qoZxbAZV6jNG0LZo2S5b0QWij1cr9eN56dYdQhBmsTYKEvsavYAtGIhG5REsFTWqR0/qwdoEXUNmvvs/V6zc5bNkPbtta+O7bKZ/jKizVLOmNMHk0E55JRO6VHwTEpM6LAOcRoCfKqYM9RQ1LGKoNIIpk+iyKRjX5qq3KjwUJCuoFFwPDQHqEHeEYp0XOftRQUQEmSqxsCGeC6bPYKssByVJzzMIvj6zvuj9//90u5726W0+MaRaYTj2VTOMgY+ZDdCDrkuF9G31ArOTOPfLOU+izwk1XN5DmTkUdgJ03Tz7FclvN5PurVJS1e2phsyBt+/e8ePVk+vXzjtXDti7+Cs+PgY466QHWytVP7whGrQ6PqhBHhm9xLXtcBlLETQFSCS7tF9WKyXjYv2RAjU9I0smOFKJz/xq/eKaKfO5+/9Yrc/Orsj/90dHrih8VCljLGweRZcP0qD6M7X7y48y0tytM//n9frHAy2J5I9f1L+fJ4ForJpH48KGuUYT30HnCLdR3JzyG9pobE07jIp3XBPsBH4iD45b0Cc92p+wcjXxSOlpOhq44ee1yrJru9+Wziy6oN9fGScwllj72HCw3TsD9+9tHjeOvlCOdpczzePP3kAbfsAlEQ6oNKYMhSCPfXMsxXvfkxHxKTDLP+ZrleXzYOw4ybJRx8aOvD5UN/wXdvfbaOtSeHFhQoNEDINUaiYJao5tOkICLnvT38jUgUjVc3t9rAEiGARGFy0W5SlSQIs/BqkYSmJmdmNo1dMoK+KsBEEjrfHxuAHWumUeBFuRugBRBVZpYYrCSRply/hJVDXtBL1fBb83dXKBxz27bOuTZE512nP+5CFT7FRYpAAHwg7wpfr+6ebr93/fkvd45+5dFtIjjuqE+U1MBNa34/FEW97wHwtj8jqCMxLx5LH4CAnUurN7oadylRRWCcpKutrrxAlK/GXrOShiqiGTPZINX94NzRzVLZle5SkTEiEtKgYsOSAGjNFq1tJUYJwZogn3nnvXOenc+9Z1OAMSsRxHsIEJi50uYOX/th++FHveNvhM96IjMsJjiIuqDiXQd0AmTa6DQNe7gmRICZXBQmzskXUfniP/7n3icf3/69f3z04LF88L2b/+gfDj9z/eEPfh7+P/+/sswgGwByeoW3+xo/GA5Ivjj6zFt7eflecf+Hv/3Wznd/8P+YfrR1B689fvnzg9u7pEVRFwXf2MiH718+CCHslSPGcLy/bK71ef8yvBwq/OXfGC/K5Qdt9d5a872zXolXHvjxg9u3fS0IQjXr8VC4AdUWKWd8BG/9JUEkkncgCkHYmWyNmLjVYB1lG4OyrW47CFqsn6W05lEBkQUtiHS2hsabUHXe2X0QDHBWS2Sx5E3Dh4mMTMTW6Vq7bKsCihbayQDSS4CoFcmIr/hhQsKAM6IkeShA0RydHDvHbLSRQGBGzr4K9bppqCMJEzuxnF3RpglETBZU5XzuMhVLroLB4BFXngHqRZFsNUmVryRx5gxpPloWt2SiXkqxYubt7A3hN1q4oQ2ikeFswaswmxeFEDsjUaqRs6OdPiakBosG53yUZNthA3uiab4ALLrDhSyG1WvCzJnIMTtVr8oML+IdFaC+iEfuuSAekhSCIWmpUgiPHHIFEWfEnok4LCTM4vne/kae/dWjH5+8N71RvPbKnSdhz6+OQjVpRsFX3md7eaMLbOar3E95b2/yiIMs+jxcgFdBMxlNs53GTdkthHxPRX0uPD5f5FLxRs+VKq5eboS7+eYXaxzifPnuH1d4Wt798htN9QSXp6g95Hi0X4UeReJG41p4zXIZ1hWA3EEcIBKcz4x9D1FTjimJqBcSLwQSS6UUiSCQitcn/L2/Onr/p3VW42uv79z52u+f/Mc/2Lr/CTxrxbPxzugrt0Zf+OZE7n4sdETji7u/df7uX877L8141fPueDH7bHgy9L3hCNk4R7NYZdLP+uqlQQ2vtOB+4aPwQlFj5dgVVSuxzXubWleR0T8om/m5eAcG5k8GvbotxYGz9aVcLuoNFMNMB96pBARlDtK896O/3P3GVxfrqsh3tvduXj55pOpjgXyLMGIdifTX617s7RSL2Yx3mmIoRb0SnQ5665fGYTDMn+Pg+aKUMA1nYVoe8+gOAhoXPURalsC8iBx8y+zgRG1vYtWDYWHVlAfUXVRwZPOAVSJWUoXG1M4av4bNwIig6h2zOdA6YniRaNtkcmwVEZaFAoChTF1FEK/EIo
5NO0/RXBnSOBlNy9LLe8vFgj0xawypPTebPKM6iYDNGBaBBJEZKrFtiTk2Ma13FOw9TEQfAUZUqAirNM5zrIIvbs35ycbZdCM8HU9fme8ItCEx/wRIwagCyDsHEXhihs+8RlEVBTs1OX+q9J49256b0m6arByDDG9Mh0Wqtd3EZFM+wewFrv6OzOUsVTd9IfUy/wcA6cdPtThY9FFa/KqKRoSE4IUQY9Bk/+s9MznOfM6OvXPsmIyGDWZzaGJRZY7SEG35/q3lznvl4Qfh0dfdZy7DWpwicqa8zlpWhqgJZoiSzptZRTiKqvMUKaiD5AqSpwv9i7/aaNfl3/8/H/3wZ8PP3p78/X98+vYvTv/Ff3GLw9HoZkANKQEgd7Ll+aVe/OLWtX9y9+XJ+ebD78X6Ty+/t/q8LLO/94X7y+ft9xYf97+2sZ70hyWGDpN+zrVr8y9uvdmT4fHz97lZTm7dDTeni2fTm+ufF6e/DMcf9d8aHBQ/XRyvF7JXUXE03sNc+JQp40wL4lykUTgHhTZCCksHY4eoSsoWGO9cjBJVGA6qaiVTTb6Uxi8wiBxUzG/YOY4hEFETGlP0qYXUEpk0iMieLAelYIQXlbJXEGNV14YSC9Dzvl2vXeYFULDFRflIXcBjwlaMYWGrEBHxRog2syPrBlQBckRRfZRg+AsnvQHXoXU+izHaEcBMROrIhRDMhSO9mpKImCkbWUQKzPk1qPFkDTRmFcv3NKmQiALsnECZHDQygZ0ToaBqhmtGLBAFaYLlDGeDwqkXCszesbOAYGYSiET4KAyASVTZLDOZIRIpmXFCQro25k3dGfvYxk0obZi9KlSAwOSMVh3UYis9CRPl4FyRK3p+6CQX3nBh0OZDF4aEAcmQQp982ZLjXq+/4gZhgTkGl9yrphvzozh/9szfFFyElR/mT4sDOsrnN3b223WNfiv58NwP5jI4qTdFwuTak7JsY+3aR5GyGEqt6na1Et8va8e8qq9lef/y9KzQvM+rnZ6483A8ck/WX/Plv9WTXPKz6sNrWxuL5Sev7H3Rex+jVA7N5u24lnztY9W2NbSARiDkSiFIQ8hCaNnB1kYqyDzHaGs6TpHJaiijNzDC/5vvHE0PtSd+Pcd/+aPTp3erL7zxd2aH/7I3O2zeeHPn7/6jGk1sFsfhk956i3vDcPs3l/cfzpryebmXV9PNcmNZbx3Ws18bzl69M1pu7uJsjlnb9qXOWl8rLWIICL436g98BV9gclwVYQW60IkgHjYXh6TSRORF3jw9pSzLhiFkoWyziya4oZcJaIg4at3YXfpek2/XOfPmXrXiMFsMswltyfTsSTbyNHI0EQxAW8VglFF2/uzBD6/zUVGHHACqGyN/c49O5dbpaTVEHV0+eXOXdKMZr0OMDl7Wir5II5yrBC/iIcRwjAiwKhM7iDAoRGhQ8uyYYwiGytgwmpZdBiYSG8/K+EMpKQWdqknVSjgzOfJKBoKRZxeMeqgWTieqFFU0WiusTYicRmo2uYK5CK3XNRjMZEGkikTGuipbCaq23AM2E4gEmBhiLSLOeXPnaUNg74jI51lso6o4CLyDtD5mr1/uf7949PH26npdF2GjkLbRxpMnHz28846sXWYSkIiQWnYqVMXZhJts7ij5bBCz891BiAQtJPdQXAFo1nVH7X4itmQFm5BwFVKoHS/bzIOSO3wH82r6yg717V5BRWIMCkQRjVHVBh3H3nvvXacQcszkku2+XTgza3IKJS8c1xpe9/vvNUf3+OQr4WXPLsaG2avCq0NHUe0YYPZuDbzPSMEoIAo4icPwy7cHoRj95j87/MlP/cURerenj86aH/64nB1732vcBlOZBvtC/Dg0k9z9g2+88voRf/d/mhz/hdCu8rR6e771t+Jb325nfn751w8fPavKarJ9fXS4eyQbUsRh6GP56EnTX4yK3mz59NbJ/Qndu3n5k8vvz/zP4+jv3cyn1c352+vB5y9kcrS3G88VQ2AqVHMy3Yco28bE/FLN+zOxvM2cmZN1ZnLAsJgEY0tdsZ7M6Z+IPJMEIUBiHA6Gy2UVVLzzIUoUtYE19V5Q0QhVxw7ger0iZzIBW+hCRb33xnU285MXCd6G81zxL2ANMzHIMHPvvFW/JBZEsonuksBUKMUXqiJK7Jh+hv1CycIxoaq2EKJkP2q3H7MRH4igFIhIQIJgwJBCnCAleduawl5LMu/bYLYQ6hmG+potLhNCECGFWbxqygIhm4Bh+bUpy5SUxMZwpFdRtUdLnVKW5U3beJ+HGMyvJkJYWDqMjJFixiQJKUxP75gY6kXBlAMZ2EXJoAXQRw9SKIYI47UvB2G8kpHnoi1HvBg2fitXjbE63+Bsb3KggyHPZ8337p8dTW9de+XWsr0HHzOX1X6cu8PmfFiUk3J02OQrGUZsnsugyjdq7tWt39mcDv00FNpkggH7hgfgqayzlctJ8t5o8tWvLUsJb76084VeJSQBJ//j/3jL4Q5vPIDKJxd/43fuzHbo9P0/8699XcrP5/VUfDHdPqiaVit1K6AAGsCzBA945kAcVdjiRLstCQPKJswmZn4hGwbgFz/5Udl7o61HOJnLyi8eLKpJ4W9/PfvRv8fPft68/aXms+OcdcgXFxiEarVzsHPvpb+FX/zF+dYrOe+fHv/I3ZyU5e/+8OIHveHTG9eC+ixuNmEs+ajQ00Cu14fQmTRNNSw2RvWiqBb1UPj8Xj7MUbS+IxSgod6YV9slndR9CtpIMRqTXIR+CNtCI8KWP1zy+JWvvfby7uGKQ8ix4vXpYpBvt5txmT13Q4lDuD3WstkswuzBO8Xp+/tuVWCVwSNevBRo2x88Op3toirq4Y3bn+Fx9t2PFrsHO14kxhZLkQqSQZyCRRrOOFNEkQA4JC6ihSUFZadqtH9ndy07qzpkZJ/EzbdnwFzJACRrUUqnLzkk+FZsqczMQSW5ZkjHzkViJkKiY4LGbsllD5qyZ2bVEM0QlxTJCtkSHux0IenIJcrsbUELCwrodDmZs9w5COB84oWKRM6YgushKnPwGtTfqCe357OHW7P7u4OvPx81zIwicJbLOuY+i6wq7NJKMyXGJB5UYqJ2x55CiH2yaE80UOOYiJGE5YUlQud/gqTfMY6DiI1c+gIW676bmG8KiK4o6KmWQ5Nzkznnk62AFaLd4ATPznvP3jtm57333oqvqVaujiurMqSILCzOi9RoD3p71+rR8+HsvkzfoP1LDmn6ZnJq4ljV1I/Z97KPS1jzJoev8rVsbA5vbv3Tt9ZHF4ff+083//E38Lm/d/jP/9Xwoz8bXuPV7pYsIpatNIXDGgAKsBf52hu3v5S3f/UXs7f/p1t3Xp2/1xzwAaNpf1n3Pqdv/cb27FeuH/0vm9W/mDX+cnR9TDey3rBZTCq9nenlZPpG/0Zvujh/unNydvHD2erDOP7Ky17KW4s9DPhwfbqZz3OKdS+nXNizgpwSKH18SmB1ikhIic4gS3oGRWHPkWG7mBg1cywiKRfnKtMyrTIgIaqqkeGrZWX7CWO1iAgzmEmIvWMAGtWDLeUs7
2VRU7ho3stj1LSIiSaG4e6id/dKYn7ZAiiRL67Mba4I1enzTgQNe6wMC08tGLNLqQbayQuI0oDOCF2iOHUrkm7lb3VciYkiNDXTrOgM0rovNZ2aZ6cSjTUFsldIb06UJUrGbCohujqJoJqyWzQGQ/dNuauKqMmUQ6GUApgVEEQoqxgobSCeA0myTyclUhMbmqpalBgEl3QIdsUjhcRocwoH5NxzUiqGKkPBRha3a9dXbLWyxbrlbsXl9ic/z+SyfPXNjVfGuHiw/P6P6l/e88XOWU73zwaT7E7V8Ec8GvqtaaxzbP3ho/CNuy/NAs3bPGBc86jiXt24Gplw6fOqNwnDto8QKZOQt6M511J4V+W7Ob761lxmy0KQX8cshlmuxc3Vh48+P96/vz5C1vveH/zZb//+35189ov3P/rR9m13MdwYhvXMV76foWDJVXPAKzlC682VRgBSdr5QNVGz3W5BVboYKSKK5lCviJ5PK5x93/k3ctl//QCv7JfrabXcvDMvXimWvwzvvNe/81sxhr6fImyePfyw2H9jfP0l/DmPmufPb3+dDyZnzSfDUaDdb70bf9E2PxmOfLHV+p4Pl+vL6rKdNYiZz7J8FtqiLuqah8K5dwMPz5j0642mV4zyzbx5MutToxcXsu1XMfSJy1KnQHYt92OpN6RyOzPdvMBWnV07b3Kp4da8jm0zuxzfuO5yrfLjbK+gUbVdT7eaJa8/YjzeYUi4yLTeKNzr+y8/Xzzdryhk44MvjELz4fff+8nN8ZuXM78x2ln7xhU+5Eo52JHZJTStEAd2YMA7D2nALmogcUwaFCFGz8wgUXGcyCQW8N1tj5PxPnWtcbKa1HTqiqaIzSR2ZDhlQ0SNOkREnTRGY2yT9pUdsUMXDUQhOnbeFqGi7JzxDq9qDiFxSg305I62yURMzuRD9lLEzN6nwgK2IAFHBO/hScAFsVP1gV9vXzqsZ4+K2e2Nyc6q8J4bAVEvM10UpTlAPiU6tCNFVKFBwaSpnJltm0JVhLp5wuihUESk4CFoyi1+oRjpbPU1zbF27ncCWzW4wY4vmMBb0wyj6SXQfSaw87fjXRt51WXsHDM79pwcLgn/1X+WfAOyrGJEqxLR8efl+kk4eY+e3pUdBZPGFuqEJf2ASbCaTn8A8B6+QeFrXVTZePdWvrlVnT2PT3+49/tvTXWK7/ygGD3FuFjnvd7RIvRi3i9jKOWJA4A81Dl4pzz+kz8e/eX/7eW2J58s3GPF0O/43Uc/et772iQefzS58+rof/eF+PUz9847k8cPdt87bl1/tvWfjjduPOvt3/3he/v8ZGt6L8yPaCSj//21Mr/+xQeTWzuvNnfd8Xz2k9MGuXI0rq9CBCaNNFcjpc4pRq+ujgl8LMyImKJGaw9jDFYhiFiTeFW7OcxYWSRJG94NyqYO9z7zGaBtaO3OyrMsNIE9i6CxMFEAQGwDse0GiMACtUeVQMpKqilsqVMBWuPaSgDghR07k/IYHIwXdzKlkFtKqVYmc6IUhZPkfFc3pk2VBKhK1KQ/sx2EWVYBUDZQVwWaMau9bcCiNXDFzAQluNjIkpQ2x3YXiiJKNI6lyqeMdlRtqBdVJm/12giMUYQJ5tcsKva3RAyNQcSxjzE4581C50qxaD1EwtNgPBe1ZZIooMzk1JFGEiVJzGevWaSSeUg0Zt5iLUNz07txNq7PXv/k3VvbLh8jjPY271bxz/85fvQ99sN5MbrE3MfyWEZVdnM/v/Oz5nkh5fWd4cDj2dFHTx60r9x4NXB/JcM6G3rkK99rxI3KwqOabN+bH59qAb/fa8uoRchmfvPmqN7rnzUPNveLBau++xHP6/Z81HefWS4ebnI9qf15KY9/8fj/9X/9v7/5u5//2m99pfnx87Prx2eDnWGzOvc5e6hTsCopu+QBZJebNQtt9N7ZLe8YxPm6WQGE1HbbAQ4S9Vj5duXax+8cvPXrNw7eODx53ifuCYXrd9uPP9JP7hfn3z5dL/Ld08vjjY3hloSmX241uR/cO1qHh/LGnYs8HoVZi7ZXfsW148340xu0Hg+P8q3+7u1+4xarp1W7qEefuTnywX88l5zzwseNoCPxOTSjtdRFX8O2RERfZGi4Vmly9VIxr8u7W3GXa2zNsDnH4FKHZ03ecs/VwFq4yXrjTLfCZPcmQmjK012cj3B+0F/uDhZHeD7Udc6XX3jltRt7B6CsPDkdbm/eutF+/OTtdz9Z3L311cPmSZiF3sivRgVWIQyAStg5ZMS1cAtmOBA5UqHEiQQrxWjBH4AonCMGa2oDDYbrKEKGrnZuE6mXBcEMzJEmQRuITd/acU1IlDlF2XefrnPGj7JUCU07GLAyROCdaDS/RhMnmRZDTXRJfHUsmIChczrtqjTUsXfO2WbTuC2eHUDMiHCOgQgmHzJZe70l26+vrr+bPX5/cPTb+sZaV4XXJmTQSGbNj8SsYTbys1rxpa48gkWVFdKKGuVKYuiWtqnxl9R8oCvlNmWIqWbN1bITgCYCGmz7JaBuhpCU3959N0n5axaEQwAJeXYWaAjHntiiWZxzzEa4YWYyUPXqWLu6cqZApghBECIvXLf1Lb9btv3j8vxkORvzaC3RM9oojvjKmMtGdmPDiiLAgz1ktL2zn29s1dOj5p0/3HgFq4eHIR7JuCj2C63W2lSBfXFrL9bDfnNzXh4CiGf3s95o8d7bpfuR13a7Evl4PvvB8e3bX3wWAg7l8l88vfZae/7g8WTn4Wg/fu7v9rZovf/4sJ4ujx8uxod7WfWZW/zRa5vv572yeGs83D+4FvY/d/8VNz85nf+0/8YXr1+/u8/7P5uuQT1nTg0xKtRxcvkhOKMGEiUluoga6hY9JIoX9laniIKxgIDOhow6WrvNlAm0EBHvnUaBKgOiUZRjaI0d7zg5fjifBYnKgEie5W27NsW5bTJSy2m9knb9LyUjUJBxnYlUo0QjXzOzYVYhRoOjUq+WPjuJ0Vw+2HVuqQSybIYYRBlmTRKT3Ig6xVu6P5GSugid72z6Q1Wxui4iEIpgR1cGbZKigOGYJQoREZsTbwroStI2SViCbWsd7NrCQhk0LV5grpyiKmQLBE7WOtYP2OfBJBKYuRWxFTl3JAo1pZaqu/IJS+0MiBzgQJnjXmgZyJBDe6Ch6gZok3QUws18283ufvD+9em91/Fo4xeH89de7998U/+H/8vg8GFRFAtsn8h0S/KZlBvYPGrxxWz3l/XmIY9OjulLb71xUo3uzy8/eFLe3rru4YplT2K73N4hDn7eLsNWwMHmKzH6NWZN7tui8P5W4fdy3S3arD7tN/JhPZvFDVwvFsL5TnWRb9WrUeandfDjgUb/7p+/9/D86Jvf/juHZ0e526d8S7JG8hxeTIyl0OQxTID0mAkIKkzsPBPQighTL2pgAjNibFQCMSnYx9DQyuU8fPtHf+WHm9e3R029oGrRbt4o/U48P2pOHsb9YcY8GA2vDTaeh6i9zPtiyKuDj35+VDr+3Odm2Yx5N4TZbDC+MWup/jG4HA7medPSy8Vgazs7pWUIo2qGl3OK
dchD/lIh4ybs4tTvRPhReznazCUDLmOuYFHy7O3cv9VbFntPq+ICk2NsznhnThNckFwoL8Eibs8t+bL22cHnPhPOLvP58/1isRfPn5y8d52O/Gz18sHtonrpv/wvz3su3L493m0fPnr6Mwzyb3/+8/fmj6vV+ZKQrYabo71Fk/s1h0XUQrWFrKTMcqdB24rgFK2AKbaW8qXqVKM9RWzPHhRmi/Fppq4miUNaClOSFVph7bYp6V8xO3vqXEJyruhFHS3TuBjW3oIUcGRW61b5u5w9EdstdYAQIJQ8eqyFRrJsT0knIHT7KGMUE6HIcis8UA3EjsRFlgyI4sWDpMqbL8udx+vzk3L6UX34htycy8J7WQXKhK78eFMwAlLPnCB4M2kWaSUyqDMPYonB3nHyCMAV0fLKYkRV0YYWRN6xCtR1BVglhGgHakh8Q+ZOo8UJJ7ZROHFELSfcs3NMZm1FjonIgc2Di9I87Jg7eXQHXV55XIqqEmkUUg4uOPGNBHAopXcLe+/Ts5/5o281BZRFAtTVFHwHhiSXB3F2/dnl0+fTycat3uZgenrkP/7B4GDV9ELcqPj61hDtUM6aka/yrd7m3Rgn8aPFWXGT9m8DaP7n+3ndYCFHQ/c69oV/Pnj9jRvtqMxo84Jee2mnuD1sXt55lr30pdP89UcfvPTdJ2VB6/yLuC2335iH35n+zOkrbjT88Gv5hz082Jr8h+LlfoXh0eparH/8w8nf/cKTFW/t3eaqCO0Fx75lEgI9kEKDarCGTST5O6VKBctCAhOLwJuHasrHZHTr2PRrQmUpigjgmBgc2uAS5wjMMI04jIPY2WrbNG7gCwgEZxWdExJiuYQMMe8TeyLTh/BiXgUi1CkBFCQmFD1BLUrJIdaeF9PxpRsCADsWEU8cQjCf9tRJJETa/L6JKc2malnBfIW528MCMygxHyUo2EHTDt0uoHRHQtcOaNdGk5oXlcQU44IuqcUmZPPB7nzRNfX/bMlRJArvXAhRGCRdnJ6KsVuAlIlypdWWhNGp3QTCPjUVyiBO3tDIozJzAeqhT1IqSgp90YH2b/WL+aON2QcRx8f5s+ny6a8NLr716D8s7/2r5SIUcFW9mIxOtiQ/wXjb+Se6oyqzZvnN4Ut/sMjjk5oPenfH3/iLe99vm/Y+H90cbpGnvACJzrdGwJZwTVDeGu/1H5VNjQou+Fh7leXGr+ysTi/7z0q/gZEfNdM4Gm4eNroxGF3OT/eK3YfNmZvlAYH7o8XD0++8/cdf+Dv/x3feezS5ffdZIyWFxoZgpxHmeS3Qvh3yKoESF6cl05irALlogBgq6kVUNXq/Se18xY3Pz3HvnbdvfvubvdaHofgyD3t7Xo5W08P93/ybMd/cGG4veML9DR5uhqxAWBxMiX/67iFn49c+i81J3V9m+TJOJo8ftvXp9w8GOkLjfSst5GZfq/Vw3kge1hBPwB6yHT6SyUz2zrXYzKuNUBWDy+1hbFAL+0yExa9kp538ylm197j26/L6lLbOaLdeZJiSTFUk5Pv53F1kpfM7PL98dmc82MqH/pOfTbZQ51UdC6zz+aP2Ekf9MNDL5qMfTod7/vqNfWS8mN53i+39SX7YLOqT42F/MC+9lOA+oQ8KQEGsTK2DeNIAmBexh7aARY2aCogUoiLsfeepkHZIXREmDxfMNxXKEAIb+dETYpTEAE7ifjB7I4lQN2GpSEcTZkB7ea9tWoF2/A0wlPUFocnce0yQ1k2SL7hMyWKY6OrY6+axpLj13oHMmx3MLKLeZKyWpASFgD3V1Ix5/NXVy9/pffBu+fjWYpKjCKj7QiGRwlPfbeM4iNkRIyF/IioxhGBeSVCN5qDJFthnfUGHoumVRLfj6yigkYkRha7gPvNIgg3TJrZmUoEzyaNNvZ0xDa4Ex46dc8477xw5u87ewHEy9yPtzj2BfUN6sYzWpHESpKhUkWCIoMqrsvNBc/wJnxzJeNiMBY34XpC2C+P5lO80odfzJ588X8/7wzfG1dlq9eyX27/1K4vLrJ9/uL3htsPDnpwOYxW3Xr7c2Zzl8XAyCa++cXFYuL4C8DffaM/e9dVkNtxaYaMecl9ONz57c358Vk4Gfj9MXhvfx/gC41u948/6k4N5cfxeXnw8kAFlBxi84W/82pxmN+J3svg2Rs+1n62byWLjG6559rOsOJ7O57OdyazJDkZ3nq1/gT5ownQ8RDIeXStUVB2roc1IfafxzRgSmSlCgxUGIepYUpJmUurWogTAOw7JZQbeO4jwlQGGSFQjEZpkCSFGpCW+gNhsGkMEOw6h7W57IGU2WNPzacWvEQlg1D+RqJ0gyjERUxsjfeohsvsUV3ZsClUll1pkAM5xG8W+CCka4UXZpCuEPk3dCiMchyhGPzNRIROLGuFASY0v4jr6liQHAYAEkA5oQ+rn9QWGBEWkZFtNTNaTkGnwCDEKkQGitghK18raAeON2IumXrzbnRgmZTYmiZQOl5Y97FSI4MFMkjHn6l3MohaQPnQI3nJNM8vu/3g0Xm/M7y2bT4Cjd86mD+bu94rRTsWnx8fsSYbUH6z3BodNuV3VbQ2cl73Xw/Sz5fVfDvH8/aOt0cEXJ28eHj6dPjr5JU72y/rGS9u9wldFjt4eI4I5yGnoofBVUS4Lv2bSydOTemuwUQ2CazzleV5QqKYP742u7eHm7ct3jiZFQ0L1RSOl+kWdTYbV4fR7f/qnp+2NWT2+/sbfPXlyDs6EEmmdlEhLZqhIo8Kcq90J5ICgCODCOjKJkTkHgkgAMk/rmGdlzRj2i+r86PTekztfvBPWs6wYrifDfIF89owbVKOtYnRwcaSHf/Wn50/rzbOl51JCc3Du/Q/fPaybnbe+RrlU/eGsLN/48v+h/eXw9Oj7bnPd374s9mNZu/HDcxQBueSs2qNmgGPsnWJ/qnsLDC+b2YCbocMZzbVdl5wRhxb88Wi7GX5u5ffbrZ3jdT53O+cz5+eBL52CaEuiD7ypugUfjnZ17qvF9VE/urp478+3VuFklpd1hiXtDHk2nyP2i92dqMtHv7gQbuWANw+ey6Jf+OFq6vv7k9z3F0XuBowFdB3znJtq7YXIu8jBgyWCHUUwaxCJTOS9NwcbNmJO4m5c3f3GcyW1FDZTIsLMu21hIkzk87xt2m5VaY8MgdJDk7pzMTc66RxxrIE2H8w095qPMIicY5Eoogzf9eCpHHbHQHr0r/6JRkGXTR4l5rknUpdlUDjvogQfYVn0IAIFEBfkG6le8ddfunz+ydb0R83Dv7m8uwwUKSZVD1RC9OzseCGCc16TxbKKmLmjWDseY2Q7OK4WJVdkJ1ttG76sooo8z1VEJSUimu7TIs0lpeJcMbdMLmS2A9y1IY6Znfekys45712i54BUzUXY/BpsEr9qA8y6S0FQ6QYnA7UBFaFI6gLBiwalpcdm29+ry+PR4lF1/kYznmdNXgMIbUeqVYDIRRGC9w7L2t16841QD6Zv/8noy9vuzf3DR82vzB7fxXQvPBjkzcbNnVV5Omf55M7tsOt/foazJ8/rcwC4+cUv8V/
ex7N5GEw+Gt4tdPZGedqbzFc77UZfsY17DX0w/MzDcPsr8vbNeMnTzYOI2dl52459ljePqtFOsYJbz2MJ9SNZrqrdYq+ZHrnmyeP9G7NHPr+e99Ynt8eb1beuz9859t5DIEcDILAvbGQLoTHzJLUpyYAHBYzC0/3MBsgm2w4bAw07pW6to/BEUYiYWSOc6yikHmqmzaZFFXYMVTWXYe8lmo2Fi6pQZOwU0obgO0hZ0/6d2KRBXcqoYTAaRKBM6Ll83TZCYGXryqzP7izlRBSOnM2x5tPOL3jXQrYndmxNBCHZjAQJzI5ME80mEzDMjAILA6QcWNho8fYjU6eXjuKcj0iu1iqWNJpYDaokQpCmC2EhR2z2dtqBcKaWt8YdyrZbz5wPohLRNI3N8WD2L7J87J/b2kaNA528axLZPRmHmGM71AGO2EGdCBP3VLx65ZIxYOlHHWhvO49H7+4MVjvNY5EneX60N6/5tLe+0D+4nP7DfDCpR6erprxexNk5hgGjZ8PXvjD+wm/w3W+/86//09+cXn40zj85fPr8wclXX/vKnezVnZ1rjx4/fPbkcrVorzWjkS/n46IqD4L25iu/dmXfN7KabfXCzbY6qnW8bMYDptWjvmbzD56Ux7IVNp9/chghxJg0Yafnpj3xDO6D+nB9Pnx2cupHH529vzv4wt7w1vzZJa5Qx74qJVogwUVz+xUC2ii1clAEaPBcKDVWo8HMLL7ZEKrF9wMqX2blg6Nf3nzjJg9Y87bdnnCN2KzzjbCYXz7+yR8tflH1wv6k2WewtK2Pnue0E9B/+/1nNcubXxqyHhfjjVyuv/6P59ffmH30g9vVg4P6yV5Y6E5TK0Isa5er8DnvzXTjSLYr3jnGJPOTPuqhSo9WWV9DUw8JAfirmeA//1CvvRb3v56/9kY775Wr2CyCxMaPtR1EHcRiKOV6Osmnm1QPl2d5oXlfiycf7FZ7ax5XT49zf/PhtF+de6xW+PAcI1/eyDZ3R2fTuhxzU06HVenzMMDNARZVua19pVJRQXJmL1gTwTE8pGWwneSAaWYECk/uyuoh9bxdjWNmSaZ67NXmg6t1kRJrhD090XlWEVISO65SVh2nymGbUxEBK3C5rKxWEJHZwJH37DmIeCZvlNQ0T1iWoEANwureG17ASrZK5kToYM/MnkHp1LGoEwIH0qARKmojdNQQhSjEiC/h5SfrxYPy+PZ653q7XdG545wkIjKDwa1HRuwdQhBhwBNadMsmgoRoGgZJPoJsci2RkHJMFYYCxyayYxUEacmkxgSfgDK2ADixkAMJjpnZdUAdJ/Tb0EImJicKIpdnuaipPQXwIBabTKMKQuK8iBBAimCWBUytiGfWIMQUJRIQDLSgCNFABMf1qgX1bi3GR+P1/cHs5nSaB9e4GoHghSUGYs+OhWsJG3mhdW84GY3GO7Of/6yJR/Jr3zht3r8+e2cfH7/qjm/5p73VrDf6nTnPT2/fqjerk+c/bU+u++sH/acVgAVdayZ3J8/fwyh/MLhdIizCo5evF6xtFdbn/a2H8aUHi+tF2W75Op/XPB3UP28frjDc8NnxfEsW8zs3m4Om55m0jlH9KOeiOf3udzfeWtzU2a0P/7D/hY0JbT5sKz/5zA9fG0FqNCpVhrokVEDG3Kg4a0kh5LsFrQLkGID9SVLhhNiRlRgvlvRgZpJoWaxElqLg0rBpVYSoA2lZhIJEFTLD59g2TKxkZjhEkaIDouZJt0PETAqnEp0gCiWOpBkFAgTyjkPw5IWIve8kRMQ247FHRx1IASKi5E3xf2XVZj2GEnGIwSh9KmqKJsc5oM659JXJo5kFMfmhsbgrKy4F+yu/WcC5tltLsQnsABIlMCWqGjHnUaKkYReCxIB2ZC2QFVVnWsm8l/Wy8nI+N6YIZSwQ8w4QkcGoXC6rNAuDYgjMCvJRQhq9RaFwHUof1eKYTaBg8E6mwuwzFFGLIP2ADXab7IbozR7uNYcFnizcyU7VxtP1+lD81HONP1hc/JPR3l7IT1eL3rB/uTPWN798+9c+w2FQVkej117B9++/Ve7+aIDGh3sP778++QKO5y/1DtbbzcPjh4Fkul7svbTZ3y0ej67vDrdiPHbS7G2EE7SXx7/cB7KzB2526JbRVdvZaOTRyrNmnJcuNgHIOW5CzwhUIEAkNlG9+B58z6H46Ts//vVX+ll/s+VaIrhgHoiQthIcsxdisLk0UeOpGbJIlDVRG0LLTOS8ytruQS8O7CEZkEvR7y/qy8PTw5fuXhOvvszQY8b56b//75uFG2BvGy+fckHCFFmiQCEzQSM7ftR/74Pp8eP5ay8vbm41u6uny09Gs8Nbs7XUHPze05AplxWXOVArNVSeyeaFHy10fC4b5zxyMsk8l7L0Te1jVEY/VOvXvjFrZP3gg/ZJRfe/W3/vvbJ8pcyuF34bt4Zxy7mdQZEvtrHckmokF9s4H7gTfvIR3v6jybT36Hi1qPNisYFqloUwDgd91nw8DJ7r8/p8WTcDbqT2B81br8rkM7c/qObnUubDcTN0riKpVBfRlxmWIJici53zIbbEbORDtqwCMkthuMTHUQLQ5QwaKmxaMLOHSaEwqTiI3fqqmghWBjqJaBRRQNT0uiBm9k2IdiKAbDROxnuWPdLVXSCxFTv7ZBO2pqPByFm+45R0RwZfNbvWyzlK7iJph8RXGx+RaIcmg4G1ryZN77X5zoeTo7cHT769LopQRl6rEjx550QcAc6zgH2KpYNTgEkZHEIkCtJxRKNY0l8ak8yE0ogzqt5dZQcLumwXU29Z28O2YTHmDSzr1EDCZPeQLMc0yZ6VUK/rPM9Nww2iEGP6jDrowe705BbCtIZQUBY0SSJFXY9jUz41TUNM2oKZq7CaSFZWrirjYX+xfzmouc2FpQVAMUMIPUgAYVCO8/72QMtmyfOP7vEwzt/+EykevjxZUGjq9bpfqM6r5oN3mn/4Txvmh+/fO/ev7VwbH36Cvd0DAGW5UckWP2Of+yrivTuv7cne05p9T8rcTWl8pDvzaVbIelnkcSNvhnU1aO/Eg16oL7g6vbZz+uEPxn9x8plf/Vu5FP3pYlhg+vCPh3sLFwo9auTkLHzu8uBWkOKjOh++vP/qJ6fMJ8i38vY5FDkjAKwIzB7aUtLpkjOJDgOAsy2KJYU4B4WYQUWCjMg6oURQICXAgeCZiTw7YROrGwAEc9ASo8iLRKh3LsRoAGBeFKENoWkdkYgzOMp6KTZ5gvnQWZpmF5pFIGIfIAiSZ541xXmRJMoW8MJkzX6RRJ8ye0h062FEiY5dlnkQtU0LG4Ip0ZvU/O+8Kf/sfiPLGYkda8F87agjeEaJKVVCubOb6UDj7heD0p0RfoyuoJGIYtTUw5CZU7KIhKZt12sY5MQUYgCQOW+d92pZ2aoMpEpgECtCDKwEZx8oRZHAHRsJLyZtIiLyIAf2mkc41Z5HIYPN3iJfjouGm+OhW8z0ZK86xzm3p8pHEqcR4ntS/uHx8X+bjydFeHI9n3zz95vQhCdSDu83sX/r5is/y37yG7L1oI9pmZ8fH/
+Xe39yd/x6GcqnT575wi/PJS9o5UMvuHapRxv9Zu9uATmu558cPfjbdHMPD9bni42Xr+PBE3YBWVOsL7lRXB5T1ahH7PMoZ5SBGZxRkJY5j1EChFRI9Ud//dff/My3hdl7lkKCg88wLAaL+UqgDHHitBFqCWtq64imh8Dp8xJSRINRPO94EDjPNJNWW8A9p6ObO/taCsTJJdyojVVV57cu9NpJ3FjIABXJGtRwXIlTLla8enLmJ70Dvdib/7B91G/3+mVR7RerPD84itmpLjbzYS80jfiVZw0+cLHAxqK4Nr3MG5r4ubR1Aw6znufrd5og5WL2dO+z69tv1iHn4TfW734PR4/KZo7ZT2fVezrJ4oPYO+hnG8xyuMZlLWdDOWnkNIsno/WTQejPmQ5R17NYNBNQ5qvjDNP++FePzi6ansYCOuzt3ejffeuV4V1+1NSPzz5B/5U+vBfPeR5YsowjNyLKIGKvWGd5FtuVYxctktqGtbS31bR7udpidSKctGqlq664W3Zxgq3T15iqw0q1SgKHzazSkDGRGMSzT//W9H2kJioybybDbkW140IqJJpG84qxkUQhafOY3lnSDF2RMo2emUocmbElDIMzjzu1EkgMcopVbD4fDp42s0Vvfb93/Hq4HqHecQFaO8rgEv/JFA7K7ACCRgiLT2bBKiIJEEAy1vDeGc6fctQB7xxUlLv3+KLDYMvY6QapNEuA2TA6JuoIaC6NHR05xfwCbW0gGmDi7rRIv5pDpDMasA6FHUiihhgS7VURtRNQQVlJVGKMSlRIcW3e+6RcPN2YTy76HGTtI8CZOmlbB6xB2bLmwa2QU2/Ay59/JOdnxbCV0ye9L4x6fDLIWw6X63WdF5Pw4B7ee2d++/ZOtukPNmKe08sH/PQIwNkf/aH/yT0py/yoOW9QNvnxeOtotBX2x97leRzUH81oqvrrxbwOs/q4x/PetQMKJ4t186DV9oPv33l0//lx8+Tp5Prr13t3qurdvx7lT+VWr37YFsfFuq70nSpIfvP1o7ls3fC7R/tjHAd9ruQ8NCNxgPcsEmoAzIiqWZ45dlGEzXRGiZRNCESOmaiVgC7+Om3TVRgUNDqBMtS5zHou06gzRzWPp443BSiUGL28aNZrG1md93Vd2xMmSk3bOOccOzLDbyt/pKoWE2FvTVkpkDqCV5esp0nBbD6ajszTC3QlcyCz97CClbpGZlUlgmmUtW2DkQmgcOw6+Zt6Z0wFw3e7iq6sCmNBXXXQZMiBFW/bfURR422m8yf13PZtVEjVBIqmNbL23B4yWNvOxJ6NB5rEQypCiizP7OowXFTJUj6XofQAwOyTQFkQVFTEibmdRTYeFj4FWiBTZXGgwiEXzbF2jRuzr4+HmM/pfNROZapyvM7OnEyZjqM0GsPsUos/kNnvb+8MP5zN/tUfj3/7d5dyQXeHy4uPb929+8ErN7MHs98c3/q304Y382Im9x/dw5S5Yt5hJ+KHg4VDWNdU4qyerYdRSwpRxlvDYc61K/TiOc9ivkGr40dy0qDOm+USa83Y64hbkrLH0g9c+MZHLv1l4JZdo9SSz3uFML337jtvHXx9FpZaEHkI66K3loJZ1LHTStFA10AP6DFWwMpj7VU9UUMgSAuC530OPWAALqEKzvLzchauB7fdlxVkzsti45yuHYXdM4zn2GqagmuHimQRqeW4lljLcNyr57ES5Drqz7Mh1rozfCyOeFWybrrJ1Dc+k7gMK4GM9xtQMwuraeEqbmazfMbaL+iV2+ePHt7+2//g6N6D9f0/ae58Zv4EEk7L9d7W3u9VeFB98AuZPumV4gvCAC5OR7PTHVwWfL4jxzeGYaN52p9fZOdlWNdnw/EXf/O3/ux/+J+n1bT0G2Wx2cwvitn9jeGNOPHFuL89zjd2qsvlRydP6+za7dGwfxEA9h7cREEQVWbvCWDvRBrHCEEkinNCBJKu5bMZ1BaGyeKX/uvcEUkMR6XUbhMlA4ZkSqydqE6ZiL1HUgWkXACjxxqvP1l5GBuLE2jL9mddKUVHwOjeRHpuqGuWrRR3bl1QTpMidVNI9zVdX83QlESqKh1xOu2qRVu/8mHYuM9c7Px458mDjdmtMCp1yBrUORc1ldugzqd1ODR5PouocIyBvEiQK38BBjNIxYQsdt4ZmwUkSKmO3K2dTOOoifCtBErV0mZoStsqe88WDmMUVEBJOEpL7OB9aBr7UdMbU9WrQn11FTNPIcbQtJ5Z7HOXBKSasx/B0MXM+bYNBAjTtWrwSXs5H9TzYj2pnAjEK+pAuat1zZqXJ8d0TaigOA/Td/8a1XPk18jn2qxbymrna7cxC2f7vSwflv6TH++/9da0OODRLje99vvfXfzrHwDoP2h8LKC5exjcQmLVYBRltuTX9mgVV8uTrM4iuH1cP7p5a7L+5I3Xj6T45dNFXIVY+otXj3lTXmK3an/23vns/uCbQ7k7rXg0ml54CCiQV15MfT7xQeBJyGfcq5u11BFqcGxM+YM+LSFz9kVeMHNdrxLZCmZFnJhHAqZUiCnG2LG2mKBe2Sqx61pBd8W1s+cGai7FqXqRNk0tKfQwMMFleYzBYkygL27wdHN3/WciKzIBYJCNluIIytZ/g3EVeIbEpE+V7yrlwMBeRpIVd9IClRhT1SQOMTjnmZ1oMO62KYCccwq9ct2wm5btZjczGhUiYmFiMp6Udl1+2qhb92yK3nTSmJO8iZbJJgRj/HURGJI4Z0yiQpxGZ4hKjOy8BPG5ixDHRCKeOTLFGAMsXtmahc63TrQD4FNIsHWwoOgojxSV1IGdp4DWeUIIp9KOpR0HCRK9ICicsnhB3Q439/oxPr8MP2kWvzruz3/+k/mX7uRfem0jTi+eflDufHV86/UHH3x/J8/fdO27a8odMu+VBdka8z4Jz5/MRnErcO/48RH6cHNHfQplPhzLjBDjcIjNJw8+PqBn/RHXzDzVwmXrShrPKnBAb0Ox0WDPy5bGsT/D3lS3ZjyeYbx4dvn69hen7epJ9fHenVvVeu17HD0chOE9wyk3VeCG41JQc6yi9sAFcw2tc6pzcG1X0csOUxG1p36UQRGccMFyLfZ2OFT5/JPrCy6P4+Ycm5c0Psc2LhFnkRbMa0YNEoeWmrk4gQ8KqeroUTOv5r4fpMwvh3tzF4oNJ6zCHmEVZj3fNIu5Q63FYl1+/huznz8fDjZG3/oH4d7T+VHOx4zzzXo1oakUUq6bi8hhNH5972tvzl47nLUf1PokFqc7OBvhdISzIaY7WGwtng4XMRwrz+SEhxuTN3b99QHcIqzquiGOlA33Bmf94WnM+kcX4cFsjbP1zV//3O2XP/Pcb6SICp8jCLzmuQshpGPEJlTRKMFcWdkZ4RIxlSQ1wiyspjLMLubqcbeh6cqwhu3JVIDElBhRIrpkUwnBFmGmOHLOE6iNHYTsXFLSmKWAqRAImnrcVNyuDgVKDhvMSIbu9qxqpxKwo8ycahNxOj1d6FhMGi0fyKjKKRfdDiTjXwaIW0j7anv98fr4pKze709/c7m5chAi70klRIInG21sC
E+nglkkkY9NVLCnENCKU5CIJ4cg8C6VWOJPmVJ2o3t3Yr1oLNiWIwo2X+0kRtLuYEtkH0pb8KZphsNh0zR1XXvvrcOJIRA7+8HROSsYvMyAHXIsEEuYAMXUdsH2fOmlgMx5jSH6sNuOdpYXJ5P1yWgxqTYVLm+kyRhgT+wFWeR8b7I6PAyPjr0GQol6jaYNIWsLF0Me0ZvnE26r0X5RX57Lo0P+wleqd947+/F38GEzanIAMhqGUnX6vM/qj8eXC9koevknMntwPLq2j6WGSvw+5JCPi7175VeFyht7H77yrftLQfmTMns+Oj1faG80KvKdJmzufCX/2//oL1ajXz3+X9t7y/Xbj7PDxx7nVeEbbtfi1uw4MBpzJoQIeedFGmcoKMPsw+rQKoQcO0IbQpSUSWX/iUY24o6os8SRxLnVtmlVAVUJqizJvYbM0tzSklINT4laUIDZIcTAjiUKAQwWVrM4tR5BNdEatfP/7kom2U3PJkkXUUcCUiYW5OCAFwyurlW1Oy492mw85A6UppS2kh4z55k46xRxdlOLuUdlPosS2xDSHJ3Y+aqCaDe7aYOZo3RZJEm8fNVJW6Oc9BhMgMW9aCdyUHuTKtbQpIeFYG/SNi8MgEOIYAaE2KmqaHQ21zp2QAgxSy+YyruClKBEDpxaGgKURIUZRAgSOfUgEiAREhtQjy+JhvWaAB+Vfc9nro4rkehzrhZVpYHV/bRubrCf8OD5X/xp8Su3jqdH+SCfnp6P7nwxjH52djr9ik6O4+VRDD5kCEDbiGcsciJZ+dXls0s/zGITwxq9dQaE23Ou8oJ5Z6rV0K1m4MnOse87+Bi8czUrEBGEQ2+Y+9K5m15yPpOdGSYz3pnLuJaCLsPHTz/aurF7vLNq9OnetQl5jjHoIC+iSpQ6BPQdWpUCtAIVcDXLQiQn8hBHvOyDoQj+STuaDJqyqMIiOOfZcaX1SX760v7u8cNxKzuN9qcyPuXxlMZaDXjZaAVdiGu9NLqugg+q4gCfNwEBaOuwRLakMCrdzMlAer28Pqk1y2O7Es7yupV1s/urv9ccvPb83/7hrfyz/sa4+skno59qeDzb/tpr9XvPVv2XwlMqamkyda2Li4vpetFu7g/fuDW89Qpdb3v0cFw/mMw/9POPJ/P7wyYOn4+rauGXkt361g6PyyDhUVvWvQWCIFs3i0xqwnbbtKFtGo9BMfzm7/7uch9LmTNJo3nue2yoaoQAWZ5rbNP0Cesb2dzngjTMadhVVev/vSZHHEICyq6qb/KKQcLNLCQJiRrVORh3mDXZ+tglGq5EUZBEuao51OVr2ybMtmaf6u1ZO0apqvrMB7GHxcAhWxxfaXORGlV7qEXhbY0jpCISU7cvCXg2GZA9nqpIazNqGATvvNNfq+7+B//+g+Hp7XbyUhy3KnCiAQCELJddkvdtujKGs1EWIEBIkDFElB0A9kS2faOUsaidjwGlqSQdRqkcd2ohghhLAZwESLaCT1OtXTQRyXvFYlnlmWciibGbJdL3vfoQjY8KILQBjsh79+mOKoWMSeb91eQtCgJCxhk0E39zMZoNZ+ejupqVed1vKDATrdfcL+XoaSEZFZj+5O2DzfFZUfYXZ3Hls0rDolkM8opHF2Gu60HlWFCuOJ796Bez9+PZaaa4JU3uWg9Ajxqd1/AhcFvoLMxy4TwH55+cnr+cDzYHVDfRD90hYs4Pbt86hzzkyTi7+SV/b3NX5HQxGpQlgznEsT+Wpazml7tfmx/I1pvLef7T9k8eZzsZj2jBg5b7kQsvzrwlQQK2K5wFbTLOAYEEqDhNDjCkygIkVAEmK2LA5Q4ghRgpInmkiBBzZvFWNnyCxcANEUGHh9pIChAh2v7G1MaxYwEQI8l2bO9PRJyIBp0i4FPFLH3WkUSBnDOIOd2ggToxybGtRDtJOCFKpJQRlHKdbCjsVhWsFvFJRAqJLciDhJ0DGIIo0rSNRDFMCGxiOVGhzkRVoUTOpcPBcLfuzdLVZUDqyQlqulxDjqxA25GQHjvrRp1jx2kMT+lVafhWiCg5RwQ4sAQxHqKKOmZmiiASJgmRFKpOOVJnDZbMhPgKbiMjqRLWJMTM6suieHb4frtqfE7WeTVti2UTSXpw0pCE2rkeUwvGT+v6t8u8N70IJw/Hn92Kxd2qHZGo7r/UHp9Wsv6mz/9jaBaAB7PPEBeMfnPu9zdHrqHqaIVe7A3yehI2vO4v/SoPoRj2eS8PDbNvHI3G014ZdMzU5BQaFo0U+xvs++4k4znvn8r4CHtnunXOW/E0ZqusydfHZw95TUde69fvvvblN0lE1lX0PhPXCHl2YSE6UF2EbO3lIiAHKlVP5KEEqgpC8HHn9aPZ8SivivEi86xc1IGf8nXim89bFWxf6nBOm1PdmWvJ80amgjmo5lBFauADiaxZPS9iLDyVw/XW9ubdzyw++kHxbCHF2p/LvIdeXgDr8tZnBn//vz36s+/rD75TPvexHN382j9qaipW2bK+n7d19cffnf2v/z7zrvkb34rPtVFmL1yD2r54WYYpNUxN34Vsd5DvDG9uTsp9v78td4fhiJ4sxk/D7GHlnymO67zsMfdYhqimPKS8HLWLOjp1G9lkQo9n09FgyxfDNc994SHDgHIN37CPkmci2rIECsQFs/e8bkLuWYIQQUhVNYh0bkvJtceeAUfpP3QwUer0kRQMqhBOR7ZaALil/qqGNtg2F1ANArv72ZwxmIBoJ49ZN6E7P1SEGKJgpc4ujzo6h40LCTFWdI6LVxtQSDfvpiPKiC0xRvvGkZNNTwxi6kh0gwCUBI5Z2feAKFpLPa43X/Z7H20dvzM83J/lcH1qoxBBOSAExJ56088mr0dyEdEW3RolHRJq9DYxDnkaUIxvw2RVmJJQ0QwDrrwFUpW+YlBZhy4kUJK0H1cRAWmMIgGthBBFNZi9AwDvs+7YS3Q6Tgc7FOK8I9UAMz0mgQqBo0SCAxxz07Tk2JyDFPDgSFJz2KkHZV0thquLsr5W5chJ1w0VeWyryf2PeuMbTZCgYT7co6MzcJYREMgXGyuKS0ymRZCAtumvNLi8PKrz03o99y/JeozzqV7mAFpxBWdRF5BlLtUaQcQT5yOfXT5+fklbo3LHHTvKHUHaBU7G148mO/tFfxSPRsN7/WvDYsmLc6KNFd25cTLGIpQ//KjeyBef2zku6qkfkxxs0DDMw+bC9yst0BAC8rzHfW2bPEpNFHuOJfV8LDHYnCSi0rQBqkwhiEnJM08iEiGUPB7sxk5AtQOiIxeZAIsM8xFRDZFNWHSXlEWq8OxU1FwjjP4IM0oDiyg7iyWwPXLaRZhNdXIEJVg7HBgEeJAXBFFhYXYsRr2OXVet0qHPliji2CVan1VfImaEaDgwiGldr51jcpY4bKQqhoNngqjzrDBbMFz1z0ACi2wwFlGz/YLjEKLrusOEJ9n7l06cpZRoh5wo48ZKTE28acCiBZNaRioHhIQcizpPEgM587ERhheFiuberzV4mEOKz6LZn8CJaTHtFdLJlBR8MUAMTPRqhEwJTx49uMlGbaEW
wSsLQ6N4X4SikVWRZVRVwj1+CjnNqWAJj5+Uv/q6zkMxKmZF1n/j87N3/rNv1qVmv079/xRXrH3EwCwSFmXRr2andaVBZO/mwbQ6jj15dVjkS6pJdGfzLA89X3mNAdJq3/XmG+WSVCgqqQc78jxtdo958xzjc4ymPDnH1uqC+BLRRR0iG5fohSILJ6c/57988JUvvD4ZD2fLRpGrd4F781ERCq85OLB4Fh9sMjOxGgVGU/jl+HOrZnK+PNzskWj0nM8byXCT5eazekFy7SLfXMdiqls6pThjt4BWCItIS6IVWDTPNEhNbuj/3j/1meYlLep6fRI81TLcD7Pp5H/7T+qNyeJf/ncbX3/j+fcejfd/bdp7snjn3sbuFx7/mz+5/at/Y3F62vvg8dnH/91YuRfXize+crHIs7DUvMdzkLI04E0JEi9nJ8PtcRbLul5Uq+MRXQSc1+F0gOeyzOIyFDEPiwVFkpjHtdy5/pXpyfHetb0nJ0/8sDwarDcc5nV9kTd9X0VeDHf8YSxmflhLsUKxQi8uBTVkHdGQb2KoSZrG4BzytssVMyJXBItYYJs66SqcO2n0uw6QAGg0oofRGQDz44FoUOrs46znFFWB5swSuuIjlhnOTJysnKz5tl0yE8z3gK6G7qQ6hNFQrzT5XRnukOBuVYVuoauIwaZQOLJNKicFZ+zOkrSLTcszx4B3FJQ8IFktl1+o9x63p9Nsfq93/qWmPHfcC9qKVU+JMELKlUpXbeYQJpVkka1Q71yM0Xzwo1oAmoDJMdufO5dZ86AKOIUoEaLN1CJqSYxk/FgC2LFDF7MjgMSoUQlo2uCcmfunAHMRYXZG8MKn2g0iMDn7sTNAiYwA7RXiUmRb0zTGoRM12g0JwOqVQ9n29hf5B4Pq2WYzOQsScoZGzXl6Orq8qDfGw4Yf5nRjNuvPHiOUWjFNORy1jeRPrx3M1zxDLy8EYVn66zOhJ2HcnOV4OuUz18xqAGWzGfJ1rEPsoagXbjLZuv365cNH1fTJFujRs4v8RjEq+nK6DaBchOVQLm+u6bWbx/TGdOf5OJzVPNIK2NHet97wO8NHza053/rri6MlD7++/SPczujVweP8pUNsn8r1mWzQZevWmVQiobYORsULgrGBjKZEojEEu81CjE0Tm7Zl5oydz1yEkqgjthkLane1MKAgZ8BJkjEhELOKhUrZrhNEDizp7wHDRcDaqcrYM4ONtq9Xpe2qM7ZGjQWglDitkkVWz0FjiI33HqosmjOvbeFs9b5T4jKxJxaNEqNtbh2xAjFJ6pSZvfetGWcyhxjBnMytIBrh2Po3AKxG0pTOLLgLWSCQxGh7a595EQvd7KjXysbr1+7hNws8MoqIJqjeLg5gTC6EGDwMYFJ2LkhkxxJsxNc2tDnnIgLPcF4VrAzmoJKZ3MCeNTJVtEZKln9K2pmTKUFB0YuTwNKqCy5WTUH5s0ePTxb1Xjk4a/MJbYzHl1JFXhDvcX1cc8HwIqEuB4xNCqPmw9HwrbxoisrvF4t868mTcHj/nerxcqPhUPNs0V7b3P+ts+WftWfiyAcIlky+GPncxZOT1eLwoil1p8her4rF8QKBmePsYDcfjsK8t0J/k7cqrTbiRS+sFQiQ3BeBikfizvzmPI4uMH4eJ/N25GbqHNGY3QaQVWOZl3U9KoKvq+c/es8f7N+88xlIHgVBe2MaLPKN+c5otQZ5cMZSKJaAhwAIoFD4v3rMG7SXO5wj5txbVAFl+fr2l0+LG5frY9D4LGyvJMcs87Mas1zmLAvxVY/XbWzZh8tFWHgOfmvPbY6Wjw5ds+ztb639weitb7svv/74r95zej1ctIu531lsD9m1p5e7/80/PPzT744+eNT/6OniR//Ppo19CeqzjJl9Ps933SwwMggCK4sgF1E454X1cnW+vLgYbjfsuY0aWbJCehirtk0QF1C3fv3s0fXXvrBatrev320evDweXx+98vL9D/76oq12vvg5v99/jWXw5H0/qOfD3XqGBZUVNheysdSSl8BCsIY2qi2TGSNKaENN3DBHggAhbU9FOSUKQWw93IX/WfG4mjKJlQQMjhKVAhxLVHYODGJ2GTVtywpVeGYHFyQKILANro0FNqJxCFEodIbvIFXnIIwEzlnZcCxQjiKszN7iSmzmjDGIWkRWEgGbFSDBgl9IVTWKagCnmJQYJPFdFDBjBO8SYMZeQMGrD9KKNJwXdfj8+e23dz55b/j81nTUD5s111Fqdn2OagQyWEBhwr8lWlqaaDo+hbrkmNRFW1MdREIQgESdSmvNQJ77EIL3HgALAoLCCTQ2kT0BcM5HCIl1PAqFAJJeEM6REa2tskLVeU7YhMLIptCkmUq+RB2ondg6IhAhQZRABGGGKBORKDOx81HXFFRc3J9v3J+cn/bCxWC1PUPwWVCMjqdwjmYXXoKwa977L9ewfZkDz8DKyg2v/GLOl+Obj4c3ENQjaF0Mai/P6uak9s+dPPN+BgBSfaJS5b/zW2Fzs/6D//7WG2/ufOMrzw5vz++9Sw8+9Gcn09mT4UFJUDotm4s+D9Dzvir5nb0bkC/dmDx5efyoHOXl7u2mj6NwJ9z5tl/encvk/fYRlh/0kVH42rH/leN646m7qU+Un6I3z0Jb59wIYlQBgkqw7X5QbUVjiHnu66oKojHEKMETQ6iNsW2X3ns48oRMTDhLimj6vbSeBale+atEeAeBhsDElPs2RHNiylwW1MgTRuZnqDpAheAt9TrY4BukRVrFcNoDCZxHG6MqiLklMea08z4qSBlKTdCILtEhLWINLJKQxm1NdH22Dhu2tVbVJgSJClCIwWj8KXEMECRH+Rglcx2Z4SqCTIiBwOmeEwndOkjAHC1wIoIBcmSJiewZISppDKoWV0okJKAUpkDE0q2QrRGPRIjKSHmdzqDnGCIiKyGAWYhZNDjvQ4h0ZQ5HEGZjRye1EljVCUAQp1ElBo1Clxw2ea1SN2GgpeTPHzwNbjSTy5EOZ1T2s6I/rmWNnDP4KFWQDfExx1riduSy92RSf2bIflPvffjo8J0fLJ7yGjeBA9SUrUnEz6fP9rH5LRq9HU6mtGKNRR8l1k3Ra1G3a78H9xulD7O1BulDmoic4vG1vJHtNWdzaTa5KjHhvMnLflu1mUcL9yDL5mG0dBvncbBcl3SiTE62FJsywqIMi7Ff7O/xevZk2FxsYqUf/0I2T167+Zl5FVvtrXS0QH/Im1VvNPeTuszjNGimYOYIqQMC/FO67lfVuNzW9WWeF/1J+eUvfHX98o2f3H/y9GhxO95uGrexaP1c18eeK/As0qxtK8mrQL6cv3an3Mo3Dm6e/PTHzR/98/Kz36h9nEyun8vj6gLFE79/8/Pnv3i2uVM24284v3H6H/5o8PFD3H1z88HD6X/6qw3vMs+ZbzjESCT1crF/a00RqyZGzXwB1II8L4vIMUaLiqPQhunF7MauG5VZWecuINQrqYXgVSTv9ZePPjk5n02++ptVxEuv/8aHP/03X/6nf++bv/OrH3xw34+wd3f/8R/9wWAi7e3yPLgpepe
0OcfGggZ120OtqAkr0BpsNAJ7DsBJwQIme4LU8q0dVEMI9lSg869JQFUyn0K35SJiZzzbTnoE0zAy4Jk16Ruv1osWIKmec0DZOQlmN6QiwWqGqoSYvBPBLA50lUDA5JgljYoSQ6SOuSRmuZiML4wTbObUBvOKJHjXHBU1mXiQiUccs7MSrKpszBMGA41I7XG3Lj9e9c/7i3fL599cjBsIIdeYbG2vWN8G62oCEgEGgkI1SnKN997FoAS2uRaqxBxF29Ays2MnZChEInxaJ6FADLbT6jQYIoEASUpOURIkJSkrbN4lIEbpXFIAWy1qx/BKIw9e1IYrC0l0fkFMHZpgCmoSgKM6doGx1FAGmiyK493mcHu9Pe2pc/1798rnU/U5LZfUTG/vjHNpV2WVBQlgXJReoU0TdqCF0KYHUYNcqjYT5IuMTwNf5Hy+CosLAHXhire+5q9tC/Jmo1+2ix/c//5nxwev/s7vHH9/p/jJ2z+bHZ6Uw/2dm004x3pb2mF81MzqxXSWHU5e2ZP8DoZ3d17aOx+cvVefXufni8v5O9+rXttu7r75o+YHw3wiw28t5G4zF52Cj4Bjly2KwJdNrNhHiqKtMiM0bYgtiNum9Z6bplmt6mBTGSgokk0ptAmBBWCBOGZYsAGZd41jgFTNYywSkcl7tUs6sp0FMamQaGSL3jVFmqYB2allsqYPzu4i0W7Bn/jTlk9v7874zHDMjonMuiIFiapRLJ13Kt3iqSMlJApCArgIRAjBOnG7f5xZ5qkyO72yFScab47n83mrbSTmK3scAjGlkGWiDoVOImMD6olYRMizrXGdd7D3alfRR43oHEvgXRZfjMPJctYaEYVSwrZYyZA2wHmYvbnZh9nuKYi1wkTd46GwodcunBKAQMgBg6NEEJzTUIcsZzScB14czy+ezfhGXkl/jnIu5ZBH+bByNUNUgsgAaDnUQgE6AUptN6LfLuTZB/9/qv79Sa4rvw8EP9/vOffcmzezsrIeKAAFEARBEGSDaDabzWY/1Gq1pJYsWTPWwx7PrNezs+GYjZiI/R/2p/0HdtexP8xu7HrC3gjvjGK0Y8u2xpJbcqvVD4rNZpNsPkC8H4VCoR5ZWZk3b557zve7P5yTxTaC0YFGAVVZWeee7+Pzmj32jC22L3RYR1tJJ7zoeG7Fm8lkcs7a7xp7B/ae7wLms8CFXetLe73qvyQVZnEm0cJ58QyNFFnidGW1HawPMDuRaQ8npSzkpBd6ppwujqvRg0gLHk5iQQ3TvtCU9KzyiAd6NMJ4xLPaP7s8Ort9/bL1hwM/7tf2yBw/+tEff+mNr3c82G+PB3Z1IPOxzkh9qNbDes8wSQzsC+2MBrHy0GK4uecb06+Lrv+rr3+jLft/9oN7H/z0L+1Cbnb99mh6ztOvjFs7oTDugqwxx+rr3xh/+NFwe7jxrW8e3Xo/PH0Q9h6e+41vHftnwRfjt/+U7302uvH7hx99Uq8O5aNbcVSZR8+O/+k/2zg8KBnykx+hpBULcZ0N86kNli3HBkyTF88tFkc9jbEoSFprWVBKJ5J7tDSbUFHwk937w6FfrxqETr04ZxOLJ4gvpmPZu7v77NGlb/+XxbmL1y7+V7d/8OeDezzhcfXNVxc8FLcvg/W23Dya2taunXS9OdVTDNBAWyJPCOAOpmOKZAu2hSMSFZFoFbnSZ3NzIoVaa61hazi72pzCM0umMUMtWRCHHBJJDDLWdiFAIBAjatikXnVpSJtUwEvVDci5om0X8JAc05lrT1o5ceZ5LgU6yDsgkexDmXQ1CaJOA3rmZ7JRRBK1xobTKFOFprTurAEhypkE+ZdhImYrEAOJImA2MBq9oTpWr7abP3DtrXp8OT7bXqw3JhCBl+QQxVKGmQk5DI2qEBEisoZAHGMXfAdiItZsSS1RNCbryqT7YsQgKcIF0KhL3IwZjBgiGCFEqOQLJUPKhOyHjTSyJsScAcMW4CUlNq/HVZdtA38e45g0YKe7TEFe9TOBmBKhnQjC6XbjQsFcPDce7A/3JmU3XTdndtvFvc/c6pnYIYR28cnN+s4OjMbYOVgXWhlXhqtZH7rnbaFyjIQ86oJaLzYU2KcwDrw4kQvnAFTf+pZbHc33dlbXyll/8xPb3Lx3uHllOHv0k/DOT89KfHkwPK5k3DystEJZxijVeHCGHYVBM3Ht1nM7N9YXs8mHjx/58urKs8v8yZOLn0X/aFzcn07uuQkuVQ838Zh5D9J43rNmn08OJ6hmzEGkldgCXdctVDqJMcQAwnTaMCP5NWYx2PItTaANabIaigDYcEpdJlAIIZGIMkCTfaBTE8ggChLTLhhAjOkAAsoCkSwbIk2eiTmCSyWFXUq2oz6laCR2MeWUXJMpiWnaY455qZUeFiaQsSaEgNTBSkwubMm9LWnP8wMpSsxdCEnHb4zRdGhEDRMRqeh0Og0xEJt8gtK9kfvdhJoYSqIuk9Mec/8HTpi0QiBA0IwNKUsUUcUpi4tYcqzJ58v6018GpMSJ7p34p8tIY6uqMfu2gxlRhNksS/iSfb18ojg9BSnoBZKCylmgsTNGZEGxCVXsj/d2w9TXi3ruVqcymdCoJ42zWN/c76InIvagDrpQBKWhom9kKLyKA9k4Kc5Oq+f3TtYntsKksYtKvOE5TIN2MBi3h2Vob3D9Smnb6XQylcL6VWet06ad+iOynQHE+IDAFAwFhNa1TecHfXYrXJ0pvUjlOuEVE+/D7rYzboAG2giNibc59MNAjjbMZEUPR2a6xWN/847fo9rNVKQLRTg4qDp/7/DR9a++vr7xwl47m+l0hQasc4N46EbNaNWKRVSagwZkhSWMp+7M1vmz59pp+Ov/+W+COJ02I5Ql3OJwsXg2f6I0Gb249ui+ldXR3/tvP/vjv3jh6o29R7ea/X0zDXowIWKVcPiT9+wbr9e2dOcudXceN+++i/HRdDyrinmYxSF6HbyrJIYqWmF0AVF8bOFL1AFt6Dx/5VuHJcp2AZZEqAsoIRI7YWEJQkKxjeRpEUBV/2B6WC3GK5v99U0+8TOpgl3hOISOih6qRdvc+/7/eOnXf3fv5OagDoPJkR8s7KWN8uXes7fr1W//w5ty9nHbju3mHjbGujZb1HoMTIEG3DB5aAdDCgoqnq2wIWscMn4bQJxt/dM2l0klLTBBxDkXIU2NmTVColqQM5bBQBQQu8IliEYk6eiFmI1hgGJymuPTDMFkgKxMCDGKaBdC6uUpTXBMuuQDJ7U9QGxYU2QcTu8+XXKQT5/E7MKDZY3MHrUAsQFnzzxeFuDTLAcQhCHpJmMwU6ligLawV7uLu7PDOyuTn/X2Lsp6FUlAaapeWngtGaiJVaXJpscGCaqA5lkBRKLQmC5rTq0HgckaASDETMQ25nV8isTRlEVDbKxlicLGpUFVQIrkRs9QZaa0l+PUXVh7+i4JlmOWfs47TZ7PqdOhRNRasqSzYSbAxpySc9IdFQAyVArPGWfmg34zmY3oqDe/8NGtKRQdI4opBtMfvt1Z2+vVPkaJ3rrSSyOxLp6xqtfKaBFjJ8bABOPbEG
DtpJCwL298tX7jGwCMyONbd9WGte3KV2uPuxNMcfvZ01cXRtrFkY9nzl+RwebR/lOEw6oAhYkcbuh8zYirZQN2gPeN3r6DhS3aZtK838d6u6P2ubX57rNq1+PcJXo86o4Ch8KNa16g3X9C7R6ZJmgTeUFxTnGhKq2PQFRi7wMxdSEYaxOTPtMMlZKTBC9pt0t2u4YQhNkQF6ULnYdCoNZaVaF0FPOpJQIbQ1GUSMAcMi9RYrKwSFoBAeWdcCRiZpPCCTVrhIiZjbExMQvytJjm8CUonMlNQqCisEQcJTjrgkjqYtPEe0pLRo4uAifvaOTlCpEmCYAEMSZbkQBI+rfsIK0gJGdrqGrSRy8f0M/tenLPpwImjQmNkhDEMGfTkJTbCFqamOc0qNSPLDmcSzEVACLlzIlK+jpZRnyKqE0+1Mnmi+jUcg9LpeLn3T4SxzS5dUZW4VTQtZOFwnMZ7OTJoZxEmeKorKxZ5zhj9hKsZ2xeOEQTpCXpxHiKAvQVNXw12MParl/bj6P9mZliEI7ZTaxMrDnxEhbi2bbM3kdZjK04FtF2U70IdcbNpvvMleUV22jgDrFI/boJGubTarX0s9KahRQ8L7gLbaF2pnyXF2icXUAWkKmoVbZsK1tbqShUXbNi5hWaofM82acmdGOZt2J7jk0PjXzw52+/+V333JlzT2aeYxMtIAWgqHtNcDwDKqBiK8O4efFy4zZ3nZSo/PbW6mR8gv3pdMrzdRGhGZ2ptl7+u//o5u6fjGx1QOj23pXJ1229hfZZuVqZ7Yt133TBbl6/EuCn9w+0VxSjorv5Q+uMCZUfrhe68F1DhmPnowmskWXRFYY8Ktv56Y5nt/E7f/dutSlP7qGqPaSIwtYFSAVaAOoDApuEMQRF0VsEkqKexdndp494qNuDvtasPV3ZXjn71lef/fjfjHxZy9Hef/yXvOnDiGdnet2gqkZh75j8uW8/3rjx8FlzVJ3fjxtj2jwMA52AJqBjpRlTA/LMkQ17Y3QZDWszLVcgeeFqNVuvpeWQ8rJLz3f6Mk3AgKJEBpFZftwiStqJpudp6aOU4Mjc0FPykV4ecRSFZWYrwXchXQBRRESNTT4ABAipSeRQVpUoy7smK6NEREXML3mF5Gk4lw3C6ZBLTMYYNglvJiJKcVDL2w1EQbOrvVpYkVAYp0RCMPS6f/GB/8WBG39U7rzeXh1LYwqhLgkYkheRJMLo6ZYeDAQkvJnBbEzXBWZmyyLZxscYNtYEVSjnPJcEawkUathCRFgRYQtrmEPwJImenrytQUg2+aJCNr1RSwswFSFjsnHBsjnJziZ5y54qQmqz0lAMkJCavIlL7tewDGZKac4MoLOEEAtTXJgNPxkePj0j5/wR1MbRqNhvWlO3ri2tDUomFjA2UCjqbj59xFKxlkLWukKDRhUWhDhfuPlK0Tvxvv/cyynUJ4Y5USm2jIHLpqFpN21OHn/anZx7ce3iK2t3fj54cnNK7Zmrb8jbfxGak27zopFj6Zh2Vo2a2Kn7+aPquJwzcduaxtpuipMuHDyzpW2cGb72Nf/M2WkL1zFXYdGGdqFCXccKjhBI9L7tRAnqfWC2XeeNscQURQ3bZOeYpqbEVwLBsonCOTQ4yQkUEUJdF2IGaxIGQSY/UOmkmuQKlwY2Ups9QZWFdEmcJtEliyLVWhYRMkYk6YTTVjn7W6Vu1vJp6kOOC+PM7kUIkVmYuWnbLMgJgZhTirH+khowk/FT65zVUZrTTij1eURESlq4QlVPn16FCJv0PbGKJPuR1J5w4n6m1o4iCZNBTDIIhgGyUVewxoiCIvIbJ4FJlG1e+vyybj5h10A2rMloc1oF5C+eLo7Trd5yQYb0PTNxCoVkZiWSpBVTARJpvBOxIi24grfdZHH4cN+ULEeBC3MwGpb2jBON7BZiW7G9atqrwLEpEIhlitVprAOP9rG6z9Ueto7K7cVsBQfeTEWmLc9Ep9a2TBPPJK0Rlta3c+JuAuUgFYaGFfBA5xFtWyF6qzaoRDKOB9x2XMxhWVwIfbf6d/5o78//1zPOTNSZ/UlsJXo4cWEUYowKRQyGvSPhMLNh4tzcibd2xTianExVKGhkw2z57T/9/ktfv3H19dfuHo4XnV13VSe2kf60PoMeTJ+0gb24veqd/+bl+eHe0U0/uXZl69GTinmwXhUwpd7zYUO2TqbTvWf13/01BJ2//c6ghh1wNTprH33c/Pyn/ODWyXRq924fHe+4Ky/warH60pmduz3HC8s9qRbF8V4kTwQrJlLkxACzzkgLiZPW8+UXtr77e43rP33nR0Nng8yMMQoXJRRcg8QGkC186LCARsBjHotaXRsLob7wcHeyY8OzzfVLYWGbptt445Xy/Nr0o3cWD+8XIK09D5wv51pGb3niw9NWHu7LAW9P/eiYBmMZyFgxJpqApoQTpQWxJwMmDmzUWuQKlc4+W9YgmnKQWEEqSDOj5NTrTPWlU8tnkF1qC3JEIJOhjIaqajLVoWVFTDGhzJTIU0C2zWJmNsHIckIl7jqfqxkkImlmk31OWlD7pWIHp1kM+WZQzZA1GeS9dFy2DUycKohB2pcnpnR+gXnUhqpTDlFY0DHl7TWzIi44bmH9ZX/mI/vwA96/zGdHtt/GEE3M9hiy9IUXIHlPhpBuLSbDpCoSYmBD2XUfbJaueEQcQsjRrsjx4imeJQRNPwZmtswKuMJxkogkHcYpAQaqGjVK2h7nJoCZjUkfy0ntvLxymIiJifMKNd/pULWJ3UUGjJRNA2KTV9pgJhYNAmONjcDF6ehWO1mMcP+1C194Z6eo6okatKHgQfSgqk/OAUY8fL0K33AApBNEK0rSQVRhmapFkLquDQ/sk8P5xkUA3WFz9oUXhoPy5OmzeTgTfdVbecOtnD+extkIB699qWz2v3T9pfD2fyh8h/WrX/jtP7z5V/9hJE2D4J6sbBzOBvtH3sUi9ESJTXuMk5o2yHEcofrON7pvPQ9/Yoo+ddLs7lku/aGH3xA362atURKwoFZpiVFVbrFoq7JWQMWkFk41pE1yyrpK9camdOzUI+aBGCISQqAUnWutqCyfprT/56wEVjUEGI6JeZ4OJ6fTrblwLTlH6fBmJbykhwtRJPok3FNOlpin7SozmHOQiUJzIHEQUcOGmFVikhEmPAVMZPRzmoXm4qUhEpPmSqc4zbZUUslL5dQL5BTAJeSUXy7xf1IwEwYiMb1SBmm2BuCgkZlMVE6iAJBQ1r0ETVoBMGsSQyfONpDjPzmTVBJeRgikOajYJNV9SklKjhq6tOFkMsh7O5NgbU68lLTNQBelUA4MCKIJbrx7OH5yVF1wchTVqKLYG211gobrE6qOYz2y3gZfGoG0rNqgN0XNcbDLw9soj+TcvF2Tfe8WKzr1duyptdIoeZBtQ2iZggOLIeJYRPLwwmNB5SJ5MyHUvABHJ4WzEtrFwh57P+z7Udlxef53f/9nf/P9kb28eFqMr17/wvNX3735P81mcGq0FMMcEa2xUFjSCr4nfuBQy8y2ZrZ/ImMYz0YgLCIkhotVd/fHHz3de
duNqHrYGurC4qa5SjEdgwKNSTDsd2/4rzS/GVVcDl1A/gPAizeBg27UoO7meRUx0K0nJmaDYTfMinZ3ZxKq6noY4md1KTt4tcVq8/bx88OPOnsb/M3z35uLDuS6+/bPdzgniHTz58ZMtePZTdsa1j3NAwWy1RT5er6EJWT8M8VtATR9OqOqntyfWsatq6nM7Gm6PtS9uL45oyk28Naq3yYggOTah7XFiLBhBiYeteujGdz3WwV7a1a1sWpsYki9LEqIzaRAkxya00ITjoiIgCY9hLTMyKtIVJNGGFMhuwGmtTeUaKOQIJU0h1jYnYQgHRoGrTalUVhEVVGQAKa2zylErsEeogcSQ+ZGZM50PJJCqI8cJNxrdtZjOFWGakbEFDEiWlgktn5RjIkE0h9YSL6mvYAGBroACEyEaSrq2+8N5gVoDBhkiDdH701HGvSDUJotK+mQhE/BSPtAP/qLMHgKoaY9ZTMncZEpK+eUXyteVU0LtbWnpGmMmwTcPKRWBUkoWoyAXjunva1oSsNVTYeXUpJRIdA3SBFiYjFIBUgnH9s1Bd3rrypenOj317t1i9d3D39qXP1XIuQg25BllkJ5kHSXAaXOAeI+eB8JVh3N7u3/30/INz9JxDEE8+UGX6ozd/fvwhDG7i8Adz98z2ncP20x8c+a1rP/wvx3ICNy7+0/emvWVbnZGc59YbCwd4oM6oH8AUbG9eBnDjauUgrVrbA2tzJNlQ2smVEgtB0ee6j6zGPPPyuwdvjdvVT2SXWNoozMOSRrMIz1tzybVkWxopRVaIC7GV0ZBv7eyevnd/1MQrDx8eul2NIpwb9hH9+sy7kWzkev25Pdl9FsCDxffYrcxwsit/ST2F4bha9Vzm2DdBaLix7Z3+p/+w9+Lni8t7J7XPlvFksDGlIkdTRzUcvnF7rG4w3MiOp6Enq7vDl07NzV+t/urg2s1Vr0+hgrKIQOOKnBNhkEoLtAl/7gopknyNNZWcDh2FgkWCSkhudCKRuhYxEhEzkrpHAEYSTKauE0ydF9762r0YY7u8nr++1OC/9ue0+Ohwma7uAsIMVdYUj5Em4tR+andqAEs2IKT/56wVkkgcVYyItTaEUNd1OgLohO/roVDEQ41hVohEZiNBSSE+DJ0LTGfz6aquLu9cdsYF3/pYw1isuV5P/0gMQCVETS07AFYK6q21TwECmtpfwyYt7AybjkoqyZEHnoUFPQGIg1GvQevWQDeRb2XF7mu/ZnPDh/yS3TWOjux5sdc/u9oeeL99jNG5yYJpULEpKssumqDIEdmwE16pBzgQWeps8JUaYsvqVMfc59ovG63NkGOOwoaBLvs6H1LZ90dDdld3br13911X92UaaUWcQUjRs5XT0tWIAndybvkR9Q/i/ISLR3a4GA1kHmBl01f7bvasWd5AuRdLrsmsQN6EuZcZsSWwMFMILfWhTqWSgzuf5dfeGl3726hqp62pZ7Zc2ZNZ7n0YFWH72d4Jx9VZ4xcML1ljL7+gcaYZI7dAzm0F72MbyDB6WcZWAjhzQYKxELQSDDg4ZF6Fq3ksZ4BK1msp9KmnUgtzkKr86peakbGsGxsZosi4KF578RWLEM+rCH7v4490x2CKclTZbTlyB4/jtROez42exW27yNkbY9qCm2VPi2LDSr1aHS6Xs83R8JId+6PaZGYw7AX1tlc0WlmrbjMPRbC5c2aska3NxlvP3X/vI75yU+Yaq+ArEt9ywzZApAHaGLxIrfBsVMCsGqMgxjR3+jZy8mNf+y6rJlqtkgGAuq4NkRIr1BhDnW1cou91esTuErfsvScma4xJ5EY2a+vn7jJPIfYEIAhIjbUhhigiKmxYRYNEZmMAUggnuxs13ZKUzbr8hhDZMbUd8CwiDLbWpnUqmy60nVLWWlr1dukpzJySObs/JNtLqCDRKtPkK5LSkCgxpdfeA139BXUjRuKVq9IaOk6RLgRD0ETzXpOwuqeVmXQdVkWKi104rfF6WodLpJ4gOWLJRbHtGoCn7oxrqDDRsBUKJBdWQCMQwdyGQEpL3375+pc/+un/Xv3m8B6VzzWRXc+TEXUtuFWGZcnA7OGyNkPPyQ3TTjb13TuzF25v2Dsnd+fWZb5CThP64x9+Zq9O+s9RHLfnyvc/qrm4/MmMDk/rudsM3vVDO95zi0OX1aEXWGvVnoQIC4Bqpl7DuVSxsM2tS8OZrwLy00+W9QaynQEFDXXVjLKqGBUhI+1v6OV/8PD3B1XzNu/lxD4yQG2WLaUIvY3lysgy2tqEElKCK1BJWkOq5vzRg16FpZjb8w/enzzDo9GGL5dillX12ueL/x9Z//4kR3bld4LnnHv8uodHZGTkA4kECkChUA+iqsDis9kkm6TYLUqipNasVrMjk2k1M7a/rOmH/U/WbH9Ys11b2djKbGbWZsZ2tbbd6pnu3pbEJtlsssRHVbFYBdYDhcIjkUgkMiMjIzw83K+fe87+cD1QLVv8hKpCRSYAv37P4/v9fC9c9J9oqP3G4nwGAJefHn15VMrZx1e6k7GHQRF44gXyUK82Cs6nYaNl2520Z2fHDx/4IIg85W1wwBFqoeu7/ndvjB80cP/RdPfS6NoE3quOPuGXZu4PXx6PYtMCRI4C6kJEjkGNMBIYA4ChI4gKokoKHfXT07QaTHIJjAoawQySnBERDCNBvz2xGC3Vn4iqCJCOezLIEayfRftMtrFGaK21xPRsJbxmavU1ZX+19rIPhP/EWJyGQp+tkhH1mWEPgIiYOf0XFVVURxrVOukQgLzPmDtVVXWOsbf6gqmCo4I4iCR5CaghkBEIQasULbz/zi+lqasr119+5RZxptBFiSmp7G949tP3wNvbWwcHBz7P+987KqPrJwW0zoMCQwBHlLbKjiglf8MaBNY7LRP61oCMgCCqGnETOz+FTjsZuGHeVQXs0/CVeO1Pul99PL27LPbt1mBcZRdOy+ECylXsMJDzQuSdAGrmCpWAzhMpAhiIASGxGalTHmQhNMYGI6Ixj/0yrxcbUPk4vTCIfJrdf3A/q71OI8wQG1QUn7sOOvLoc6cZQEkb2/XS6prCyuqBVDVvOw8WdcBuIMwiBJKzb7vGs++0AyESMFEkBI8qkQNjpHpa5znf/fW7r178+jjbaU5PJ1Wsq9ajthnT1St+80bsztURZwOY7AA2sL0bqbCTYzbWca5ScQi6qogzzVxQ8cSGpGoUREFZzQg7UVeMYlfnGgTRXD6MJNZlGUszry9fkhvPleRYq5A3QA1/7ytfqRciUmhmP/vlhwHafFxYq5t+iKyXN0vO9sj2Gyg12w1OQMkXfuS7jFttptOqzlS29rdHzWh5LN5n+ahooePcz+vTYtOv2rZ6eP+151+X1h0/ee9/+WC+/3UuLnwDVhCWtWXDpq25RmoQWgyNqbYALUDnHJDzbZgTrZOzHEcVkY4Q1UxUkCh5nEMIyRITQouEzLxOCsJoGlVTjnCSG1uv7TIAkCjPusZOlJlVNUY1NIuG6awqGIEBCCgYrFbN2n6TfigiiUqE6NgBBHYOuZ+XOeJ1OiEwohmCc2
CmyWGbRrNqBGaUsM7pptRnVxdiojf3aigEJICYJtJpswQJzaVGpGkivs5MpH5Vjeu31TpyTP8TWQsRfZZ49IwTmVQysDY8f9ZGf/bS6iWc/RJ4/Tbrf8OfadV6DJnBs5CM/z8b12dXcv+PnUJuDjl0zUjzb5df+A+nd+5fmh5Uh6/sTqL6DliNJRI5g5wgJ2qpQ70wgbCID479l69t/cW7x1ev79LxSTYqo1vxyEk5BlT0yMbMuWMWcpTnud8sQ/KQx6dnnqJkQJWAxQCrFmMNqOUgHy8O96tjHY1e+ua3C+6kHFPe1VwczWaHdw5Pn78cR4VrtC2WXXlh3y2+cfSXXbt8113i6DsU0qzhjIAEClhECs5m0C46vzJqWBdGDbrWwSraKlqDnXDp57//4C9+fOmbx9vP6VLfuLm1e43/8qMgk7D74Tvb4zsAcHmnsMPD8fiUXTWpnz5nJxvucFI2RTXHOQ2K5+omSPNYhhcAyAUdcPm0vADBKVNW4MFM/+Rn0y994cL1C5NQbI5oGtSVfv7O7MLZ02Jvq5oZKyIARlli9CAecoCIIBlGjiDp8lCNSQyY0qnTdkgNVfpyUQ3UYlrOkgqtazVCYk4iPFBHFJWQknJJ+8QhQwDF9VW8fub6r9u3vn9zj5yeQXq2JabPqORoMSoR0LPQRERANFNah58YQOLwIDCzoqpIyhojhxKjJUOeqMY1it3WadwGnYkpODMi6siiKgGUuVeljz/4bZn77UsXI7hOIvOz/cx/chzSTRwkHB0/YZ9xxm3TOuoDXUCSPaHnE/UfYP19qya0ZlWnVXRyLSBhCEKEoEbsECiiMbGgOZ87IqrMNybU/mrwyZDxi4+K2aX64aOTeADHb0w2LmxshOFk5vKFYpAuow4aZmX0ZsFMzClB4dQ7LZS8FkCFe3LwGJxwWQYvha5G3I60nlgl09X8YAUnBDPQqbpz1kUE7wJFIIxeoUTKSZdARVUMhgWtBlqXGGYQOnZIqGCaQFYU27oFgbgSXUZoAGsgT+bMWqPMSYhQg9/MOBuJtb/59c+ufW5Sn8CyaaKOxdR8geMRS9fMz8gxTHa6K/tuNmcI5AvJcmg7n3GX6If1CqTypimjRjSksAADSC92IEOrEaMAAXFSqCsBNbUSV196fVnIzr0pf/v3v7aDm2XNJ4+WP/3offYFMLrdjc3RBZ10cTts7PjzbBaMz5sw00o8h1YoEHeMgaY8GsgiNGXQ0cBfvrQLzZO554KGrp22g43B2fJcR3H3ua233vuLdjTVB1rh/juzJowuPT3Viy9sNZs3ZCl2urAVaENUK9YAoSPqmXaqyomibF0yDUgUAE0gxmRciFFVwzMcRNTkXDfOM2kDIkazjDMCoswpglFPfkrZCQbgDKNGcNSZElInHWFyuvY4WCFISsXkhFWCmO5K0ESPS45fpAShFMg8WO/tIfeZ8UANPvNKIaiqQ1LoQR+qCNJvw1L/qmbRtO9TU3yTRHIULUGrMKokWQclGCYikSGhihH17UV6u/QskGT6SJGFfYYbrDMT0i9L61zCZ/ZjSJc/WFKX2lofDWuQ1VoiDtDzjRL4t7/EtX+3GFAyYiYJGJqtsRzOkNaILERQQwWLCuJJZSVqBuyWq8Xzl57/wjL8DD99Gw+uhGJsLEgtcm1s2GEG5pAcgpPxZHS+Wly7ODg+13E5mE7r3PsYKmaSKtq4oAE0jWiE6WypcKHBJlh17hbzzsOs0waoqaECmIfNjjZynmzhte3Rlb2t//GP/vWNJt6az57MSjh+bV6OoVH1job565fGkyfvLn78y9lLr7eTHU/hJv5md3m74eax3yoIG4VonR8ObSHqCqvMBdJGdY5ce6yjteBW1gKTDnnRaBANgM1cSndRj/7RJ//zB09fOhpdvdhsfPTnR7fy6vrseH+7OsISAAI3uEUIGYhZZlFkKGE4X0CjTrg+uO9hVFgp3Fb3Hm4wLUM+H+xuQqhCN/CeNX78yenzVwdbu2OQ2bTVMRfHSxj6+MEDACtL7FoJLnTERTS1XCzp3QKgEAQCZVMxoAjgyHpAkqqBpMWLqqT8IQBASs+VxRidcwZ9iHDS0pM5sn5Is54/96Wd4jq865msIT1l+mzJkm6vtJsCBOopcZhQOAAIRGRRKWW9WO9OT4vh5LHv9VT6N78SuIxB1EzROWKOncQonklAUhr2+ha2KOIUxKBDSKi4jG1RVcfToxEVb9x6/fKVK0VZSCP37j5opHUOrS8A+gbYzBQUAE0NCSVG6DqXMaZN3FpoTQkQ8WwY1ZPkk9gj8WhBVdmhIoAqKzkgIIqgDsiTmUjHadgvZE69AyIAGi5pCFvw0qURtvvdohqe3pfl4u1z287u3yyHcXDpfHNjxkVN0iqhdGwA5AwIGYAVSBWNADyczc94yymLWeO6qnShkGpgyyx4XFmYdTQnrrhd2lzF1TIuR2VeMkCzXOkswsgwg+FgWeiigEFBIYdGihKQ2rAyigTIJi6aimIHZTZYyUoFHAAQoAdTxQAYkS2TVUdjnR0+2vEf7MKVpzQ0x46UwChqAyvLgC9dkzFm41yla2PNWuogh6ZGVmSOGGg209OnsKxIQT0RgooAUe7zRgMIeADtApJT4owYQ2MI5NQqqb59q9kZbS5WxQ9+xlu7pc6m01X28/fehwJrazyDLzItNB8XdGEAeZzsXo58ZUD7pV4G2PHLkfNoVYQKq3LjafW0bQr2ews/fNIuLu2AX4UAq3JYns6f6rB7/vrld+/8dHjRfe13vveDe9Pbs+PJla9889bff8SXz2w0ssHiSYVhHKpAKxAFUlDoSFvCyBk5BESIMTI7U4jaYZq6RjE1UEn6Yec4pQUnNSUzdxJDK5DkxtAHBrDjNH9OY1E07LV8YGmpgwAKvdwx9vkniim4bx0HGtMzzZxygrzLUnWtMabwJTUlIgVKv4yM1CKz+xsFsgL2I19LEUp9MlIfegiQXBsKBGqmMeo6nRR6+cm68zYQFdC0QkphCo4UkVQsIZr7SZth76FCAibX18xrah+sqVSpT088rtS2/qeG6V4LY6bW87bWHM/+k1JSeurTEbQvI9Z2S4P1TIwMzMiQwCKQw+QZ7e1dGYAauMaWQAQdaGXsdTo7ezW7+vh4du9a9WE7K3GYuYJiKTSeUODdJm6jDqLvYHu8WlA2lebtu/Z7b+TzpZ6B87uDWJzHUt2khkG8siu0f+Xa9la2tXdI+6e6d9cmswMHI6K5wUjLgBs6zGosGkHwjZYdNJ+/9tzkg7vHULZB6KM3iy/+A60bVTCIUwC++c3Nj/7bm7d/1JWWl9wyVhmQ52xgkTUAEuXdaeMGpYrmgWRlVAOvIrWqDVmjsBSvbPUqBtNGMoKuJAmLZjyajMdfrD610+P6x821MefbUKN7apznEQDGLdZRC2RE9EqBSHjUQcNQQ9AiZzmba31OTx+PKC81fLB7a5qVQ603C/ov/uByUMi3rRvBoun+p7fnR/nzWgTQzVayJeovj+hbeRtbQ0GsGiMWjmAKHSGvxfoCBgRKZmDOVCOhmiXls
6paEFUN6YJFBYA14CJFCfUPIgIg6ZodvX7msRdE9nPmv2kuor7s7pfE/Ql5djyQ1vXlusVc41oxBQWteV3Q3/1gieT8WSeqhpgcFskb2OsbHCGwqKRXR18H9LgAbdOFnM4zKiHdfuedaj79z/7pP7126dqqDU3dzk6mdVO7wqvFVOz3bsF13OGzapUdaEw3LhI5keAcY0IDJT2IgUKfWKGma+dGOnLaOyQQxAwJTI0Aosaopo5JlckBEEhUjixslHWupUjdYjYO3OV+0z+/+1RlPLs3mi+8LMPp0faKd3m3K/dmm5M5FauBQWbkowFQBASwzGVl6EIHwuxSPhhqBOmIIEO0zspsQOjaNoBAq3GwtX1xc5QRZ+SGPp/qUS21J26sNRCy6DNmTUoBjo1ce/VVPKpDgwFdxwreInQ+H/DARy/GgA7RE2UoPpqzOtbUEijTsHzz0ePP7+1BzGI+wrziaGDMGxPdYY1j8jXHZsVA5NF7kIKCaBdUOj2ZZk+PdDVnI3PciVGvslfpOm9oRIyuw6yTjlVMBS0i5jSfn9+8Nrt5bUvJ/eg3hTasEHg4/I9335nBshyP0DT3mVmn2BjnYkiZa4XOpJuDVAytReg01mALtQJgAMduXIwuh3yrwpNzN8i5KPLjyTWeHiyqvPriF1579+5b5/7k6298/U/fu/fYT2597Z/w1iv3muKEs5xc/PRDN98Moy1uDaLQqo+ATkWrmkYI1nWeKUqAfkzamRqSS4+cc05iDNIQUVmUZto0jYBEUWaXmj5H1KxqjQJr5T/YM1gjAAA4iqouTaEBJIS+eFZI7sAIYGgpxih9YLpaUiuZXgrppgREJAgirGSO1MAJmaNkg0zVa3qbxBiZOPF3AJGSCiR9wz16pj+IEkUlGXM1RSQ5dpg24tgv1UysP589YBbMjNmtQRr9jC35laNJWiF9lkHz2fuqf+M925GnaqD3bq4XuevEYvyb/88zWMJnr7uUv679C+5ZDUHrGAwAUiCADoDXuXUMENPXj4RZV0vMNILLjDJtPPxOcW02v/ve9skLcF20cDIwKgc6ihjyS/Nm7lnM7+GDY/3O6+XFLjz/+eGf/cdH2eWB7lhX+GxDmrj85teyV7754mG7fQLbd9vxFMu5K50UV4amjZogD3zsYrOS1TlMz5uizBU0HwEfHrj5cetGAz9ob7/dvPoNdhum0UJnGkbD3SdXXj298y5v7zazKRSUsTFl2kaSzq7f2Pnn/+Xdf/PHk4/eQeFaqABPNWmNIgg1QSRuuOvASaRaspwCBopBIHDTzexYbeQ554KWDs5bzZom6yLMMwBoLwgrB+cMuYuUaZbMAgwAaIIRPJANui4gMsXy9u4r7KVTKUd8upgXW+X7d47Hl7deeH1IxUaHvigKiEXXRWI/m+ujorgCs2VHTFnLjccMIoIaxv6vmNKVailNKNFdDNdW1xglxmgGKTJs/Vis7wjskWwJG97XoKDUp4b0B9OexZasNc62FjHoOtb22eP7TPUAfYUJPQ8vPbXrh7nXBq7TCzNymtSca72gJmqHmfRlASIApoC0lI77THykAAhRIwJ6AOmTwiIjzabTJjRXnr9WFKOmDQ4InA4nQ79chE49udifnb7qpWc/M0NHquDIxajEDsCG5WhZ131hktbpakkL6RhjkJ5DnQ6dSzo2QABK32M/TkvLNWUg1Ri4A0A2VohKYkpogpytSE3FB+JgRbn9umxdv7s62pifDsI5xyN3cnRhRrvjvdV4Upe7i/FolUHMAqpAw+i6RauiZuQiRYAAeaO+Ae4od35lHiAzZXVML167VikYsPe5hm4+m011al618QQEoH4wbBoI5AS9CliAp1Uolce0MZfB3mBSbNftsq26ioYEHRgbeOpIgDTb9RuXN2Y8p01egT9Z+Pv5RqejCZXxaOHPW+hKWTaMLRkINcygBI45Q7AcyAqLCzubyuyMZk+7ZuYoGckoyXcRCUyJ1NRZjB1GJXCgSS8rwKQSBuPlzWuUefvFJ8ODezze4tGe//EPPjyTpd8pYpA8z9TbsCw7ABiiG6GU0pFvzUX0AllEr61gIApAtYsL1cm4KfIKzsaZLdgBwLjAav7I8fnL33j1V3c/WClcePlrP35w0mxev/nq91q//2CZNZMrYTG8+0f/rnxwwvmOGOkKoQKLILFxGDDZ2buOSB2l1AVUi31tl1IMVB2ixAgARM7UqqpK000RIeIkOWYmA2Nmck4JI0BQdc9uHARAyAyiGrCz9Qg2nVrtr4y0l+ybwmR8BxVEIqKo/c54vfRKqX/aRfV5KaFLomtI3huC2CVqM3ifhxBSLpsjJ6ZgRuQIoO2numkajJbclKLJ5Y+E1qlGNVWJ4hxbP2IyQFRQU0385hAUYY1sTho0hPSd9PrttTS0t1SuF2HpMiVKylbr/8eeP5JekPbspk2yZ0ur47U0JsnREe1Zs67Q/+eeFwIGRoaJP00AAsBgPpn6LbHLFM1FbUvKocsZ5tC52QZv3BiNb49PHuT0wqI4LjZjtyqkCyJPja9sVy7TbNPrBah26Fv/cPyTnx/PJxueDQYKI4IR5yA71y589CQ7geIojo4JT2xwBqTTenUGeso0F50aLFhayxozQW+s2BlIsNoggmmnK1CiT38Br3zP1YHY60D0nDYuvdF+en+C+4vz2aACKGPgjCsyFNnMdCZ0KOEYxt/7vmg2+4u/YAlcmWuMm4DqIZCFsBJx41JM7ex8gCvORiAdA6hWDBRWTq3FEgHYsA+AhOgAhJVdZJdlgSMEYvBAQTECgRITimW2Lc3tvc9P81Eeayz901r//bvH/+QPb17Wcv+Ku/2wmgY3KEfCRVit/stvfe6/+cliTvTpib/kc5+bdgCOBKKioayFyaQAREJgpgoMIOsuUlUkxh6/lmJDkvI98RxihD5JLK030gNhKZMWmdcuf6R036blR1ocm6nFtHkxtc8UVs80DD1RDiHpGIDWOblGQPCMbIu4NhRaTGjY9bE3BDNFJewbSQeA1uMnkZA4CY17lSIRu35YpcpIgBRUlCDzXrp4/eq1vcl2I6maho3x5g0/fHD30w7EqVuXBPYMmQmqeT6Q3khtnjmN6dtmlVLRsR88J4eIARCDBwBmtvUOnMkBKCPFGM0l8CZojEAEjGYoqmCONQdQilHBMGaETrBhVSRn4BQCAKxUs5ANgD5X7UXmebmcjbuH+XKZLY+L6qgo8q3RVru7v9Tx+bho8iIOD86OQYEUu07jymC84W0krRdgYJvVM0Z2OV28ug9nmhFtjje6sJrPp9uj4pXr17tdWe22s2Kxvbt9b67GHo1VUYOw0MnxfL/YCDSqoayhzIcVjXFAZb1agRFmpiTj7QmwrYpVXaxohJo3Z27L3Xh5o3j1Pdj+cj3Oq6kVF+DCDsxirJ4aenVKxpE8oEZEWNUuSNcu+dFDWp7lGXfpLgBwYOlpMHTgnIAqdBlAJPJAKhRJ2QCcUiR6+RJs7W8dnA3e+i2PyhgDv/f0wYP68fDSOKsiW67MXZQ6b5gZJxmPqfGjyjaCjlY0
DFo05kdcNrLSaNqQa1ym2YwGXvEIRZEj+JNZKJrJC1eufDBdtNnuVOcPHlZL2n7h1t+bw+4yDuuN64d3Hs9+9FeljT2Nw937dv2i1JGkgNiAiaEgpETMNIIyBVWLDp1pR4AhBHbMffgtgCkpATkFUQRAI2QV8czsuQnBkZMgakaQnHrr2SsqozdQMyHqb1nCz+Y/CACIpuAwjXBAVRGjGZoBuTRtilEiKJKpYA9ZjjGCo2XdqIgjij03w6CH6hkQRUALUQkIKIZWwZAZYiAFdBQTokLUESUUZacdA2qMhC4V/9FiOSzrumbyts5awkTy6j1EQEhEqF0KUONUNIOLIAaqjthAESla/1rrhdnkVKOZMTMiNV1I02voPZsEgKqR1k2FmqGt4+MAksB6jSeAHp6tiqpIROQUTKMSMRKqAjrWoIUfttpEJQJmykUjgGakBhY7BmhRgTN3BouvFM/TBv/F9q9+p7zypbqstTDItSvnbvuePf7iftNurmAf8oucD6h8qfCHAkXeFa2NKHoi3TuOe4+Mp5ZP1U11MHeubkTnQHOiSmRONlPXtFI7j8Ozd3+2/dWvyGikQUdXb1WP/v0A1CkoUf3RB6MXvwI6CKK48hTno8nlY9io3nnXOd/slnwyz30UHzVocXULn3Azzy79k/897l86/qM/K597bet3f//R//Vfbe5dWLWFfPyxk1VmWf73fo+fewGD2OzB4pOP/O0PMp6ANkCDYMoAEakzQydon901oqDgNMtaBafcMQVVr8aOlIwYgDVXF/3uOxc+RywkBCSQw7e+vK+2fLrQ2x/O3j1nyK/zJp7M7PrzO2fSBvAx0KIOh2FjP3sqhQNQxMwUNHTpPa4QEyNM03pCFKFPGYlRk1gotb2AEC0SmqGBWBIEgcZ0h5lGBYiqXSr32k5jRKQEcUUAZxABEp0YENSsHxoZphsnEVTZ9eAOicLOgQFzFlVgPY5WAgdA5NRMU3WqaWuYdiyoMfrMW9KUUOp3LSNGBDVw5ABMRSNZqlVBIwB0oSOiPpwBVFUIEaNuboy/9rtf52Jwcj4D0yzjrc3Jycnp0eGTsiy11UgAppljkYCOCCnltiy72jMzO9VIa7Oec167DpB6mIgpEznHEi2EJmPWhPoxA4CokZFUDckhoMtYRRxSNEnuauc4z/NquWDHkfrcVrDAxN5zs2oQCFIwJwCwRKAFRK+wMcu3ZvnlYhIKPhrNzwbxNOuO8qMjrvjC3qSeXDdXPVnhdKXexxFs5MPalrPIYy6azo888cjDTLlwWe5G2xubVHjvRxf2tm99/tWbn7N9CDtRr0Kz3d6tju7+6ujuE2lRgyEoaQ3U8BnxSHnbT2a6zImKCTcaYAIKkQunADbWfKsIPgK3bkKnNBm++sbBzD96cPjAnO8GX/Nbi/EVHV+kuITlwEB812hUJeWMpGni9IlbnHNXoxmrWNuoZklRrkBGEbBfZWaYdYRGysHEWuWMIgcOFjrb2tkYbhV3j6u33xuxLsllmvG7T6rieok1yBDBwDllD2AQPehWrP3wLE7msDnDC6dxPM+2aUbhaeCGUbGrImbgBqQQn4zHoQ2hq6dtyMPOxfHlA6lPD0+2tl86srN5YW+88d0TP2l4d9pkD/7qR9mdk9FqpCfz5unp3it/e2EbZ9UCgqi1hB2AqAphZ9CBJZCzEoKZqOmg8DdefP7OnY+iaJZ7jR25XLQDFSBWaXyWW9Qi95S63tBSb+cBIjOIzhERWn+RqKmSc0kmqGpRDfsqXOlZp7weZPXrIwClfrgGQKrO+uK9v1whSaDA+s8EBSQE0xjXeAzVphVQMoJOMa2boxCRRuBI4IAdNdJFQzCIpi7nEKWHNQIQGIGtqqUjJ1Eyx8mQFUSIHIJZTMdPMu9jTEIYJaIY1WWpg0CR8Oy3ZgDc374sEtI6rW0DEQHR+j7tM4x63VXfjpAleWvfwmBMGow0f1FQSuM/MlVa+5LNFDD2yZIYMLoQ5gYMLkOToEJcpt24gYoyEiOBcKcKD/GoHPtsPHx4dLgJW7ubE2pnJs8dM4yCLLlZlGdnG/BubIfHs1dulVXZPJq2xSRrmJ+GMsv8ofAjGZ5DPsNypsXUnM4CzYDOCWbazW3Q5M1cIvnm3r2yekj7vy8njWEYPH9t+qYUTkJHxI6rB6uD3wxe/GZe1yoDDgjMxfWv1p/c293cYLogK7b5jHIPrdhR0x10N77zn4VPHs7/z/9HCv7yv/w/PPrpb6iKO/+7/+3xh/e2X79lKgd//m8/9/qrpx8e1ofvDsrdkjiqEneqnFEuAsHEAzlFAXLSb+gtemY2nIt1EbBTbBCiy0Wb3Gtso9NCqd3M+D/sfLH2jlWigqcMVt1c6uaI/vqTWfH8hAZDdJEC75akRh/cqXbLix7qx9PsUygmgw3N5oNmA6GLjUCBBGAhEgMEFVCMRui7rmIGIscMIq2mujM1DUxRBJK4Aixq4ksoEhJQmkg59tqEqLKeVasjAlEAFSTFfm+RTE19GFq/n0o2IkNgSxroNXFSNKpqT+FASyfdkSamjKiCGpDTKIaY+9wMgnR9GAsiM4Napx2RS+IyR5RkiUnYgQQGfS54sgJq0pEZkHNRZHt729ROHh9HlUFZ+MzPZuc/+OEPvvHNb17c35udzzPHBuozLzGmz8zSqkkiAKQNtKg6JO+yupMMiQGIuVNRIEy2ZseqikjJ9VXmOftsUS2Y2BKcTOSZgEvXu6qmaQDQDGIUIk7GRJEubTHSCC/tnEyACDIw1dAxGTkS8TN9Ybb1YpEtSj3y9bHUFR+e5GfT4Zy/Pi6/+np3fMLNvDqaj3ehYa7cRp1vAs1gKDKU8WBzlO8sHtbfevWlzd29s8XckM5i16xarxkEY/bPv/bFv3N5a/rjD+5+tDQlaBVatlrrslzx5Fxmm35rgXM/PtMguEeUo7FxxguYr6ihDYJczyD3L37+cGoPD1ang+dWuPHBw7NbL30hz55rPw2hcwVn0sybauZ9gd6LKDW1HD2l6hGgxS5ltjJCB+gUzJFqNEREF0XNCB2hCiBn5hy1sfSi2tWaDy8/B6Pd5s5dXi5CkTslg45P3HgcC/KzyUBiVCEoi0JB2Wvtt+awNaPNmreP6+GKtxbdQJcQqi6rHdUEjLYCWUQ1RYTzwVZThQzKUZZZWGzvXB1d3/vo/r3s0peuXrl5NwTR3cODavXLH+Un56i79rhqGj95/Vv+ypfx4xMLTjQ66BDFQDDhdUDFlFBd0kylZjHK+fmZAWR5Jl3HnAGZinrvNWoGPsaI5EJU6BpV9bmXqJEgJRj1NI9kT4LezJoGpEkvhGuVkPVmHVsbBnveE4BJ7IgcIAA4VQVTIkLHBrHHwBqwkoFFACHDCAZma9p8usb6PjUqpAMDCDGmRMQuiGOqm5aTLZiADKTr2AjIgaohKAI6UjAxdeiSgizFEmsaCaQDAxBCQOvXaSLiHIv0gg6JMTVQRIwITei8977IQ9VpjEldKRIF5NnYAPFv9LUJhmq9fBrXDTgxaa+8UUbqST22nlemVy+AQkx/A44
8mK5X2krgHVEXAgIq1OQGYpEaAlaYm6pJx3U2L0nCyzt35vUw7h7hrhqcUlkpnOTNPR0t/Nl9jXXo6rrVXaqpm+Ggi4MpZGajzZhP1Z/qZEEb5zCRWYSZ6czonOzccIn1vKUaicX/6q9H3/7d2BFpWIUlb0zsyiV9dKJ5zhEKoNVv3yyuvbrMXGa1QS7V2WTnpafbL4XDT5rpDF2XUeGCGhXy5juLT47rvPAP7jry+//1P5vP6if/9k9e+cP/VfVoQYf3Kwfhyd2Lt1769Idvbtj8wqtfrk5Pn/74F6PJBOaNo65TIS2JhpHACFE0qiYFkFdPAVorGspbKzrKY1d0ZTE3G1PtEQRkM+CH26/d3b5cQs3BwcCtiuryjY39K/kP7i+K5zeX+XjWjOp8pxL/yrULS78/0En2UXVji7jDe4+q+zh4bSzGnTaEBNqBqYEHU8AOwAMGMlHmQiGoBgnii0GUYKkhNRURJo4ae+64pZsUASD2CyAgiEakQEQkMQKgqGYFp6yHfrajYGAuqYv6VGBT6DUfWZaJRo2xV1mZ9QU3JQkkkgExIWKMPUgOklmI0Mza0KRjB2o+9yKiEhHJokYM6BwgRBFKZkJTQkpbsARkNwNKPBKm5JZMZXPUiN5lyKGThw8eFnn+3e985zfvvTss/PXnrx8+eqRRlQCJMqYklUICUfO516hBJPmSQwgFZ4lLoJoywcEA0GE0ddgnpSjCZGc7NA2nEYJZIvE5IjMwNNBo69jxFB/pmWNU1zcJGGMkpLWmTcHAGEwN1QAwA1I1NRMmgCgtDlb8Mly4kWWnJTzRZraihs9me95/dZ/3nxs057PmePTgpK7dDPyouLB96RSsGxb5yPt/+E/+3uH988fnx7xfbGxsQu6GV0dxEukSypa7d+674dXPfe3GDw5+1B0FrwPoSGqBGpZlsYDx06bK3ASibO+cAwJ6MNLotBjmxXgwk1lNE9u5Rvnu4cdPG7+/CmUmFpr4V7dv/8MvXmtCS2fn0FbOLZ0IRW3qOVfzMD92swNtWhqWwKu0j3MAEaGvAIkNjLVFdYAMogbQaRwD75dD1RokP791czreyeoqnhxx4RQjokVRPgqTCuXK9t50cexyG4yyJ42iFaR6LpvnsaziYOnyUyyfSrE6D3DuuHZaKdbmvIMaIIvOwECwyk7NQzl5zgNcufbB7OnHH73/ha99v4bRx+etFruz396lX78zbja7+VaYneD4pb0vf4f8bpiq55I7ZyhAghgAAiGkZwZBUxqCWkxzTIn65PiYOQMzYk4ZAqQgTSDuLebJh+OQmKlfySBJjESMhtEADYk4ybAQXTqk2rNbe1vOM6t/el7TUAgMNIL3rGoiEVCJnJmpqpp45w0tjcskpRkDYJIggcEa9NgDMgA1RnYsqsGUnMt8YaoiwkyqSfitiBRjmo5hdC7N2DhJVqJ64r7oR0BHKekMDclRtJRRAQRIjmMUZgZCA0VLfAJ1PVQLiICI0VHUWFVVEkyp6nAwWK1WZJETamDNAUx8ASDqk5KfbdOSnAb7JZopfWYtRkxfF9aLKwJIC7wokjFnGcXUAakCgCMDUNNoFgC8dIFXTEy6UmXaeDK4Oj2/P65mqPebcGF4eeW609az+hl1B/XZ7uUNpdWHs+b8qHaglLtFzJe+bIiwy6pBsYQLs4oq2IQ50hxgblYZ1UQdhSawcBxvT978adGdi/cQtVMBNTTKn7sqBw8LzEFrUGqn9fz0E9p7DaQxSn+zg40vf/n8wUcXfR4kjxqAHAN4KPXh8Y4CcHHus+ZhO/3Zv95/7ZXRV148+MWvc2ri6Yl89Jb//j+4oE3g67C1Pf/wl8V3/+DKd//Wx3/6w+LxveLqzeWdw8GsbuuaCTgfCnDhNwAgrpZxXkMGi9Eg140StzJY2Wo1wk3zPIDqwp4c85WfDV8ts0oC1KXmGQ7yQZiEP/4QT2nCZTGvR+fZxtNluXTbn97Brj7+4huyNe4ezAfXr/MXXy0+vNd9vBpc36wkU5+T1skMCxGU1aGhqjLkUQwoIvo0FCakAISGgA5MvS/asAIzQHP9/hIIyFQdAKbCDpQQRoOyXi5NzWVeNDp2Fg0RQA0JSBW4L5+VAAEZ17dzjJC8eUmYj6RgfXpYYtaBSyDjRFBPydupjiSipP8CSxAvS6V5OpKY1kCO0lch6De3zmEES5D1tJZXsJQVrGpJVum9V41gRo4BoGnb8WTy+TfeeO/2e2ezs+euXNWooRNRTaSw9BrKHGmXGNRgIo5cApiIiXMcRZLvKCahFWIUyb0XESA4evJYJXrH1tc/2BstTJHSBjOmqiEJSYk4mmqUJI/TZxv73sQNERQNSBGYGDCopvlUiAFQNGONSmG40eRbvEs8eSLnZ2EVINYDCS9vwv5o5sbFJ5PyI4ITKptQbDeDreH1V2/+9N23yo6G13Y2t7ccZ0rabQfdVtmk43b7Cew8OM/c5PrmlacHd9+DFqw1q4EaqspR7XeXKqciETFYvrsz42EAAmIO3gv7efvcWce7l177zcPqHLaWOKzQ43nMeXxw5/gteOtLV7/z9O4ZR+GtzTairaZ+dhKnhzQ7cuMS9vfg8JgJhIzUjLxCNJ+pmFdnKB0iOiZkgqgxOOLtwkO91IvbkI2jLyZg1Zs/y0bogLELih1xwfneTcZl56ETlNA+rUgdeCvFeaHtxm83MFpq2diEaLcw7WqhhiAYNGhi7EEdaie2cufu6SI2z2+E8TBUM7a4cpe++8FiG1QzP1n96pf+9ruF7q1msyh+87V/5PdegjNtjmf18fTyxisnODB7TBTAhEgdqUE0UEeKZNY/CEm+ZD7zyaQDhKGpc1/63BPAqgnpoDGhrJWNaX5CBmWWp14h3Zc96g4R0NAACB08M7r2wfJmRASOuJ/DAKTjFjvpcRlGagaomc8cZVXTMpIzYACNqo6M0AC434cZOaeqBmhqQOgdg+j+7l4j3dPpSUFORHuRJvXizDXpBogIJFIiNiJGjUTUQSRm71zbBVElJHYudl2fs9aLrQCQYtJJrWF8mC6/Hr6TbtXE20WXOVUlJBGpqirz3kMvXrGevJP6fE2L9P5DUJMCLhUEBCldCtCS9QkRzKVEZQDrF2lIwAhWjvImBANiNAAjdkCkombRMUft2JGBw867lsCRtuCQLm9dPTi7v7rAJ6vF5dXWeblzTGOCk6cqEbHlzeV83vr4nW+P3v9k+vZHjQwGK8kWNfuiOIj5LG4t2ohLpIpsprgkWiG1BA1kUtqALr39Xn7w0QKUzg7ohRsqlWXUNYvNV7548tbPi2a64NK/+Mru61+GYrdZzSH3FhtAbprz0eX95utfn7/5w3KwoYoAgWAllgFmAYNCHNbN8k/+1aZBtPDgf/zXw71rw1tvHP3sYOOr36njPCyI2vvz936CVXXtn/5X5wfN5u7e+Mtf5r39+9Wf2cnJ6Lt/gPuT9q0fZM15HQgAQGDAID5KPprzKtPM+4sajjrkuqFJVoqFf5u/8sbXLtwa05OlzA8OP1Z/VDs99yEfNDQGs4p25m6jwZ
3TpuQsjAaBq3Bzc6MAKfIOyp2LV/I3313dvFBUuIo+eckQAUlQOnOM5kFMtSPUjJwgOI0CBsx5lAjWEbnUXyKSQcS1hn6tcuoZcoQKgNWyckDkegeR9bMiAwTXy/f6+jZB7vqMEDBKAipESwpVSLJNcegSxM0gkd0+cxInjQNhbztOox1mbkNrBs6xQwXoIz5FBCE5/G0NnbO0G7KYBA89SAQQmVM6uHYS0mcCgog6l61W9Wi08fqtz//2vffPZucvvfhSmrupKq1de6kvX9Ne+zpXUZmQ+0gp60ds6XQRtRLSQB4VCTDESACqhqhJo6pgEBXM0mtGui7p46K0SeyWfl+9GhMB+g7EXD9ck9h1Ctihc4auQwMxl7tIHAGgA6ZW6wzK7EF3/JOPdm7uTr5Y1B22hPPnyvzWteLmRGaH+YP3Nm9/sM/hZDJ94Zsvl1nB6DsRUY0Msi22SU/C9hS262JrBZOFDp8ezSkQrKybB8rRltaUfp5tkNYATQTuwIty6WsgH8x1jSEXc8lx+9LTbvT0fL70exVuUfDagFVdjvzWr96/VrxSlAWwi4W3M6FHx7Q4onoeGip+/1vLh/fQDoGUiIWQMbgOwTuB2EBARIKcDcAiMinQAIExmgQX+WxrmEXY/PU7ocBljIVB5EwBWZSf0u4Ll25++PC9r7/x3Y8++OTovB5wqTF48K2OZyGvqGh5fBbLcmky77B2WhnVqI1FUSKnIaqPgc6XppcG9Ny8Q78aUldNNnhyuXtKzOXynZ8Ujz4q2t2mmvLFW5Pr32Z19cOjcb733i/e/ej9N7/79X/M0HXQOQQDSzoATAcHyFQM0u4QUkGaikoip9KNN8ahS2kNRo4ckYgGEU5ZvKkvUxOT4WjkGNgziGlK/zEliMkISMCAhKimPf6GADV1yeuGeC2+FHBe+5VnOg7OokU1VCMHgKi9PARpLYeitDPudV09wKcj7FTm9TLPc0YnIsRsGpNDABDLoqyWKwB0jmKU1G1n7EWE2UeV3OehC21UZkaDaCqxAwRCwx50pVFBLTrX73EdEiKqJqa0pUrf9TEsCK5nc6lqxuyYEugnaVSpx14lrGef3AKYwmSod4ukmx9Bk0NCAYlSwFVaixsmAYMSEROooEjHzMkC7ZANTaR2WBA5AGHKgGJeZE0dMAyYSQJADDotXvvL0VtvTGd7o5NqOdRJE2jlxgfMC/CLhXzztefe/bj+b346v3Zh/xEpYFETV74cZXGGOJ+BrzKtDCrDc3IrhKXBUrF1jv3FNz/OP/plU2YaurA8oeU8Gnl0oathM29ffkm6MPmd7/Fwr1tF1zmfNaqdyxyAkkl9Ul/4ypdPqpPVe78cDTYlxmCKKKiBHSgEx3mOmcUOT++NTkzvf3D4kz8q3vjChb/13YP3D/LNsyHywePVi3/wvU8/vnvhuf3BtSv1VOPTj9p3f73/D/7pcv/y5NrF80/eg7P5+Pv/DADO2+PTv/rT3W99ZTn9RLfHhZ/MDt6NOy/Z4qjDkXSzZv/a69/81n6sb3/4QKcPBg/vPkeD6o3//CAWjfASxjHAyk0qnRj63ezcY+0x/voO/a0vuVd26XC++OTnxy9+9XNxEO9CdmUcQhUzTTkalhXUdWYdGKBEpcyZqEYkIHCMqFGCARoQE0m01F72YUXEYBJNIW0twNLACR1YTEoGAFByLgkdOo0WLW00TdP932dspUNJROQcqK7rbFW1Z7GZRJactpDU0wpgSmuGRnKWWD9f7EtTxy5GSRaAtOpCg8EgF5He2a9p39yDu1JJKhr7ix8pXaiqYASdxvSNadc64qZeZZx9/o03fnv79k/+6sc3Xnzp8pUrphEM1RSQikFRr1YA4MgBWXIaMiI513Wdpb2aKhMl2zVETUOBXqSGqKrJ55DMETHG/qWWduQQnxmsTXV90yegOyWqgammTBdRTbjcpNV26W0MApA5RTJAsEiASXRJkbBgzU8/OhrzaEMm4+lw55u7dmXQZPem27s4uVnf2i7eOfqD6y+XzN00tlojgIKi51AWU9s+4wt+tFM35YqG954udGG0ctCAi05rdQXihjstBgZjBInkg+UB8hJWAiwGXBRB4Bz8xb2bd05WZzo+s83ThvGktjm5KbJyWMrPfvKTf/TG36lkFZaVPnoATx4IN447nvD5j/48U6Bh6VZVpzUBBfBe1Zw5BRTxvuyMu7bymQuNmKNhztg2QkLHB76ee/DSVkuisiNhozYCsSPgg+XG7FAub9385UFVTl5WnXajkYZuEaDm8UKHUYdVW8gSZk9WNAWYqVsQLFErBVEjAi/qwnxVX2se3HztpdX4ajc9qNif799snsCglfjBj8vzB7QadVIVt74z2v9qmDXNrBrR1uq4Pbp/11vx85/8+bUbz+9f3OpghRgRDUARLV0DZpo2Ksnn4ygBLjIVBbCu06TPIHIGKgrFwNdtKxozRxrNJaEjwNn83NRCCBrFrM/RTWeeiaEPxlubECG5FiDFHkIiQZkZoFoaz3Zp1pWIs9KpmlDmEhIvofXSIU/zNDBDeJbEtzbrRCi8r6pqVS0zphT37YjWWCxb1Y33GQK1ofE+A0TpYui6PM9FAgGpChokUEASmLBjjWnMlfyKyUaZfOaZ56zrgqa5ccrzNVhHChr0uavppgREsNg7otNfR/ImppEz2NqngYD2WWAr9UnBzhGqQdQk+jCHaXOUZNOJG0SWvEiJ/0NEierliDOf6iyNbYIPmDaOHGK0leeAuuFp1m2V2+NZc7odTkS3TrQM1ACHQeH2L7z5cFUXebXMLl3de/ekPiwoEsQAXQVZHdxGNlx0sIBuHnWuWAHWBg1ADU7d6N6x+/itedFAaEaTcVU3yqROAsQMvZ6vLn/5D4xJY7Y6PUH2xBwjORV0ZFiKSu7z9qzd+VvfPc20+fVPM3LMm0S0hFOAkhRWeuoRFUmdESGbTbYnAu3DH72Zjzbyy5sP33xnu1mtHh+3v3m73PuHH73548996x+8/5c/v/HP/6vi5VfO374bdkrrxC5f9hcuAcBolZ9fvTl+41sPfvj0C//sP18cf1C9s3Xj1o2zf/OvNn05Ut25eLN+MvvFX/2AYYEOBXe2v/D1125dPXrv7JHs5UqV26q0yGVVwnRAwtSWoMWoeOfde3Wb/eG3bpRNu6oOLjhdtLu0QygxKkBQ8GADI1FQjGrZgCyqBec4Z3Zts8i8Q3QIZMpJ95ewEkhkILDOHExjriS4LH3RNCuTmOe5c1xXNfHaNN9Xlv2PNDvVNfQ8Bf2BJJs+AGB/O/Y0Oks1unOEmN4kvYyjL0bXRb5zjhwnf13XdQTkHMWoIpIszKHrNCboHkZTRFBYA9jNAEk1ppJdoxBiIhWyspoAGhCKqqgSYiMBFF597dYnd+48uH9vZ2fb+qPkoIurrnYAWea7KAm5kc5uBOtMKZmW1USjgqFzpkqAElWp12Rwb1sAMIkASRMKAGCgGtUgGSnTn+yaBWZgBsi9ulIjJK61M+06AzQiJWQwJiIG1MxIQQNhjg4RHJkStJxDW1WXvviCe
2Eiy9ny3pmfnw+uTvTGC3jpueXmlVX5kV0ZfyzXB1oNxmc5b4XQOSJ0+XHcmsIYN6/86v2PaGdH96/W9x6Pwih0G/OTGZ6TU0p0Jgru6Xh3NRyfdGdjWo2gGkBQJCNC8EElDMbR3/jw9C76S2Cj12ljcmV7+9pzcIbF2fJ4Y/7Dv3zvrfLjN1672c0rtzfRWQ4SgmJmUg7H1qnGRrNciS0GnxXQ1F29Yucwz5ex4yhM1GjMM9+pLVU3qWA2JB0wgKyeste2DhmTgI4yJWyXwp/M2FXZ3am+cHFva1B0288FcgdnT8AB2vZCc6sxC8yzyMssBrRatYa4EFgBizMEwTitZnv7myN//eTd+5lXHWw9ubSjD7oihPb2jzbrpkWnZsUbfxv4xe7OyQe3f3n39kd//3v/Yn50hA384//1v/y3/8//03x59MLwQj1b+ZzRpfOQHgglgmj9cpAQLNEfpfM+F7VWVqDIjsDQgI2k7oIjSkpFcNTGiIAOgckBuigKiIjOFA3TQxt5wKqa0MqJWAdruysiOUYi7EKXEhwcZdE6hUiMacODvXBrrUdlUlMyUBVMegW37p9TmW/rNlFNydhnaIoZ6xpGRSnrCACdqaiBEKHrpRXmHLdt6xwZWCeC6yE1MqFaRq5LrT8hmhFTWhADQJTYxlTtptUXsOMYe6btsyFXmp5hMhWBEdFnFMn+NdfX+mmaDYhmz7bbhik1zda8Z0gtBSJSYnCmJXsSz+jafdz/4SMhQhQpSq8CQYWdVwve51V1Pt7YCV3XhEBWQi0k3Cm9erj/ttx9cjmDg4fsdhS5KoSH1NwPW7uj3Xzjp2+t6q7gXK1WDQi1DTzHYtnOEeYGDcASqQZogFrUFYBB+cE7wdUoJLDoVllz977/3BfyCxexni+9lIIxNlorsPeFA2miuswNBMki5RSETKAFouZ8sfvt35tfGK9++OeT5pgG48J5EommzKqCZC1BFNIQFCa7o69+3WOJTz51O5/b4N9SJfO//veXwZ389//d3mjj8Z//L7vZRj09PPy//HjypS+Ot16+U+ul7/+9498+AoCycK98/x9/8tMf7NCwmXHHL+1+5/LhJ29OYLIil413P3r/vWmoSxzwzgt08YXy+Rdl/9rd6dPJLm7Mxh+tJpuxLmjuqSmhLbXZgEYxcCwGeXns7Ce//Hg0Ht7Y237XskVcCRsWJCGSNyhIJJIH6pA9mZgvfNOsCLIoHRKpCqxbrDV/FJDQLLLjTkLK5zKNAMSEMWrVrEzFses66YIQUySTqGw9kl1V7NmdaWZmaeuE/RgYwBJSug8/S+2dKGQZA1gy0Kd9aJSU75LKfQRQ77OEoosaU+1sCl0nyeOXZJxt1yAgWcrV1GdyzrQU78F8/coFJca03WpMmFG1N0ZEESSnKgoUV/W1688z028/+ODKlStlOUyXNAFmzqGj2KkREqEhRTWQyOCkDxY2UGVikU7TNonIoiEhIUqShkAP84FeQAQpAQIBYu/aJlUhx6kOt/5Eg6m6tNpmiqBKqgBFookBGjMQI0Ukh8qOMgVTbZlzcEAEm5PNruke/Nu/9pNcN0X3VN+T8bUxjfKta3TlxVfmGy88HF/KpM6bnawcGQki5MX4uNuoYFCvyv/2T3/58tf2vjPxZeXlqY5kXC3mUBMogBgGhAB6rvMxb1y8URdqzXSEXQRQBScUVCc712fL4dN699p49L0v/d2q2qJ5Pj/pGFfh+OPLw8tDfHD747euXCo3dyZLN4aS4aQeMIhh19ZA0XcWXPABAFjjXMhl2RAlqjpyorYiLCArqG6J8VyikBtznitFGs2aJ3XQAkjUyLlgxmLggPl4gKXWXt+eN1eu7ZYevCPMrixDsCXDKmADbd3hykGd4XnUWYRK41IHlqnGsIiz0/Nia9TOs49wbzwY7509rulSWO4Wxyfw4Q9HmEdk5rH70tfRLk9m+vbb7997/z0P7qd/9kfo9OrlK5NSv/iVr8+XjwYDvwVF2wZVJdIkkiUAjQLU+3BtfZFplMl4fD4/FxUgUuv9eybmnZMYAXOyqCpIzlQdAgEVWYYIISpicIjdKlx57vKF3e3fvPdRXnokJVBSJHSKGiECKBCIIFKvNCQHMUZTJUVVAwQHZAYKKZoJGdFiB4hJxsBEGMVFioSApFEsJcEgqqNo5tKcB8HpWs5hvYWS2PU0LFDOfCvdGmAFLiWnQL/BRSQATSCOTqM56l91BmA9ZwN7OajCWiEVwbQTS0usnkNrzEwAKQ7ZMYGCqjrSTiM7R2k3vEZp9omF1tcUyaYVQQlAVMgIDIgoxohICRCdBC1ELlqSwaJDjJAckJ0xqBEAhdWKXa4hiIuIWV0vAFy1PAfwBDnFoDUrCFW6AaOL1eh+WT95eZN/s/Lgp4cRdnNu9Avb5f/9T5/M3bgYmSwCNsRdlEUHw6gFuZnGQLAQaH1oNAuqlRWcyZMZzGfio3WNN+usZtT5r//y8t/9L1akg86iI6cIHkE1RjAHBKgamEABFZ0aOYjmnOswnM233/hce+PK9Ad/7u58uGvOBgUQkUQTjVQrEyFlZRHufXr63/1rHBTFzuXFT98sQjvcv6wnPsqi8APtuvLgjlpWf3p7n0fn/+H4+Ox4JIBVV73zMwDIvvoq2MXzN995+V/+1+/8xU9vfvHaIUz9f/iZFFefapTFabb1wvbeC/HaNdjeBeeD0WxeH53EbmPvymjGXX2gk6GKt2ZghqxKqzwgAcxjPRCqaPvkOMygFRRrBy2UjhslVeUMgMg4LVsYDayVgOz6cY4CE4mBagTtEugCMFl+1UQkKiEkjgxAYmuYQ6cRHLkOokRxSmSUZVkTVhQBkNKIFhQEIXnueG2QJ8JOBIlClMSm7bou44yQYtS2DUn7J9olcfSz2BAi0vRdheTtJFDVqIB936kYNZ0cSMfPRM17Lyoa1czSDrvrAhilxt4516kQgqOEwlAx6j9MDQGiSGJF5kWxrKr9y8+xz+fzOaFlWZYXZaJhL+uaHPVrOOqlIAkR8Oy1ETSQ9mO7aMnYkWZSaBhRk8KiTwk3BSKOGr3jJnbc04LYOnHsDZ/1DEBIhhCjgmmalTEhELnUrlDCmyAQJWE5RUzWb7Uw8r7cG83uPZ1sjBaduYb8DFaq9ZFQ2cyncnDvbEjivxi//91vV48fbvNeBHC+e/PHP7l48wvF7qtPGzmGvYsP5g/8k6OP5tu0P13e2xs+d/T4wDVoLWFjqDjhyezRrFt2OrAO8zAaqgh5Z5JFq/fh6uHbn8AJTQ+qQzhohGjalLwFgYUvcqtXxvzh0eM7H793q3ydupWAK4kCBVIii6CoGZqwkpl2CMSQQ9eqmtMOoxp4uHSBzuaRnZrlDjpon9RgqH65sNx5cgICYGRYdCgqwMw6E9cQF8OizE8/mFYlX9m/OFCcjLYPjo6pYa0h6wha0lmEM3FLWE1XQxo5FedDtWr2dnIBf/5gtr09kGV5NHnBQ0mfPHb3flUCAyFsXYSXv+FwMxzOf/KbX4Ywv7TzytMnt1f6WOrsxkvfOzl9fOXqlbK8
UVWHly5dfPjoUII4VFMxAIRIlIxAQJQCGPqYsCdPniCaI7feHmGfEm/g2QUVSSQwEwfg82Jez99+79dMPouaey8aofCns9mimlOOhMTAYoJMCgqqDA4BogBigrNTNOuiRFXsNHMce+KqAa41nEhgmhA5aXcSTZ1jTaYj6YjInimsJJ1CcKgOe3lh0lkjEBCICBFlzGCQZWxRowNIib+I2m+UkiEymcHX+u31YG79L/vNcyLRIVgURUQi0pS1kN5HIuCwC9ILO8C6IAjA7IIIgqmaEqioS3xpJpWY+awLktgdAOl7RohAhApKjuN6RmiqxNyF9tq1axf399+//du6WTnO0kxATck5FSWCFOEp0vliqEpg6ggaadffqgvSFFRQO6gkkDafG1w+mx1WeTeszRo/ny340lCO4U///eL3X7zw0w+qp7PIPoOg1ilVfqxKWd3NCVYKrXLdZIE0wEjoQa17jw4NKo5diGLea1QApMNPwuHHbu9G01WEAuoMc+Pgg6pj1RgZUIGYRLpILo/QovLQNIRP/+iPX/ru98v/zT+p7j58+uZfF4d3fTBgx3mOuJ1LE5xXZTegEbLrKDw89IhcbFbz1keJaqKdIxdZHWVDP5R8NJw18tY7Ozmf/uv/6XIZAKA+eXr8zrvXazv+f/3P+8uT6sPfCK/qwYV7xdZob2dyaa/Y26qZRU1XM85GnLv5vSc2HpSus0X45kZ1QsvfnGWIxk3NMWRRnDatBW+FukkGUno+edLmg9EihOU82/UCkIdcY2cEoI7JKQD4wocmqKppFGm8JyCVEAnJ0BloVAETA8VUZ/d8GzIDItRoaRRDzqmaI4oCohFNQ9cxoSBQmrIiEBgqRkBCiKpRlYkMKGqkXo6Q9I/YiRChSEzRgaICfdWpziWaL4hEADPqt6qwhrgamAAS9IYDUkDo71dzFFSiCDOrqWpcc6F7smwnHTmXRmLMHKKY6drmCAqfRRVnmfd5oaq7u7uz2ezs7HxV13sXdidbu2pGnAFAVGNm6q9vVUAjEBERSeFv6fQnR+LfoGmqU1CHikgGrKb9lM/YOYkxY04Ky051XA7rpgH3TKIJEBWSlZ/IASWOQkpEdewQ0dA5dIiuJxkhAaT1aCwm3h00dTX1uv0SSSd6t9Hdhm/VFDf4vZGeiS5Yf3q7vf6Ni364+e7hg+Ojk+/9ve/+1eHPXtorXxvv1E9O7bEuW4mbMHvn8PrG+M7ds/3JS2X3tJ41PKBYQ9ZAqBpy3BwHVyCURAUVxaiq5szs2MWi03viG1/NG/Urdedynm/yIs7mtNIm0+sv7X98eOfw5NHOQ3/j0t6ZNgJixKRdiMEhRnS5I8pcaCIiGghoJKDOJbmpyZMTBlAH1EXsiHKPpefc6+JEVSOhUzSIAVtAJnKAyhehPHhS+bGHusqQZU6fHD66uDuhvN5pR+cnVThtqvN5Dn4I2/NH9Ys7fnB5swndydN5Xc9WjTULzIrJoOQOMWvaZupldnf3+J1RPpLYdJdvZC++4Wk0vXP37vvvzqtHKvH7//CffXC7vvfpB1/73d8tCgrdUjXWq4DEt3/7blGUKbYgJX4oWIzqKEsH1RH3kWfOmVqSDjA5NUxkZgCIIiGjDKkzI2IFAcfVqrm8t/f04eFgY7K1u9upIBgjq+pKokM0NEXIKO9CQw6RSGJEdIl3nE6mA0JFVIgZGYCIOXYYEx5TGShqVFACkhhTeFFajPZZCwamZgSZc7GTVG6nlSkmlqwBaWp8+/GuRgUCNOha0aiOKIIJGFhEAyZS7DsASpyvRAXHtVy5XzX3BzFBf6xvsXWtWjGlSOTM1CKuR8Q90Kp/K6XsYTSNgEjRIpiZEpouqw4IEEgkEAI5J0HYMcQIACGs0tg9GcYYoFNd1stVs+pCwGgK4pAUMYokVKEqO0cACoiqHVFW1y1gIPKqHQCJCFG50hY7tY5WqgPwu7PN88nTAK1Ww0jYzuGlYvDee/XywH7nC+Ons9VPby+YCxaAszAcanUyx6aItRmQLiMEFZMz9XcO+fnlU+MmmBNQGnOoahQbCc5v/8etqzfaFZSUebZgHUQyz6aJewoxUegJcwJRUBH2xcl//IudO+8dHxwOv/nt8stfyv/Fv2jvP2p++xu9c2cwnxuhSkMloyOIhQ3HLYCtKlY0CSBVVMKihAZQldibgYY6NnWJI+JC0DjLBAoA8LM5VZVsE/32fjkptdSz4dj93t/d29vmTOIATuOSm07znEcv+k6mP/qTa5+7LBukzVlGIoPxXt58vlveOSVHlEk3tKBgwMAKBsFpRmpZJqQhVx8kii/UBY8ZRUMHSqBGgDF0nQGQZ5XO5wVBCGFF6AC6Z3b4BIkBjS6NMRUgLeyAABSRknlOJJKC5yyqJQlxSgtWAEVaZxH0o23VuO59EdHFqEQWEycSgB3F+FnSKHESSqOp9EVscjoQETsCVBGBRFNNLt71CP1ZXC/0s28VQXw22u3TNrGn1STsTJr7RBEldtCLSCIQQoQEpxaNJ9MpO05a0+3tHeZsWT04fHTg83xr+8KqDeQoFeJApFEBQUHFNB8Um0UxPZlmzL31Q9dpDGkkTyganREnoQcDATKgKigkDY16zqJEhyQSkz4cwMQMAJgpMeTTFpDJERElfFi/NiJHDEgKREAJXSuKSFYQd2GVF1Gq2ZYf3ZYWC/taXv5MZl7oS5b/cCX5JDtfnN79xYOXPn+rmp/94mc/+fY3/8HFva+t7rc2jnDGcM5tqLa+UJ4cTG9+/nk6l6PF3c9/+Wv3P7l9/OgUyYlJDMobmWZkQSk6XcTgOzav0g63J+3jpjms2RVwTr/96OGt53fOKnw6P7g8vLzKY4jL/d290YgW1dPTamtvNWGiDiWREzKCCApBxGdx0RCnSaVESOHoxGaSOe5Se6LeF6ptbOcuUlepA+LMB1IQh+hMQ56RRiDy/NWrt2D50cPHM97w5tSReiqreZ1vcVzFTR25FqAOXR2auhrHbIAeXch861xdnc/IeeKcWBxAs2hadpsPHm2fP8xw6EJVX7up+1/DoEcP7qymD7/29W/99Cd/ruGEvH75S19/9eatyVZZr6bOMaJIDOVgOBqWUVNPRqZsIKaaca6mmKLuDQCAHFlUcmQa1RQtPTRkaYFKLkrsTICyECV3ZBHYF8eLR1/9xtdOjqehEUeIyMaI4AwjRYwaFSJFIkCIiAToECQFICQKh4U+CREQKHRiCBq1yHzT1EQEaA6JKNP+SIOqUgKpA4qBARRF3olI6NKRKzx3EgEMkc0EIKEjnarheiC25sMqOuqisiOihLZIagng3j0FCXqbtj4O0SEKpdSi/6Q5NoPY+xWAmFQVAFOMBKChGlqfBpHIu0jARhFAknjKmWg07aNUE50HQX3mNUaN5jPfxI4JQXVQ5C++cOPho0dnsxlxpqreuenp9Gw6U1PnODED1BQBssx1XTQNTQzoWMRCEGa/OdlczOvQrpxTRANlAUUgVIzEVnsJ9fO0eTR8Gmnm5ruVuun9zld4bZD/3vPF229VL27758kdHAZl1hr
gRIvxcDaN3JhoKCALK9kbubeegISQQy2hRXIeNExnpIIQIhbd0WN8crA5uVaHioBABSk3EyAEBQZDn8XQMrNTW2nMirw6uQe/+eXAj7NYL3/wx6t3f1V+5Tujm6+M//73dPEHq8Oz9sH77YMHOJvr4rQkR54JWMERoUQix8DaBQJbSXKwgXcADlQtBAQn3i9ECgIALTwTu0WTFbsndZgdrtptd2mGFs5DkcPI82gzFlmn8/Dp/aNf/cmNz1278NyVJ4/e9RmpzstYzBscDSLLmWta9iQFUAwcvEBwGTgB0sL76aKBohh97sbmyZMFOTZDyCx2iKDApmrsnFLi45LPcNXUzFkS2KqCofWZXIZJ3YiAxAQAznGCWBFCVDEi5gwkEjtQ7dbUNgJUMF274AzQAUaN3mcxSqLXIkKWcYxCmEDmiOgQwXvfhrC2NmCSO2ifswTPQh9SbolL1kfrnRTJ4qT9twAKfVODgM451diPttJtbP1OmYgAMaoiMpFLsypL0SzrGzxlJRZFEbVXmdVNMypH165fPz588OToCYAbjkYGFjshR6qaZb4TIYIsw6i6qhtiZ/2VC4hrbgElQqcRZw4QjRTVCB30xU7XG59TTgwiWJDAzGBAjtQUHTkkQmTnABLOkqjXsPZFBhFjDy6BZ7d/QvUJhRy75WpFDJUsX8GsCXLncX02dOPArI4LDcJA7uDNj27s3boML19y7z9+e75HVx49uK0XHVUwCpsnj+fVw7nWMD+Lk0b/0I8+ePDpKy9cPXp4UISJGMROnRiwmkesFMiMkZyTNlzYu9wcLfVYlTQXfzYP+aTxzUanI11NaTK2arpa6Buv3PjJW7+ow/T0WC7Pj1tSB33IlQOLDizGrPAxhKSzNzBCp2agIEmFrsB+IIYammLvctze7uoqHk8JRGNnoEiUUa5dRGKxwM0T+cpzr0z0wZ3DY819UbDFQASnT+Y5Y5aNfO0H7USq+fnT+fbV7dxr07R5IRsjXFSFQKMKVXPEtlFYHD94d0c0Mos8no1vwMVLI1/NHn+6ePLgK1//O0TB08n+K68VLrZNNRjQajV3CBoDOWEC0JZcLnGViseEhMTE4XNkpo5Sa5jSpVVVE3pGoiA5IIyqGXPbtoNBDipN6MhhgqsB2Hxxhl1sV40fDDuLYMbkMCoqdc4gJi00KGjThr3N3c3xxt2797PMq5pZCnJBcmxmJkroFExibGKD7NKg1fUXEjqiLkpfCwNCj6fEtmmAiDNO4uQs8yYBCI0AumTvS+AqQEdR1ukOpoacQgxpLZVKkYIaI6CLDtDAqTlKpD4ANOldv333m37Wv2EQgJxG7TkDiAm29yxVLYEEiNIrMvUAlORmMRqRUzIm14XOUvYDUdeFtAmSKAyIBlE1gpWbo6vuWr1aoREQqkqKOE31RoyRHav1XQ2YpfdLfAbU1K5tmr29HRGcns7MYjQlMfA5dC0DkFgkgxN5Ebdu7y6yuXrQw4+6wQR3WN76iyPYHwcOW1n+oKmdJ6fkCOqm9cwhJxcsRCyYPpoXhwtmH0HzSEULAuQBagUFLP1gqPXZ2Vu/3Pre82RsUZCGmc9CqMiMHUvXkKBnp7FTX+TG2Yif/vsfTkAabQCsKDbo9FH97/6H6heXNm+90V27UTx/dePmxRBimC7k4b3qySHdu0eLp1mXRfZQeFNmVCCALIsdSFDKGofUKYOBy8hARDgGAoAcPdZzvHrlnmg9n+sIRpf3i3yrVXFBwuy4fvA0Vqc6P8j09JWvfWnv5kvtyYM86wYqeZ4XZdGsThS4VDMS19RZnSLVo1ARBBoEBoK40Xqu2D097tiJc4OOY4rRQHIRBV1CU6CpgWHTtmugRMKwOwMzcmCRiACYehoLIRh+hnUB51hiJEJFlCCRjJkTGDECgEJy8gKAUwAyBErK6iQKBgAVeXYZJt+tGQSRRNYAxbTWTcAZ6O286/sjwSoI+/ThZ5udlEhqqexP9W5a9xgRRYngqAe+ijA757DrhIiYMk1KUktMmiRkhD4XGyyBHELdIiECsXNBwrAcXr1+49NPP737yZ3JZHzh4sXxeDMaAHIEI0dd1yGYIxSVzGemEEWwf2lYrwJBQEBSEzRAY6AMMP0pGSo7UgN2qOl9CwbsCNEQiZB7S5dDROov4GSYRqIE+eqb/l6bjsnDbWZKBI6AHY1yRqs90B2Ir3TWmD9ZwWWfTRv+5YnCpodKs9Hw41/c+cIrhy/euHytvHH4m/t7V16oVtv1wRKk2MWt+09Op5/Oro+u5TOeasQ4/fpUPri25YoOoNIVO8y0UZc7IAMyZBch9tyVGbePGz73YIgRqmU4Hp9s7Gfzehm2HUw/AM/gRhcu7XmKT+4duJeenzAMWo3Okwtq6eoEBPDMrYghKDEGIda0maDOhJR9LqGybDB45Vbx+isuH43FwuJU2647etzd/RSbeeDOZVmOYGoMM13N6hvD/c1Low/vHc2Pm2LgHTIChCAriiVk4SzYfJlJDRHQCgTppC5KKQpogkIMLiP0q737t0dFmFUzAN9dvolXXh3lBjK99/Cnb7z2u8D1wcEHNB7ffOVm3S68e8Z7SodINYKxG/gNVVQLAMSOYwxJ3mcp/M/6AU+MkZxLSh9VI2IzExEi14TgiJoQGIgALRoQdQpOusl4MhgMYvKYm2XkQIEcq1PSgOzVQDphhu3t8dHjo5Pjk6wsAEjX2l2NQuAQAJmlDZD0B0Ca5NFE6YQnGSQ8AzSm9bsCRXCO1WGIMWcvIvWqSfsDjdbLqA0YHNB6dgw9VwtMzNAhRTUDIwDXT5YJnWOIaYJmYJSWTzGVpp/BrC1dpQAIiVuvLjl6nUtuCugdR8kCqCkxKdXyiWxL1puOXDItkQGBqjnn1CxjjhLZcXpdmUXmTCXevfPp9vZ2PiiaVQsAncTNzXE5KI+Pn/Q0hlQ8ApopORLpUvk1Hm145+eLajQqr127ev/eASKU5aCqGgPDqITkOOuciZnKanNR7D0nD7erCWUHoF+9yh/cX+7fGqPIAfFRW+XXBiqrXGD3Zvn0ZFYouQazJkMEWfBH07FxiG6z1u0CH3aW5yDkSLRVy5pamMf14aP53duDG78Tl1Ok/Ojg7oUr2wMa102D3oOokCJDWK1G26PpR2/nh3eZfYzkHGJoALPSe52frH78Zx2XUo7q6y/5F27g1avjL94qsze0puZkCp982Bzct7NFMTvrUBgzcnkOrNsjqaoIESgCO4dBbZBT6JoIABZX9edvHu/sre79crjpW1d1h/cO/91/X+vKe6aR0oBxZ3jx1ku7r33fNeft9CTCiimqxHxnRzdL/XTaKbRUDtkErcNVptRgbDkLmrMfBSy6bKelUVPDvMv2ymFolwQZA6iARHWRVVVFo8QMXDKWAqiqIEQAwTSsIQPt0RKOEl1d0TlLgUaaInXNIcYY2TlLpSlRTMho6+NSKNWSZADIzpHDEAK5VJ5DqtcRktk4Ha7eRISIhJgWzGnyjCmYQWMaIUGfvWLWY61MwHolIwIZGBhp/yGQ5uYKRiTJTI/AjkTk4s
WLT5+epsp1rQthSvkN/YVOaeMTYzQzn2UxJtckFUUeQmdA16+/cHR4eHR0WC+ray/cGG9ui/bwSe+9aRSJnrMgYp8lflpvykBESO8scIBExI6dS4LVnvPVB6llLum7iQk1pcekEDXHzqXkt8T7XH8JAOjjxgEAgdflBfTmCFMidBYUhE0i1S/CmLEplT6HiFWx28j71GmIec6wghbl//N/+x9eff2N3YuXpw+rKxPaLfbDofiSttz+vdkdmnXN0yZcar4+Lib18rGn39z7tclS2ACIMA+tPf/cSw/uPTRkRSQ0cARCOKPlwwqXmHkTi6TVB4/vfOfahGgBfrMoJ6HrQntWbk32diePjw9OZqN7Jl/oYJWJRgcGSkoKkbSpV0lraBIQQVQdkRAwJDRshO2d8ubr5eXnO88A4EiKre3IhbtwkTcmzcN7eVPHxblqQCSOtfi8qE6bzWL05etXHj6ZPXqyaE2HOUPIquVqvMOjDSBfVnPa2MjFBMggQsG0PfHHJ03AjvOhOzsI9cHUec9++drflnKXuqWYLo5OVudPeTQ+fnzvgw/e+dY3vqsyZ+vAmDhb63EdQOp7cu93z84b9gxAZkrEnTSOUGPLro/8Y3YJpKlr718Xghl47zXFI5mRohICZ6hiRhlRxtQEeevtt69efaFeNebQCIidM3RRgVzbCRADqvf+jVuv3bt3UDdhNp0SM2NyxIM5EgBVAVVylMjLIFr4vItinVDG0RSJYnKvmxIiJJg7OUKQKIqEhlEEVZVsjQpRcAhqFi3hajUasQPt04cSN0pUyRJJAzoTA2RHjiiaAhoZYR+gaIAY0VKsyXoF3I/7DMw5F9OlC32lkMp/wl6LmiRRoj1N8zONaIInmJqBdtpzuhDRjJlVIxISUQOCSg6ADBez8/l0lkQyhpixWy3rZVWxz5BYRATUIwEpAIqIGWaZHxUlAGo05kFtkzf5AAEAAElEQVS1qH/72ztnZ+fODaQjAK/GSMzZCGnIKQK+RHXxxXa4eF7CeRZ99nTo8Ird+mLxybxersKXL05+/Ms5+YLirB43mFNnhVWhXWkWmwcysYLyXa013mmvf7m638Q2UI3dANmDmoJXpU3i85//vNz/XKTxqPTNcvrH/48//tZ3//ELr7x8OpsVVITYsGfPJnHRvP3mnnJDELF2kgXGiOZD49hxURbgrV6s3v+5vftm3Jgstvf8pcv+2hujy7v8+78zgN9r5oGPTsJsEU+P6scPNbSUqztr2BRVrAtgWY1VcBv+4mUAuHtjvPrS6/mf//WYQ4Qy37lCV/fdBR6NB+X+RR0x7xW0PR5ysMWT8/d+c+lLr02nC2ZUcBqa5ryNCI5CB76LiNG1EiHDgD42RISNuRqhXlZWFgCDvZ3OZtE57lpjkaBE5ihEU5RWSJ0D10jjfUJLYkx5eX1qR3rO0gq0lxqk0Skk92y6mcxiDJ2pYQ+4sNQsq6a8aNdnLPUXTNoBJ0Le+mY1S85FU7KU3dWb0dPwumdPJ3N/ooiqCkSHRI5MDdccqNT6rtM9+wu65032fXPsIZpEGo1UPWdPjo4IeT26056zldzDBtgzvxIfnQiQkMhBQkrHGPPcS1QwuPbCC1nGDw8e3Pn4o+svvHj5uatBuhA6JDRAcq6T6JKMgxAJnBE5p5L4Nwqmxi4zJHKpoHgWyN1zN5N0hRhEMXNk2Cuc17ou6sMhWdcssBQ7s56XISggkK6v+1QaqUUjyQYQpS6w/DDOMANTwUgclt/LL95myZC0kes3X3p4cqd52v3qP/wy6duOPn14fHB6Y+/K9//wD7/x+jee3L5bT2OR42J1rJubDuTd+eEnJ1PPpUhd+BFCDF1z/94HnvMQOzNABRXgzJO0zfxsNCol1Hnmspzmp0eoq/Fk8uDw6NqukJ/gxjZIfe3i+OCpZo3w6y+c3H04XFXGDM6RdurAxWhM2onLUvKvi6pOAYgFIkft0Iavvl6++LKomqAJCBEq+DZApvnVK/7SvoK0Dx6sbn/g64rffPOD37n52mRcns0qdPzixt7FbHz38cl8LgzgyKrVtCwGqtrJKi+cQgAUJBBrfKm+ILUgnY1PHgwsOon1lVcXFHn2Cbtx5/mkOjRo7r77w2j0rW98hzwHCcgjoFQd+CiUAmM1ErEncERjBI0myfrtOBXRZkYIChC67llMN5qSdGH/4v6iqpqmISICTGxIRSAJoWs4HyloVEmF5/l8lhdlxhyTspdQ0DRi8iP6jKtq+dY7v/HsRaKYQh+aBABAgGiaERtR6m3VzAiDCDKhcUocFQVCAlACQqIYlSD5+YAzNqSo4pCMkQiDCBMxUTRDtMwlUxMCwToOAQGAiRQgI5YokcwRopFDUtM6NOkSJwBMUWhgz2zM/QgK+mYkeXvjWiCKCFE18wyAnXRkkHnvyK2aVQoZ70UrquAIkTy4TjqkpDJdK71VnXMhhFRzqEGhpMlqhaAEyCCdEJP1UhYdjUbRtG1C31OsB1lFWYqI52I8Hk+n56PRpmM9n1dNV483L9SrUNXBuRLJo7GTYuWBM8AcY5llA8As7G/EO96VQz1g83vFYhI+OKheeW3H73V6bEyQo9M9BwFiW4NXUM7qURaCIWNFxbI72Jrc2h3StCv2b3YH91QaQ8dITtGYxkGqH//Fxvf/xbyZHh4do7q/+uH/2xf/fP/ajWpWMxWhafy4rB98Wjw9gbyE2AAVqopCnhmZLJBx07I5HuRUIg2GGlb377gnR+E37zai8erN7MLlwfNXsouX/Q0P/gtjEKk9TrumPrPpI5mfc6ftuCzBZ5c+99sHTwDg8VZY+kfXw4k6ERc2b9zafONGw4FHHDaUcw55LKopFlx/+OHw8R15edtMCDFqg5ppi6YQgSLBMvelaa5b2lXQOWGqkQSEoKQiF/Ir4Hm9O6bjuDKKTrVwQagxUdNgBKQhNo0yk2owDQjRenyGGqCaWG/zcdCDk02jOmaRvv+LEiGpDp8BZQEVCQAlmc8VI5hZRERjTjf7Wv3UD42xD43rCane51EjAYhID680c85BKigVHDtLuUmg6DwQaDRK4yJz2C+DMPWWCsBglMSSEgkpweCeUeEMgLNMY4/WInAKBpj8P0SOPPuomoCVMaYYGHDOxxhTzCIgZI5TGNTzN27kZXnv7p2PP/pAu27v8pWMuYsCSICYec6IASxITJC/XiVFCOZcmjyQ6/E7AOQIkNSM1CJixs7ITC0r8whGBgl3RQkI4Ny6fXeoCkCpIOnlcIl/3VfxKegMEq4LTLVjFYEMeUSDBligoHZ7sL2h7nZYxk65y6LTe++8B5lnlxEyOdc0zdnd2fZ49/2f/pqb7NLF57ZhL9RPvWfm7mSD/82nn96VxgGqth5908x9MRyN8tBJpMYpatrCEfuCzVZEQZU8q8QWkATCww/ev3Dri81kCBBdc+xo0sT5C8+/+NZv78yqw9n57nhzsrlYivfMrm2WGM2cU+mIfW9PVWPypsCjUVjVuqrzS7vF7iUQ5eSTyCiaU+6CEkFmBTIAAPBLJZWT5Z3b3IT6R++89
bnn967vb6v4KjTeu5tX96dPpwenx02MYd417dx7tzECnwmYAlmERiTmGe5v6j3Ns+ODvDtBgNV452R7i5qKfekpoiMnDVhTbm68/MpXuthoFKbSqycogsNScyLtiNm8oag6pjJz82gVAAMAgSCEDlpQDVEcYxREIItJl0uGEkXyghyPDh7OB3mZ0g9CB7Fdbm6Uk+39k+kZZT4E8X7wzW98893bd1ChkcBGJgoMYNjnIiCDAnO+rJu5rQDBY5ZGTBKVmNgRAieKlQIQkkMVNSLUtHoRBUfckwZA0CglSSARQkakouSoA1OQNLZy1GeUO0eaTI2oPR4vuTTUkHpyVsbkOevazoDAorlkxiUw9bnvuk5RzcyiEVFKRCBDMTUAUiNGSSM2UQRKLxKHBArOoSIBUQgtITqiqAZIQSKnrhbQTAUBmVSNkMwM2SWABliyaScqIIFLRZX2rA1CcqwaVS1zJADSdV3ogNCjE+wX1DHqoPDeFdWyevLkBJGn0zNVJCq8Y7QMBBx5xAKV1ViHvhiQ5qIeqOxar9lQtgZ1OdhsBhEzmIO80xjc8G/NToslw+WsjW3uHOxH1yg2RBsZ1SZjisFTmHWFZ+cC0nR8YburJJwJej/Z6+ZzJQHVTnVQjMLxrP35/5de/cbh/YecjTyFd3/+Q8/lxu52rGrPPppWv/irXZfVMZSaA7kAjQAxe2XurMmMRVvUgpQ6O+bBOH/jy/dGI6ltl4fl9BG9//PFL3/uBiMpME62y/0XaPtStr1Z7OzC1SucqdUrmbYhhieL1dRVAPB0ty5h1F3ZcyfHS/bFxlazmMIokxAoeMkjW3RoJcMsnBeTjfrBnfLabtRGAfMi19Bi5jCyaRI+Fx0JEEkxFvJGg86NwEZCeUcMmtVQlwEBEESggRjBgKAzVLNIaISYamhFMIWIFskpOLAYU9ingYp2jC6xDOBv/EgyRgWIqs6lIEIAAHZsIqSqjiANTYiT+jBlcZoBcg+QwYwgmgMEAkeuW3t1NCqnVXRKCAUlRxBVzUgVwDw77/OmacAUASjL0hWeqNdpzMtpvQRghrpeEhMgoUsihlT+AhqzpUBxRWByKnL5ytXj46dgyhlCTOg8RFJH/T2XUbauZdOMHaOaGly5ctURfXLnzp1PP2lCc+XadSansQPnRLEoPHTBMRGRigJBNsjMDMFSYmmPc8d+5EAAjC6yZoCUblHG9IvT3U+YZm2UxFVJtSXgCCGicnIFExBA7BMonCkCOXSkahbBDBShjpDBUNRuUTmiUDt3Fpe3mzDIxpmpqBRQrFqXeSyGWbVcKUTibHH3ZOu1C1ks3vrhz5HArPn866/feOnl4yf3h6TvNDWXBYaVYJCUPqmhsw7AUCkFqjsE0booR/XilKSW/x9X//5k2XXdd4Lftfba+5z7yJuPysp6oFAoFEAABEGQoiiKoiialmVZlt3yo3tmuifGHv8wPTF/wPwpE+GZXzwRPdMdPW7HtEMt2zRFqyValiWKoviEQBAoFgqFemRlZd28eR/n7rP2WvPDPlmkphCBQKCybt6bdc7Z6/H9fr5qklLWjKIs7f3549dFN+jDODSYFN1uzhfU+N509mhxhrWu9g7LdO3dMueNUwiBOApCMuRgUKcUg5VsATx/KhLwxifaV17nprVhEksgAyG4EKEQkTkVM1UwRjeuTo4OJet8p23vfHjv+OTJyy9evrS/l/tMfXdwMJ7uHNy7//h82VVV8GjUSYymGottAxOTWZFJutTDnmbHVhG3BzcY61ZEbdsVt+748MbNG7dePNy/tFkvhNogCSEppimIuCiI0ASNRgUhUBGSCEy1LxzZfPupT775wZ33FvPHu7M9Zl0uz5gj0NdIH1hxRjNu793/mJ3GbdtbIXPAGsju4dG6W3KVIWiZjMavfemL6z6HRrIVQwGJO6DWq4pINROTE4BGGmJs+2zuHHi73V462GdguTwnGTjGILvQDQ5yDXeXNvnghWUHAlVrESewm/VW4F7xrRVAXdWZdR9UtDAPo6iq7nD3wIEAllBKYcK260VCO0rdpuNQKysGWdU+UAVSgoi4ajutHm30/LlWLRSDzxeEEMXq0qmoAbE4hVAIqIqbeg3BLnRYdQMUquDCzYpb4AAfgNLVlW/u5p4kAjTMEyt8g5hgbh4Cd10nIbAEVeMUq+MsBDFDTIFQgxAFoCDRLXBoHcFgwi1zayRG6KUvVHYOdlTKNnSyKz6LvKeNaZlSDCWQLvv1NOneeJ26TCwFrpt+h7WfWh5NViTNsvz4oTyZTWO3kJwLzjXtPlrs39h++OysixF5s9BkHFqIcd+WruxND8/f/fG1o2u//Y//29//vX/5v/lv/uk3//g/Lp4tZodX20laBX/ynX8zPn1qEW1pOoDNG5EG7Uo7qAjIC7FTm4qFwC+9cXcyfu/+cb5/Z7lYQuLe/tH09pW3r15LX/t6swSO/8re/1ZvpQNsPKO0Q9nXauW1NzeHt8bp8NbkKoDzH783ux2f/sLV0e8/OLh6W3YmWlYjSZrMyDrhlHgawvrk/vT8STObtoe7fa9W8gjUJDrvMpmTkzKrERuyw503uu2l2fahiHekytJDM2P+WG/uhPOsvVFQUFf/QIRZ6ZU9EFWkRjHLZj2zufVDjBsK4HALIUgMXpiFCVzcihuHin0Ah1DMS7FhsgUnJpZACI76/6tXjivwVaLUVGEqDrg5JIT6agQQR4NRRbQSEYe60h1KWBluSTNvm2Y8mWy33Xg87fsMQCSKBKCCKQf9lF+km1T8HRNfcOLIQiAf5C0Y6PWwoSPn1XIVRYhQjQAVyMUcq/Qp8sCwqPdscSNitmJmWfXajRsxpffe+/H9u3e3XXfj5u1mMtZeo4SuW1MIjUQAlORirk9MsGIpJvqZlmzQZxJRhWoEErqAHYFAg/osXEzPng8VqKYRRpPatrDDwEHF2GqsIzi6k8ELeQFni7YuyjKy9s/zximhZ5h+Me1wxp9yMUA5R05d11vu6qMQUSz3+dlJimZJA1Pf5eX8wUu3f/Xl25f/8A/+jbGYJlUVRE4oKK4ds5BEq2IY0lyKg8bT9OzkoSHPprPl+QL9ljmA08nZ/IM7765zf++nZydPni66pZArmTSzyM3jJ6cPl8vQ6W2ZLqcii3MJjebOcicSHUVYTXsAnEtJk/bNT40+8YqMxqVoVQ9UaR4GCR/F4r2pEUhYS5EQMG7l9tXpxw/mltrTfrv+6enRyezF69dGI9F8ysw3b+x//GCV1Rebs6OdxNgaigeiIeSuALq7l9b3MnxbZger3ZltszK4BaCRR01sYmzXmZs4S3GqHkKYRE1d0TaMuVFuUaggWODkIE6tryR0owDJGr/3vQ+uXT/sNKckQCFbMXunOUWpUkkzywamwMO96MQE5UnbjsfjJ6fHq3XXpJaAYvbOO++8/QufzblrWFg4ZyVmL6VNTVYFU41ooAHZU3tHhUttcF977bV333mnU4UICMKh1MRAJi0GwIvVOXNgAZOX0ucegEQhYitm5AzmUqqWX9kcqAnb7ia1c3YTcK4x
n6AwmHyo7quYqNdei9aNeAjDOpwJxRXV4F/FoVbxTDbIroYT+CIwYfi6encS4OQcCEXARAkoaiwEQjEQU6BhZ+wDnICoclZtcDOz06Ctxs/x3wfIwKBQg5mIlDpXF5EQOAgF7t2YmZlhHFNcLTeT6cwMZuQIRCnG8XqVu24rMlKFbnM7asLYaUqjvdEaKxOfHk110usEcUY7rW6szHQlWI1HvVg38m2hfpZoU/rO9QUuS/jWd6424+/JlZPReFK0K2xb9BZB+dH168vzd4KvwYHJkxOgCkTlsvVuqTu89/ibf7r7m3/vv/yn/9f1uvutv/+P/uPXf+/b3/rGb//O/250+cqTdccQ86YnNE0AQp+JqDRxQk7WbePlq4+vX/3Jw4/54PCjZ+d+fP+zn3p9vti8+6PvAfnk8U9feeVK+90/YWyRSE0g1GCGvo/r7XbT5Zdekzc/T2ncdqxdTqMdALu5XT98IrcvnVyfXX/hZeLCbbP1QgzisDdu1j/5waYBn96Vh+9PXv7Vbpy2eT0mQWINoiQIqQcDnBkBQT0TWOKsQ9pyazzuMIWMnWddHnebcR4tLbvkAHXz4GroMjKxs6ub93CDq5oSw1BQClNxKACqQiSjbrsVZtdayFUGLWOQD3q4mJqiSq6Y6nTX3GuaUrWhGtw9oDgzilrbNLX6DGCrrtQqGveKSqWBzg5U3GolqDORajErDiwWi7p8aZpGVauNCsDFkV81RlXzSBWNWRFRz2+uv3baOYYDmODGxbRpG6ZantalthM8iJgbUwBVfxS5eSBo0fqptVCvenB4+GZK77/37sNHD3qzF2/d3pnOuBiYLwbDCIFDEIeBWJir/wuoxLpaN4CYQLXEHxRWldsz2LZqBY6Lm3g4fwlS5+9s7ObgAiawgC2Zm7sQYEYVCFfMJLspNW2LdveK+lvcftiXI9nZWP8d1iY0XqQvCJ6bJG5MIQRVN4ZDV95O95aLtTFc2p9+/Oxbf/IdIH/3uz8WbqmoSEJRkDhlh3IT1LvcdQk1TBrm1rSmecmiy+WJEyz0bRPUt53hWz/4PqDmFtpJM94hI2IOSBzLdqUxjLcHh7x3Nd6/Cw1baLhxI0K2J49ZtwgkVDKUd9p089boU68jtdZnv1A61J8bXdjHi9WLFXAPIXCKqkXefuvmJ1+/+uOf3P/4eGHSPJovzpbnV67uXzvYd+4067Vrk7v3H5vZzjRB3aneQwpWNqhmHsVwadIefeoML6qqc+cci1m0tEWUNgntAO6a83qV0r6K8V7am+3xNCkyxFNDcHYTAEF4mmcnj5YxRXKNmKxWajZ+ejqXUByxbdP1wxt3776XYgIyMbUcVLWQOSEQGORRzrv1/ON5MxrVTZMDWfv5fC6BS9bp3oxbOV6epNS6+eH+wYOHj6jeRYMp3upyKIVYVFOKy+X5D3/0w7oUwfMNipVaqA5Gw+rDczI3MxTV2y/dUtV79+9pS9HB4GAwJmVrjATkRkSkfc8iFHjbbyWEDC82kKrUigM51zJczI0Kxdi6W6+5adgLAomRqZchhAhkgFtx+HPBZX2D7j6kRFzkO2ruqZblNa3JrTgyLLUpBek2XWAmYVMjZmEGQ0T6XgeF5HPNM0AcHG4EY5ZKoq8BFFQH3XBiRZ1tYBhzOZjRMhdHYAYH7fvK2eEQVWtrLW4UYxOCaB/G4+mlg+uny3lp1VvQCKlNcTflNpdJwQG1h/FGfoQlZtwlO2+tb0oXad56wJamPa/s/JKVgwTn1cf52mq9PEzd+d44EOvTjKOxHOvZfjq5cv2Fxx8+420kyURiEHds2Jn5pRtrk/To3tM//g8Hv74vh0e/+y//xfHxXaD/+r//1//wH/6Tl/7mf3188PXuT/54h9uSLgV1o4WUCQVSzb5/efv2L/zZ976/fnr8mUtHv7J3sJl98vHDu+/9+D0TamdXv/rlt67/6Hv50SnaqEXFI3elt3WY7T177Va++UY3mlG39q5jG8uozU/PADQTHH9mmtv+6POvTOK0Ey9ikhiMME6bxVN/dmKUJxGTGy/jxWv56SNi7okji3u72c4hqRTuiKVQD46cYsCzLBmzjUwzZM3tmuIGaazlzQPo8szXHLeBmZfrpXRgE+sdhd0UsKJboA8SimWqYr0QSp9rBEglr1aYg5mb60BXAvvPwsqqXbjynKt7LlDNiA8h1EMjkJZCxlXmq9xLEi0aWAJL6XMQGZDm7ubOHGpyyQWAsqJYmQEJzEkAELiUIemobVsAgS8GzRfo9eHJ6oM4ETU4/OLM5Z87gCv0HQCIJAS7iG0SYcAlxsDSax5ep86HmWsUipbSpJSzqmo93ovZdGf22iffunfnvUcPH3Rdd/v2a4eXDsyMzEMciBnEqHCMgbrFIIcPvkiqxqd64F58pkEUAoQB7F5NjARHAJwuEB/sbmJsTBacGebZC3sEBMzOyQAHq1l2FTBUdAXddEcN/2mnv9YevN8tf2KhGe+VbXHqPZqxKCyKuBqPqZQC4vny7PDFF4BU5QOc+Aff/eGmW6Q0UjBUwb1EKW5wBiNvO0XGsBRQNZtOp0U32/U8paR5wxJrcI1QHxiBxyGYVU0MWrCQGrvkAHGxhc0P2slnP3O+6drA8vLVcPlI4qRZnbFlaNbVIo0kzXYsMEjQZwsBZfCLD+k+F1F6hS8soDUcWi2A5cfvfGe2c/gLb9761Gv9u+9/+ODpsgPde7RZLufXD2d7swj4jav7x08eNdypkou5O6gIwYMJBzPdfenG+t5Hz86e0f7+pLTZR0khkwNxDfc+omffCedLdXNv+5BCY7HdX77ycvrE9fbVV+Vo19Yr3lovOZIhTFod45yzZ5qybXSxVEds5YCCKi0MoVsPKUBgtqLmAgjB3HIBSMiBOE4NkjBv82DMZ+ZGmur2O5vPNXiSaFYYuPfTuyElN7AwCG5eFc5mBlUCXI2DdEWZmSFspKbEw0C3ig4GZkUIpSZmB9Lett22154poBhVgiyTg50qRZJL1S6IEHOvGaACa1JiveDwVEUKc5Dg1bvP3Gtv5iGKllJZ8FxrB2ZzB5woeKVJBzb3C7MeUUVNDRw61CN5mE9XXiQzuSeO2qubsXAVk1Q2Ci7MG/X2JKIQ2Kx+++FB417tkQgh1CVWZVISOEZR1zoPZGaA64SQQG1Kqr2qEzEHcSNiZ6LJZG+7VS01ZzKaUpvGB0cHK16voXFHSiztpTbtp963zWHUHWvDs6vtCgsn70ayGut27OsRrZq46dys+ITpitBB2NzRaw+enR+xrmUmy/liclh658U5H05UcOfohaPlfZ62nrtoEd73hVJUW+Z05fr0M597+N/9i6N1fvb1/++N//qfXrvx2vGDexyar/6Nv7NNTTrfvvjl3z7euz3/g39zsHhGvBsx8xRL96z50lf/gtNPvvlNy+u3PveVz4+nD7rFN7/7J96t02z2+TfePFg8G//Jn+lm6WmKbUYTxcpaePrFX5/ffHV5vkHuabkoHJgTc0JKerYEEO7dkS99Yu7a3Rxtzp3dpBWIEgvD+uVy/61XJpbT8vH
s4FbWrnCQNuWzdbs3UcPWOEKWhm1JykFK7NmRy5rHLnHDaYux6qQLzVnX3ECen29m2yhZbZV1G8ZplhdrzSolJUjG1tEzk7v5IGM0JmeG8UUbC6u4m3rc1rkrEXFA1kLCQqEGIkgIzAQaMEu14A1ciajVLcfONQ/NRYLDhISJmZmpqS1dXRLVHYwRS+KqjoYZD4ciRAZXPBMzk5u3bauq5sbhwiaMCrfC4F8iZmZDqdrl581vpck+/yO1u2eCG0vAAMOq7K2qyhweU/J85utmFDgxq2o1+3qNTi1eXPcms/a1TzGFR48+fu8nP+z629ev3Qgc+qKJYqhlbAi1/KUBJFkno3xRGVdpdwCAAVcy3ORDesxFFVEZ20MNwaFmzEQjo5iZyagFFyJCMASYoGrBXahAQ7tg6andCS2rEOHjAmrHX+BwXNYP9kPLjQbqexU2tRwSB4muxc3AlBv1MUIQ3chXLh28MT341w/eP+7WomsSlD4omwSAk0EBieYgLyDmaMVSmhYNwmNAApubJZFVp5JCoCYE8dIRRcCttOTB2c04QGTcLBfLH79/9xc//2uXf/U3bbuR/R1zFVc5uGrYEjjZFkzqvfSaVJVhhiGiYrBiDs0JmMRgDpFAEAe0KDPJ0wf3H4UHT+ePLu3uvfna1Rc33d2PHj05XZ6v7c79ZzevX7160ETaXD3aMVqGIJHYYYkJ1m/JCMTuxjHdvHnlaTmdW6EYgm5pgp/8eHb6wRhJQ6uzW5NrL6edkTEY8255TGffX3/3O+370/i5z7Wf+6LIqCwX7MHZsANri4NtC3LmwN6ZK5dSEEbL1flysU6pBZt5TmG07TccmAwcBFz54W7mgQMMkbkmBnAINeKAAhNzkDqvtWImbSrVrGMOK4QBYY6admsQDlY5knAza1ioohmdilbYzWCPqwnEVYXRxvT09KmqSoxsBmKDMXMAAqCosd2AKQBo9a0zmZdOjUFMF0YrMHHpFQAHKaqDZMIDE5n1TYpZHUNi71Dr1jJYYtRSzLQOn9wc1UpUp8huYC7mTKRuzGyBuBjUE4sBRpZiYqe+bIbxVaVWOYg8hFBtCHZR6lW7RS3Aw5CA5iCAKRBVWqcTQpD6EGIQS+j7Hn0GwBzcSEQqJh4UzMBBUkhW2Dxxm3r19+++H6YSx5FHjAk0qbLKbuQ93mnXe766IavMp2uVGZ6OsZlw19pCuLgRUlh1mp5spi9MH5yuroenE203uU8yknx/Mbv+zEFPnjYH+w/06mJ1bX9z76wHrLQOy8G6AAPfur5cLM9Yd3YP22ePHvyr/8/n/w//7QZ0dSoy3flf/vt/fjA9evMXv/zqW5+d3/jEgz/8/fF77+4AAbE0R/n7d37h6uEnfukLz3J8+PTet7a03C4jh46nt2/evrFVe/fHOZJ4EkO4/kJ+eG+ZKL/9WXnp9vLxQ/EE2oGUkbEiqfPY09KWAMZny6sP+/NPT055cTpZH4z2ObgmtAxjO7h2edoUXs/B2aRdLo+LQJRzaGPa6SAdGuboZBuJ0cLWezJRKOIOaLTRSZdG2qQ1T3jtb9++ZKc9Nmc5u+XAKzLtAiSokFpebZ2K+2D8Nc9khdhqS8cX1lsHAg3plhU8BQy8tiChGv/Jq7Kpdmp1ml5VVxx4ON8Ch1CnL4AXq1tYJg7gYiZxIGMMib9ONY8EVigENjaYBKmBobjgR9bZy/M3IyRDFzn0ifVMrU4bZ+bgF7Punx25dZs62Azd7CJrbEjQdqrkRrZitQ6vc6oLzQb5gJTy6v0zc7hZsSHq2LU4vfbmZ1KK9+59cPeD9/Mmv3zrdmyTE9emkM2dqgIyqBmYqji4vjx5lVbRhU8i0EVTfNHD/8yB+PxTGTFz5KKLsul8WZuU4OKBATHzYsFdQAyXrRt6C2hGiNnipfHOvq6exH4R/CmXc4TDqzOFrnIpOQsHcdaaT2popE1tiynCbuCCty/N3kizsj75u+Ojf8ebZ82IXAXerx/nbs62dqq27gSqIgEBNLa7kcZWRCQZGQFZy2h2ULpNiJFErHC3KSFEUIIxS4D6KKbc6TjtrtfLd374o9/+7b9/vIypRHf0ZkFgCEBPxlzxQYLO2VCkqBMbXVRcg9OEwEMsNSoUnyEpmZu8/NrNbLjz/g8/+nDx4Pj2dHr46VdfOzmd3/n4waYvH9y/m7u9q4czom2npt4FZWUTJyOFBQAcWLfZIvYuNR3l8/UoSaKPH98cb8vs1Tkdja59Kh59Qo7GccRhYmjzdJTy6kE+vnN+58/Dh9/cLN47+tJvxxsvlvmGpUgQ3olF3YJKJFs5EG2thBZOKRW4mC1NM3NryMTJoTFUzLBRJDcXlpqLkKJse61g52FHQgxiLSqOING4lHpKENUoslBdv+4hirubFzMwh9xnDsKMvnLe8dzzbjWLyM1ijKWYWqnhucTMIoPTDkg2aEmqrpAMtRQetW3O25qM6PAQqBowOAqIcs4gXBjeCWGAY9SBcpTYay8sdfJcy9dSioPCgMuukUkXlsdKocIF7dnNh5PVQSTFDGSRtcKlHd1mMyQ60M+aYFxkp/OFBhUgqb7n+tku3PnCVN0LAMyUwTGleiFWJ6KWMhqN9ma78/l82xUQuVtMo5TablNKUfOYUlQKbdwRmZzO17FlJFgyaintJ+xCLgvtEbXnh7I+yGdHMsd08eDUDspignXA1lb9eAtlmGVG6R70H4b2UD7u8rOk+ye8usQ7qlPLD2znxrKE8uRJmr7w/Zde++rDB+yRunWHhrsOgTL44Pqs34zf+Gf/J7Sj4//hfz649/5H/+M//9xv/Jej20f/8eu/u93mZVy89+63Hzx69IVf/sorv/m/X37moyff/nb705+MValf4F43vWe70/ELX/rSv/rGH4xhZs3e/rTZv6J90ThtHWmveXSwe6LdjVc+Q59+fUuNPTkZteOAYECfyVIwJYZkVzx7BoCFx3dO6NNpK3p3ena9m60JwqFPsEZTUTfSkwdToRIsu7kJmbtxwQzdWhE75Q14a0kpMDcgKGlnyaL0MlIbZWslR1/n9+6umiVeDIK+cKGglNfoNzrS6IUd6ijmloThQQsHIbdtFW0QXyhUyM2tyu3gF9zWcuHud3JY4IEHUXtQJkp18HNhzyMK9XAtXlBl0qUgMLkHluEwYXv+9RjibYeDkZjZ62/WsfigcqjhIlZKVScMw/CfHa7P9zqoF/uF+BpDyiYuNBG1s3Q4sw8LoqrJqPvZ2lk617kU8VAmE+rtXOCuBcRqGQARRQm9qrl1BifOfb55+9WY0t2f/vT+vQ9L7l954/U2NQBpr5KEaVjkDjveeu9TGN7hUEcE/MysWBlA1WxFPOS3DL/lgLho0flmcX/5cGPbyFIQzAIjEII5MyXihhBBogVjmRUvQXbyWL9HuWtMg+1BbkpZpHJ3eQJmZuNp69YZO3s0MlWLLZa6PGhHmNDU0y/utEvY00u3NvvXr8phkHFQT9RvNsvzB3dkfaLLR5v5A+o7rmGPxPBQEMejXVDKPR9eOlLV+emzgBQIZrASqKQ2JVNWSk
7Uyogin6/WAQ2Tc4wPH63VWuGQBZKjc2vYAlLFsoXU4GxFqnpW1FSZqJIT66Vby0cjRqC6k0eFOKrJlZduNW5XDvYeHj/6+N5PLK///C9O3n77jTdfvnrv8ePjZ9vHp/NLBzuMnjV7YCUz985J3IO5ilnfMUfuqYgdHUw925k2s/MTk/Lw5hdpdmt6sMfjdWdrpsZhlMhGGl+8OvnCjV36/NkH39v+8D8++Pa/OPz8320/80t5kWUKOkXZFNtS6cCRfW0emFZknRt6lgJOBAM0O6cgVop6Gbx3qiGGQKFaVK1WdPWIFeEhGZdEJBTyYiBnZjcvpXLpqJJhKITaULIEApsWATEw4NRRzXBGqEYGIoYbrNSbh+sR1RflUMdiUpGrpTJxDG0Q603ZCOhzfm5jMCYzBLAb1Pq6cxZm8youLtV8IUQgq7RXAhezIML1YOYhNNncrM9VoxKYHTbMkAmBuFI8zSxcpJiS47lSg+GB4AS4UwhehhPXh4PYCWRkqlrhQYEhKXbdhpmdPHAI4AGsw1InDcxhb293nbfWo573s/Ekl3J4sP98TRAkEUnfK3NJKWlhptAXcw9qTh5kFOM0elswg02dp4wZeMa8hwPbXsXyUtsdNZvu0bxHt+sdr7WcdXIW++w9WEAsvpQ0uttPpt7OnrWiKS+foLPQh8Sa76/3rltP+uDO8dEL78nLb8zvnrRIalbLBytP//TfHnzmby9+8rD/0bdlmTefeC395IOT3/sf0me/8sVf+03P/WL+5Mbh7e/+6Nt37tz5r37n/9xcfuWNf/L6/FG/+MG38vvfb0+eJrfx7bf++K/uiuYOzdGtV796+43Nn/5xWM/BY9lt/nhvdsX9UzdvbF66vVzPJ31n45FrMAiIwzhoJmYyExOEfgtAUrLj48ubw4c77RM5XY6vpRCVgzAilBJseRYXj9Kn3lyv5haiuHfcQ7Bcd13pehMF55C2FIOXArPCcN765HwbEds1RshiS5oY7t/Tty410quuyJCKdTEj9iGrc2fuvbm6ZXODlyDkMKDiqGrJR/XxVPNp4S5BULOGAAB9GYRCdQsbIl9A5YgDU6kTncqtgMPNwG5gDkTOQeHErGYQRrUqXJydzMSQWnGaGYYHZNVIDOIvLyYibiaxShycmXlYrVwwrIbszRr9N0ytUWNXiKziGGujWduf6q+oySpuPpDeXbUQqBTVIeHTaq+uZsRUX1dVQaTW1wUxM8OpGIQK3NzkxkuvxTS+88GP7z/6cFvKq6+8enCwr1pgThe5g0zPFc3hec9Lwwe+aHkvDlpgGEvAL1bfxJVJoqqP1ifH26fq20ZiQfCMlFJ1F7sxKBKF6nLiwLCWSc7b3EKWst0PeLNNLrjnfkyOsYzZqU2r0klMZChmAcyErWcJbKnwhN986bWP927fSQcZ47UZMqetFSuKVqaYvfpWZKwefcgPfszLR9164WZsJYUgaXJ6ck48IuDZ2dLNU2wXi3XgQBIYJH0yHmdqpCGLyMwinkZtD4MhqjxZzh+fLPeOrm+XPagPVBwTtq3Qlss2CzOJlTWzspm6cGCQVYdnFarCAHcqSknUTQxs5sU4BPna73/rxZs3bl8/evXWp2699OYHH7z7k/fe+dG7cvvqay+9eJPL8fHp48fz5eGlkEubrDMwhuxZGBUvTghgglCjUYWu3dhpTsYGnqeXbHyzbUTTNjQks2R7VPaaMEWYaRlvNxNNu2HvrV+xv/32yR997cH7XzvcO9/7wt/Sp+5PGStt11E7j0tWIRHk0l29fGTbvePjeymRAWZdJeUyg6HMAHogEIhcHVyKo056neFoid04gOtSzJkCsRPX29BIhYOVYkAVYqDviyNJq6pg8xCKMzHYixP11XJHzCFU3RsLC2LWvq6GiCpixGlA3AEwImfi4kWiGGnN+eoHFC2Y4WrEXGjQaDJAHEKK0GKmIlJ0GOCZ+bgdqfbFjBEIVB1wZkYchk6bBKaQYERUjIislvdel1T1YHZmqswfgCtbB6Dixk6BmIoZBcCLlxrpDYADjZrRcrWuIxY3bLutQAigYSddVdZMcDPszmZHl49Ywr1798yMgwiH9XYL4OHxCQ2mDg7Co3Y8ny9Ui0gNdaJAUQtrNtMsbRjttH3MpTWZMM/YdopNfIrlYVxNsdwpy6vNOY9Olg8yVo6V+4pt04ftVJCZPHGD4N66rxWdtXuPp7bXs/XkndNMDtdl2c3GygdYPPzujesH7frS9v751nmNsloJtbZ48PQP/l/jNYrZ6Nd+a/KFX37wz/+fh/fv2rf+4NmD93/1q79x/+TR3bsPD/bfuvXalXv3v/+d77/z8u3br7715Zf+zq/2f/tX1z89OfnL/7h8ur4dJvu3Pvlkvf3MzTfyH/z+RBrCuM9n59Mb0+OTlz7z2bOXXtwuHo9k7I0KyMJFnaZecYwELZ31yyUAhrWj2StPrjyePlwlvl9O31hf15grOX8bxpbPRldvcZge61aYxhS23hk2cXy0PD1dwwTNRn1RmDkZk5qDLWO6ljGvXTsLpvrMX5yNFyV3x7lvGWumrL6Olotn9m5LroQeyMwGV2YD3GHMwVHcvTJQuSZpBjHtUZPHUK28Nsx366YDbHBVZRFmFCvrrqueNybiQcsMZirEKG7Q4aSvfEoAF6zE2oz6BQWyBokMy1p4eH5EEygweS0cBwdBFSzVJVT1IPGF+JJ/NrPF8zkRD2tUqzeADYHcQ5QvjBjsBisFblqq5Kx4hQg5VAtzcC2gKqhEKYWJ1bSopSgiknMPMMekqtp3169fTzG895MfP310X7fLN9586+DwyMyoMq6rVtydKKBSMsyY4B7cjDh4sWHGRsSAUzAneAGcSWDujkKkRg/Wj07XpwaVkAzizhIFzkCCIZIASU2YI9AwCALZb9Macc9vKI2TnEvJ0t3n2EaOVKyh116//Vfvv08J3iO4q2jSZGJF+47Lla/+xvryp/5qfu6dce7EiEAWvRYlksGqG2i22By+0e9db589Kqcn5l3pctvun8wfUgmIAgsi0cp2Ot3tNuvIEjgps01ClOJTjq0QWfFSrKipkMQQ+4Ud4/61qy+sTpG2bd8X6sEUiotxYMuOTDwx7oEMy6DANcJh2AnyRaxf8V6JyaiSySDFxEw+uPvg/v2T60d7169f/cQbn7969NJfvfMX73/03tXt9Zs3Lq1LXiw2++Mktu5DCAPKdZikDGMWQCzkhNQ3LLx3ff/R+0fiIySW2GtMMk1xx2hWfEp8mMqMm5HZrlGTZ5snaYSrv/M37v1p3Hz363ms0y//HX6k2tK6JV4zkoGLk7BJx9myMo3d1wZBSEJg1ou7yYAwXMEV+gSY1o6UzG3bZ+I6/BQOVYfkDgPMg3hBX5SGzaVp3185Okokj548hdSUX0cxZlKUon7l6tH8bE5EMHIydy9amJlr/NkwZYUbQAUBlSAbQqjRK4OiYVBVEgBzrSj62vJqGcJeAlPR4labXTIrgYNZGY9G225bIRkXkQ9MAXh+dntNVKDqYqohq1TMyQNHh9ftT50XD3+Xd
KvauvirUBQn4xmA2w9P3l8f4iHr0I3u295NtnpL5yxUpIGD2krlwDx14hLNZ4lpnS/lnnXr/lGtu2HXq1rcBDe2XEwLBiU5IYxvA9lC84UnDaCpc6NUp7BwCCGaiIIIWtEKTsP/ro02t7IwcTIjH1xKqSs28aMiNvWUqr7NORzIzb4SQakIMStCByqhZiSd3aL8vtW5Pfv3361/8rPvl5ZUpw1fslrVW2p7T245HRkW3vfr57c09QA2CGRg/4jLfk3GYEQsP8Qwb2bupPu9YXhbVskLiYIRbWiIgxrIoYg2HXNM3pYr63u9v3AZl0PIgRsyXyIP4fKss0zMzOWXldZ6rrWfRCp7u52ILBxWoAfzfoLZNKdnvSVM1TAoCSQkc1u3xkNEsx/HKHYJFej3VoHQaS9ab1pFxqKzYxizZeIRk+5gzT5RxLabg1vFD2sqeNv8+FPwPbEsPFB5BG8jRk+iGKIHG0N/zqi0/BbAQRSBk3f3Ka/EfAGuTJk6cHB9def+M2lLvOq8AYJjJKGnqATIyJejdbLZrHiygVVTqBBua+l/o//vT4j3/v4J/+3u1CgzKA0Ntgw9apP3lwfHKwt2fPWtZRx/T+3YcffLCUwtSTEZNvJLrS+D7e3A7f/Y2D1XxZuiKEVe9RWMOIwhSkdyLCEEcWwVpZLBav3d6/+eHR88ZXjnvxEmGdk5ZiFDG9YYJN3QiI0wBJRbQkNIt28chf26kXy95o7AVXdm38dBk8VqvOCJuCUIZHC/nJX/7ZJw/uni2fTrdmVeU++tXjEP3x8dGTZz+JcDx//uaBAnDy9Nq1kuSkovEq9JUQ6xaz+dXTX+1Pxq4p967auij2ZzvXLl9Zzv10Mnn+/Nlf/fLfgpo/+KP/7p2335nPVXBqWCU0jLHBWsnhvBZMZzD5OasgEse8YkDSlg+JssGckxpV0985Q8pZorr5s2mFU9Y5z6nIw5IvzF++MDLefG0z3uXhMV84eBe72mGh0PCec35Nd5CgGlVZBV/opXPFDKQ1CQSoJAORvMPhi3clY1jnWBeQ8fHN+zmHrnOcoGHpS/b1SpaSecGwDDvbktEzBspi1ieqqpL0AgM1w0UMUFYSpJFj/il60jLJVqAgASIhmOCj7YEW62A7nnbWriAV1pa3SukdBJN5aIN4R4z5aXvv2L715ZbUmJ5ffttN9+LRqbfTAuV6vfjImUncnjR96dfuDHUn1Zrc6Xh6uiiO/+Zh+ORxYEutRE/a634stpaYWw6w9hM/2Q2udpeDFME8OPOfW16sOhWzPRvDrIpYUihZq+lkiRb9mYTWh7XUQNUGK6i3SoQ+PH8oiyNBUJ4IGYh3UszXq6ePH02v2fFk7HbfOjz+ZXP6V6U3Uu+A2Kh9cXrWmBpmFHpxzAHhphlRUdW3bslCWFgACAwoGuVgWaJISG0DkYIM5SkAE6sKq4S8dATMNPDo8hHZ5GC1y8XnKAvmztrT1sMYYiBoIFbKrhfY3BkihghhULGD0gMYVsBQAxQgR+QIhWhBVDu44Odcud3v/pPF1VfnP/mzHS/W7J0eNzs7L3nbBhXrsT5t3f4NCY5Yock9RwbHWFKNyV5qgKc0NU2kiFEsc+EKBm18/IvCJlN9iWqNMcaKikJevfXq1tb0IozLYKG8y1N0AGFz45hnpJul9BuKm6Y7wVCBaExBIhXCUS9qDPNnzpzWAaUWiJDScDJJJwKRDut+mSlmICkVAvmGDSypTcOp2WAjx640efoCfnYRkt6UA2ltmjEmf1YMJLvPDBFL9r2ijX9WuupKwxPqBUiAzkUauPjSRJydcHMdwSn3w1gMqg+T2+P8gTMJMSuZq1evWeu6NojA2GTZwUScsUoFYBRaVjg5DPOuntQjH3yl3AVrC+bJ9Ps/f/rNl3Y/enR4vMR06j3ipFoGdn/6s6Nr9dM339gbuzpGzI8Wt29v39wZ/endw8JwDRtUXXPyR3/0ahNi5OiMakuFCQIJEiyZzDTgwChZ2k8fPO788uDajXffGP2bHy/Hdgz0VNpe1PiyiOgTLmqgZtPiCQIYMEbQVw/uLV76CkkgNr36vhr5W1e5rArxwbd+0YTD5eKEq8MmXN67dOPqrb5viELX9eye3bz5uoTCS6h3X4X7BIC3Ty1WDPN80TQrrbiuqpZMQzdWCx9HGKEaL548WnWLejJ+9ZWX7v7qx//qX/3r0PVvvPHGnTduH53MFVoQQSwoKp2RFqJdYucSJa7lEPLzVsGYeWXSA8KWVGJeEwZB7nV1k++Gs4gLKIxiMAYfZj4p4Gg6IBf633wFIhSDa2VCjs9P+AY0GkKWpgS8OZmbHvU839HwD5lqlY54HtUMjo+5NkUmialk+xDVbCxDdD6ghSidz60yyCSabTiYMHiP5LedPlMeClwQwwwfVWI6M5HmKLHxTteNnIWMiAoiZRJYVtIn8+1UUQDMbCkZokSgVzUAm7COgZQn4ABquZlNlyCRvlBvpSt5j3sQx629ObeteKMWz8G+DO4qt03/0oEtKz28F3buiCm3nh4tV/7Q3bgcpDw+mpz144b3ltXe/Gk8/OH325OF8qU1dsPK21a8L2YtX239aW1bLj9edlfndju0GvjzCfy12cvXXmlt8/nRvS2R2zu7anqjp31ACGICEGAjWbaWnVutq6POVogM2d3m44UBonOtUcvE0GbdHrf2kuyXjN7ZS2++F++te3+XbYhRSIlspcQlF5PJ5Oik25rUr6wcXd6vqnp1IkUwAYAjEKRTcsS+Em6zyWK+3IrMt6XBtncjAOfBZ3DjaJ6XZNl6PF4uwzvfuWm4FA6MVoQ0GwNtJiyaGYpIiwc4B+IcXNMVcqqWUBJK0cJQCTiBPWpbHlc8ZgnHe197K06q1Q/+pKpgRhEF2LC1kzA/ml66Vu7srU6X1pSqBERiVvIgQIOIqkTkzV3IjSgTEjdfITGmXh8QIopRyDBxwmQoagBZY/nDD39x4+YrX//6NzovCXxOdhwbL6eL/nOJpaEhGGuQP1NKm8IS30FENLkuG9KBJ6ybAjeBT4PuOo2SB470OScqdXOKpLhPPNG8kmEDlCXycNZbAQM/NPXZw8Bok2gvpt30DJI5lhtPSpJBzLDBxM+ppqpKwzNj426yYWblaHo+S5Zc09Pmm5mYBwFwDnCaBxWb0V/mdUITWAnLLMqA1nWyKoOxho2RzP3KdpjpIEoUR/x43rCtWInItSQFLKsXEfY82Xa/s/+64/sfPJcpm6Adh+LqrtzZn9ZVZdCr0G+993Lp6HDu0S9LM5Mytifz/91vXR1dmiwWx67c8r6RUgzICkVjoF5D8BLbxdqV4zAaXd2ZLTp08eTmte3dyQkMG3W9b4uiC1wquVJcVNEABKNgBqmE3Bra4FA8W71YS2lJmEIgFWn/1p2dEMjYQhh96GI3+tnHL5Zz1gq+7RaLo739nb296yfzErFsmmXFrqfTk6MOAMRNJvWqORvVPZPM1/3E6Nl8HqPZv2RePD/S8GjCH95/aHen335w/4MffP9/dW6yvz/53/zTP259XxF6BA8tSFRETG
QEDBDH0G/FdBxSvoGERKhljtDIsCnup6OdLyrAJMgbe5ExlJSfJO2Pz3ToBLKI5glngnUGxzTN60GVhZUG7kU6n4nbtMnhQ+Ibsq3+WtLNB50HBgOyOiCHu8QuTveGh3/dbJFLbz6tEUuIL51/OOcJO2YL2wE7TgY4nBUiw928cFNzWBNVEOsmQqRhj55jXMPFJjsAYJmzYVLPraIIAEM3BZNQSsYaIF57R72gAAVCIGlE2UgL9rDBBF8sy0owLSk4FpbjzkwRI9C5uisdFKtTBD/ZJtc/nHf7M1dMUN2013f56dwjKvGD02eT/mi9bffW3dbxx/fnP/t0/vhFCEZwdVmMcBZ47dCKbRCi3FzTcYPjykbmzzpyvfNlmO3fuPnKK/PTw5OzJ1uVv784nml/aRsV2QJAzOCIsLAhu+on85ZVfXcmpuDFUXf2lKhqXQUhQ+J7UOi267qc2O5FgFcimr769unjFxqW1jJICzJNLzGGNjaM0cwUJaN6/TasWiFUxBJtRBBVC4gGRzY4IGT74IxKpPCc5hipp0j6zYDcUiVGam7lALIvX3vls88eA7MOFcNDC6GeWZOf4GY/KylUo+YDFgf8l9K1UCkEjmEJpaJgU0ZyqA2NMGrdsVvCxMlsW6t+6439p79yZWHW68euegWWjY/9rB7vfb1dWacTQasQRUHaM1kRSRPspOGQtOg4LcoWAGqIQx+stTo4H4YQE5c59Z4pTapQ7yNgt3d2wqabRxCJxOfUjA1RkgGVmKYreZCTbLBoQMHTM5jUECf9Yuabb0AzTWEod7lpDJj5n9m+RoigEpSZ2NiE6A/kYxBBZaBlqKbCPAxRg4aMmn8PxNkQbegaOBfyLKpsrIio9ABZZ/N4S7M2io1JEcAYm7elDWFB0kJfggFpkmhgU9OnCJXiJW8AcWJOY2AaenjC5vFCYE4blFMtqAQFg3WAEDT1jGSYkrAMEVEldQSU+AAwpbA+fMHBToIIoCxCYELV+uXbd658fPf5g0P/nW8efPTkaTt2pS8WIX79yuTmzcnR83XTxmpaPTp8UVbmBx88LmxJNviz9bfesLdvXzo8WVtjtW8JsAm3twzpJDu2RDawBVHwwch0OiNQNdl689rJzx43zvqGJ6rE6KG+5wlJb00h6hPaYJiCCEHbGJ0NZ8tu0aympQ19D1Kn4bSVqEv4StFZ1tLQb37j1Y/vvjien+5dKtheqqri6ORuuw7OzZrmtNrZMqbb238OwHI1cfuL7rAqx0BVuYm1Jbu1CFRke9eVdgYw9i/Pqr3/8Z//26//xo0nj/zVazc/P/oo9Fvj0YRZq7KWGJTUiIiykrIGRZFtX0hyC4jIQVWJrCQZLhkCAlFaIAcoKyLlLZKiIsSiA44TY0yreNIRVgVp3hYKTZvKRFSJVMTkHEwqKmzBIfk6gZFYWnHIp0kEkZY0ZEIhBoPnoXIfzhEwaNpB+XjnILBJ0gxQWhHGRMRKw44QOc+ZmwJluDGgYdYiWcKIPGXKWHBSzX+BgpWjEQbO4uBZMwSCC55b0KigvGWbQFbzxufh5hOYWQQ0DKtEk3RMAPTSOrbwJE7Je0/SHza4uW068Jp5Wco0Los6srGxVxFBVwssBcO9hnZa6iu33nxw7+eP48o1/Jcfhzu2fGXWvny7fDRfH/7Ehp16KXjxq7s7oV7w7pzr5Ycftyh73lli0lJVrDgulQKkNdIF7vVSb64v4lEtYMO274LeePfOlRs3P/r4Z409xj6c5Wmw07OjWbesZ205skWoySKUYMfWY3R0LIeHdGatGm9HOHrKfSvGtXXVsXVsDc0L3372fBUrr6NrLWnoUe3uOX3dH31UsAudeg6FUrAcRMS4nZMz2j6YvXprfRJpxOoVQcMIbNIGQrAPEll6YraAsHI60sib9KwoyIpIn2HFYVSi57MSKGC36r03bl+yZkRapaKJTdH3LQhMFpB0+EUDFGyMSMgz2UTxZRAKhbFUQgvmCeACgWo2NZ35+afHjycH10ZbHGMM6N3E8OWSpFkvHu6apty/EZY+Lis3svW0bOZCWkJFJShFkEl4Y8IviQJSd6UB+R4QlIrCpqTBJi2/S3rzfOxFpLA2xnjjxvWDg4NRNQp9r5CowmwAAZOECAgzITlLAyAGSW74L0yENoZ4iqEB1XSrktQhpTXS8w7z3GkqPwNy7T98NeXlJJclQ5Qa+hijKQo+Z2CnrlR4E0KQ9NyatBIb1ljerHpRAJXCgEgKTJvRU6Z2SH6Bi+bMGe84xwQ1xjg0w+ffO/TxumFZqw701xQRRGJMKx/MpiXJJ5JZJe17sSLJCIEHcQgTGDBpxAxigmNChBCZlKXR0+lZqGwp0hMDbKNASQ1NHh/rOy9fP7gSmzPfw22Lay0XvHg6X759dY8rfPyre/uXty9Nx//LDz9jNy5L9j5cn/R/5ztfOlqGopQoSUWD/IkKMYEts2EOQcQQhJitY1L1vZ8v+rffmP3o40dtvWO5YS4gvWpt0IJEsnsLAAblcsyIF2WluDhrdkdjzz0ZqxEKX2hF0gAATAhuvuhfvf2VO/T64mxO7GJYz6bTtl2rkLO2WZ/5lk5ObgAoXFytHhJNgmegD/Ki99E6T6zWxp29amdmiWXk9n75/n86uDb6vT+a/J//Tx9sTXfu3797797jr73zHedG42kJuLreEomlcyKCWIA0hpCmvWkpH6DBBk3TG4WiV9FELLLpJKDPx4QywAyR1D0iEaM150MRca7o+x5RiDg1mmwsJ9u3rPKnRGWRMKymzjks0wIx+HAQITE/hpPM2aRgeNzwa9W8HSRd64EUSUTn0G7OjenYn5swY4PDpwHTFzO3ptycTSXPSZS0+f8NZYw2L3LOd94AWMOuhwEYz0+UJgHpQakWZ4VCTAJD80HLdDIgWwoJMQSt4xqIqia0obB1+OzZsl1Mb8z8KrqiqtzMr44YvJqUBiOLNct2WqfMKirranz5lXf+tlQv3T/83ivXz4rLW+8/tgunX/v66MWiOIaD1gI+s1sjni6l8FIvHOocOwABAABJREFU7WwFeyaThsfwTEulFlgF03FcB9uWffCvqp4em/sjw8rbB7NXb7zx4d331+GIDyyUX27lWxpehu40Z9PlfHt8VhZhbSyXXEPK5dqeLcjZWMCGyLKWoKymIBSuLIgNiY/LMIqr3hwuqJ5t+3VbB5FiOr526/jscNkFLmYBUFgJKAszm8ymhydbd94qXNkXnrdYe9GeqANsZhiDGYa4M+gNRCN6hRUFIJnxAQsEYsfomYEMSwiGJZLJP8OGYCVEppqoSnRE79fGjIiVSERC2nLFyRZOIkDGQiVmq3JRIsM8UjEF100rrmY7NT33o5k5ezr/rz/5d+Mbu6N66xvf/K2Dvctr8KXf+b3VD/5dVcvJ049mL+2ilr169/Tx/eNHqzvvvLs69kxqTMJde8kaVlX1qZ48x5wuYj5pbdfmokJZkKAcZm7bth7XT548ffr06en85MrBVR8iDQ50GjRfekmbDDih2irISSKRMnRIgtjoDXVIhQQMy08ye
pS7u7zve6CS5EGRpOs7lM5Dp5j639TvioiRzbBnA6D9WtmcX0jOcyHOnzKpTsFDkYCUZdMISkXSykDQ0GALFBupSR6J5fbkwrvNlMxNnEhGH9gg4pqNwTYVRoZNNPNXZBMfldjmSQlbIYbSMFcwyCvTEjFNoFahgAVZQAtjmlW7aEu7XcZIRilqBCRGGZfV+0+7iruDHXf34WJcVUFgpePJ1r3n/k/lybdv73zza68dN6s//dH9UI9rbn2sbZj/oz9448x3iUPBCrZpfBNTy5PmakwqBFcYEBJqIKIECX2c7GztT+Ozds2WCTYiEpoIC2VCBOXPOn14UaRiDhSY9Xi1vl1U0nlmjRBnuXJWdAIo1Kj4eSMzdmzd9u6IoIWzhkkkNs3yQMUVrNL2AQCMsWXJZTkKsfetF227NqzX0ix906xXzerk8GjdNJHuPX92apj+x//bPe9lb2/n9VffffVmWJy16/Z4Pm/myw+mk/39nW+Jcr1VWMOj0YS4E2VWF0PyABAToiBImtAjfc2m3Y+DKjcV7wMbnhKQrMPMggHVGJm573tjTOIqhmTALmI4w71JuRZC2KgPMthMuTD/4qUgDI/IxuubAvp8GqyUfp0Di2m4m5RKduRLDmYMefYL2TfdqCEla/5XGmYyF8ZAQ97PfycgbmhlOjxZLlUSW+08CQtApElGtWm583hK2BhOw6i0BymhZUypOklXLGTGKgAIabo+sfNalIg9Tv78hzt3bjHgz2R3Z+tg6+CT508YlqCnky0SjnTaAzC0XDZv33n3K19973F/uvf2dz/+sT8p/nO33T97NvVLmR6bk960zjWtERkdYbtGseByqeVCqpbHcy29jsYraCPaCK8IK7Heyip4b8a9/XpHddd8VODWl77y9Mnp4ePHuMYj779yJm/O2AkvQlFRVQWSU1/yvFp6PWX7AmaZ7gqLBAnM0nMACxXVttY741K1atrFodkWU7Hb3wnbo/7F4eK4qV57s7g0leW78sndzk27WIdQt9VW249wdDK7em3nu19ehWgLlShJjase5Jk6QqdoCQ3IE1rIuqJYEnuIV3DilwNelYGQJgsDQJ0aoBQchcBWqoBCeILghKmgSDYYZlEJqhGJiiRBFURBYhLIyBDMITCiBdQwnPcsACYSHbQSO8ODX9wv9rd5ahf6/E/f/9d3bt+5/srtnbenZ7+0e7PZ4tln7S/t/m//0fH9F3/+0x+8dflAa9UFIVREKhqR3gAxYAc2sGxQ0KSpIhAxi4gxRmKUGA2bEGIiVTAxM5WlA0DMVw4ORqM6xXhiZrCKxBgs25Rc0x00IFECy4UpEQ0UDx3a4Fzd02YtRC6wN9koZ9XMX8w3NPM10iyTEkNaMLjJ5uzJxpjN9zP4/A5n45uLuQ1DcX0hz2ZoV5LlSgaG2VrLzMmAk9nQBTXIhT8bcHkocDJV9fxBF/9rgN8pGXki437nug9rLXLgzZUJDRsVB6yPmU3qdCGJxz/8TwGyDBFlVRZEJhal0rlHp6uWpyOwwrKSqGHN+tSa47WdcTUqvvOVl//kLx+1jD6W0sbRKDx4Vnz6+YPf/NrutUsTkVChY+Z+Of/Db+3V29X8rGUCgmMraVCXfSFSs6ZRJIBhDacBokjSZVoRWJZX9ujh/WArK6EzYpXDgElwAiLzZ5bFIwyIMXG+XgttMZi1FBvI0/d+9rkpqrGlakuvXZpe3q6k9Q1akIp2gEqI6/WpKSSEnslWZdnLKYAY4YpRXY8lBmttWZZb2/VsZ2SMYcPeB+dIxATPXdsszg4///zpl+546OjHf/U9tq6s5oXdfuXGN6Znu4eHx2H2+aOj719p/9BW5eHh08mknkx32/WJKaiwzlpAC6YoMqgkFGySVlBS7TvgsQPoOtwPzWSLSKCUvFURYyRiSKZBqUiURHvLp5yZoCISUw89VN4D52LQ+13YypCvoUo2Tx5633zIM7zyhcOfHzZ0vZsiMvm2Sr6eoE0uH7CnZIk5tOQX8u7F7Dv8w4XX+sK9yvrDFBmGJYqbV8inJ/UbeYEbKM+pU4WXXDTJ5Eemjz87FYohCfAWpqCSpFr99Jd4MdfJJJxpNSoWj5eHnzwuqlIWhD4a4ZN6HDn0bDtvb+4fHMruZ2trMH74V5++eNx8Y3q5rfwp7ayaeCNEjKmZumM/jYQjqg+oXnQ819GSSy/jpjfWs1kjNqJrlkZoTbEL5OEa9F6oL95mHIw4PHl4dPbwerG42pgb1kzYLTyYtYBtufRm6uGbGCbVkbZBJsREQoAIRQoKm8hZPvT7uyc3JmrJlTB9WNLZ3v5eebDTT5wJJpTt/KMf8MtfxcvvNPOqmx+2Ui/NyLcU6pntj/f+7pvhlRrHQWZp70HKwZAW1AINdK2wqh2BFYawJgoV1BJY2RI8QIokAmaiOJzK8w4nVWq2HJVRezUCl9xrlAjSM7OhfJqF2CYOIxGzWiAw80A5ZsNjEScoqCyrseokSgWemDDB1beu4+FHvKtlXVk2dw9/cv/4o/G0uvKbtw+fHIXRtoT5j/78/z1zozf+1u2xHdkKGAMda1sSFJzoQCkLimpMq0UotSaZuixpcZCoMBEZG2NgyyrD1nrRKCoK3/so0rZtlJwMBCIiRWGJqI+RQEPeSdMohkgimqtuMNjNvaTc8tGFe0YJZLzgIZ/qT4Vi4HErAJhN2U6sGmNUY/gCs0rTQCvFigsKoOyQoVBK5DslZtIMfA963/zUQzMKhSpxVqNqli5hw+rYEDJzd5KPRzo8SpvtS78WSvIh0gEBTxmLByibL7BGNbNJsjVlknmYKERgJhYhTQZqWa9joZwahqhKsAALCLAAx4jCFCdL6XU8ho1Jp2EYsWMYUGG76qgt1qfdl25Mt7ZGfQOEOK60CUVXtNu1u3al/JM//5XylmOzWPp3X7Zvf3n3cLEeGeol7TbhQW6uF4oepbzMGKoBMEm0klxcBO7Oa9vf/+Q5Yxo0MlOnzPC5+cu8xUytYeYgYIa1YbHmvi8sl0AhyoXjk1CsvbN975/LBx8v9rfDt756UKINfRSF955B1o2sCctlr7LoexekA9C2fjzxRKQabTSiFbEQ1szUdv74aDHZmtbVDCYAYXt25er1axI4BBV86exsyUZXq1XTrsC4dHl3FX6yszdazZcTi/n86IOff26L0dtfeXM2Gy0WK8NuNKptUTArICxWSQCRuFlZnbOvZkBHzDnBXok0SfNSgrHWSIwJRFYghuCcY7YxRmiyFjovhVW/kCCHvL7JVkOm4uE2bbJgEtulBcR6Xnj+N8DSZuT0xWzJPNCXkcvadLOIN1YeFyLBF580A+bDygj9wrMPDlnQC9K9AY4W/PofhuT6jygVpsIgZqtghYBM6vAlyZtIABINgdQRe6md5fWj5fFfvv90ZG3bbq0JBe599Pnx0eFkr4ohsmoQYWA12V23i4PJ1ru/8/u+mT9tj9BWP7/3mTTyZbk558ULsZcr62v7eN4+OJoqu0Dc9XVk1wSs2J1h2qvlVmxv0QRtoA1Ry9oyOkHg0HtaoxAODnsCfHz3
2paY3YqDDdIvfQMPKcrW9K1UjZZrrixq5npUNxQRRNBD1mS1sH2HSCGY2pnn1/fXlwx74a2tnUu3rVA9MrEeL9k2/kXsD4tr+/P5akWP3StfbR8/X99/uHP9cjxdL9sHN3fHr70y6uVYKrMsyr4W6oAe5Ek76AooQZaICUXebqSq1BNaCy0v2L30RCCK6Xc2BJNcFAIMFrvuO0EoxyUAsFhn/dJbU/iuc4VhsiF0hlmhEjOJ17CNGlVgjFW1URgo2ZVSQeuAMdGUsUXdOF79ynX3gZSXZSUd11sV1RA+7V6sxLudsOJ+FIt20VRbNz87eXrt9Xd1F9qIGoIY7itJSzFFIGytEYGSsUYBkrRd2rLECKgmyUpicJgkg1CRyFSIRlUlxmxndz6f13WdTIkS2dIYBtD3PQyLgqEySGlTngppoElUFAXS7moBVDJCiyyWlwFyvrCCBQlASlgVbXgWeWJKFx+WXSE30iGCRDUmAU8iCh5gtNyUSzLDVuZMg0/1d7r0OXWkRaCcLfGGQAgCGTYxCjE4lc8ZHZUUARhpJfAgaFbZnKiLNUSKEYnsIiqiajauYTIA1ch2Y+epF7kD1k3XnjC3jLwlUNqmyJIZbcpEhmFASnBEYHKnTWmohBAQkOzCDSOqwqOi7/+NDy8WZ8d+azb65Wfz0VZZcKysm5/Mf/s3D370/vFh5+pR0fT9rGx/99u3Fo1x1HdCliGQQqwa2RB3EritymwAmOTpSoAxAJJVr1l62b+yfefyo4/n3hXcaWuN8wGWhgXKSS5AWaKiZAVSFJO+iz4UhkdRCb239WjbjCPVbiSu98r88NRvfbj46pvUrL0AIWhhaL30xAHsjLL3XYwAUNpx8P1Kz0AyqcfCoghRgkDatnVlWZY2xFZCYLZNPGvWAWLZhhDIcgWW2fYIIhJhLfr+7wWRtW2Zm9s7t2698spHd3++Xh1x3Ds8ujvZux9kz9A+8yVrdpnH1qIouCiqGNvUBw+/XGZYNhANqsBQlmVcR5WyTW6yf5cY+62tyXK1kuCrqhJVY+yAPyf9PWc0WS8UptnGMlnDpsOe0/M5YpRXlQj9enLEr+fadIEuDIaRTOpSx5tQnqRX0mw9iQvp/7999g2eBZz33PqFh1IagSFvLc0vmrmKUMAg19rpvxOLAiJgNiAeyB8FYBIFg/NmB1ZlUlIUnUhh4b1f/9cfnRpe2XH7+fH2bczn3cfvf1TBYCFUIQbhiSWwb5pie7L35jc+etEfXL7WHFcffPDDJtrJ5OZJdXbqzAuuxeMw6CFmC+u8TE58c0W3JJZPgVZq7dE30ayZ2yiN6Ep5zbwmXZOF6ztxvvIxBu8h7Nm6Ebxn7lWahhyYBYGCHTfBNzxu0ZwFp3DAuKyClUDKCuEANsoeMbINCPXWo1cvY9uVMfpRoHI2Gn9Z5NRvV169nXG9c20xubl4sTr6xT3/+r6985vF6i/2/JPJlI4PX7x37c7re/F48TDYaqplY0o/KsK4bFFpRzgDLUEOVBAaZWK2pIVSB2FgXaoAaFPtlArFYYioGVODKEI6nVYgVeWsMyEIW27O1n3rt+qiXXcqbjSyLGZQ8PEmfWjayAXLXIk4MIUy2ImRmrBFsiU0Jd43Ae1ydLwGX65DQQ+rQGQo1h3Wj3S0617d4+Bw3C6WR1geX7nzOy2TmRluSER0aRAs4DgbUBJxJCQfMC8iRAIha0zfB7akghiCTZOkPIoUYmVmWGbwk0ePDw4Odvf2ut4TQaIMDbSSMSCiND/P9OJcwEOhkDAIsoiTFMkky2QeqmdW+oJhwCYxbxBsbOr7jFyDMlBMQ7N7Tv9SIgMyfAHvvzDcBaW+U9MU8vxmnwNsuc5ikKoBiE0KJnmVoWrqKKIKZfpYgppTOklRLK9bhYCZgkQMPdwmUF1smBOcmD/8wetDkVFuDNtUiYh5Q5qzEBYBc8EgoMjtL1lFQSBVw4Y4WpBRMqRE5AybGMJRU7pyR8mzImVBYwUiQgL1k4l965XpTlXtXy4Otmb/4a8PGx4v58d/+I1X50f+7iM/3SqDeOnj3/nmfjGZdM9BTtkEhXVwIl3sM4OXmY3hvg8h9CzsHDEnAy/RZMIKBqSAD6F6782Dj/78yWiyI13gAGstp72VoCCWyakSUKSfEQIytSCsWrc72Qq9MFfClsupNCJ9JVIBKMu+CZGlQiyYpCpGVcEhVkAAR5CHTME+Rfi+b0R6EZrPu6qW2WyLBVGCZY4afd+WrjJwfWgJViFEPkaRwMEu0ViPyFxYp300ZP3Y2q165uNUtOGievedb7XdEaKtqtsoTAwnQT4J4a9F1fCMcNnyri22J8UeGzbGETNIVKOiFwjUbEY65yrzZGUAMFMIwVq7t7P9/s8/XC6Wnz948JV33rl1+/Xl2ZlNHuAb1Y3mS5EpgprAMN0YRxMNgmLKGNIGxt1k/c2NSwd08/fNNkylROI452AOZzt3skMvnF8xtb1fyK8XQaMEGF8AyLOST/ND000UHqZClF1slQA16WprZrElfIhjfjYDcHL6ZM4jc4JJq5V0cIExQkrMRhYf3Ts+vl9ceuWAhJ/N3enZw2dPm6eHbmYFTEHgrIhQCD11X3v726/tf/WTw3sL4p//+O76ZF6XrzZ01ozrVb08Cq6qzaIyd++2D1ajWDrR3ZdRr/rGe6m8+i5KJ1VLaKIsYsJvueOwForRCrRoDVCwDb2wX4fWaAG/FONEe+LWUqPryjo7WcXlispSRhbBQU7Rz6p5ASExplehENWEYEqPJ7Pp/PLYjghiqIyxCN2WK0ZXxPZ11dlbbz9e8+MPP17JxN68s4661/xy7ys3j99/cKPvivDk3Vvv7raPnfZexp5dy3WrzsO1qFbl+Kwca5EQusxGF1Yw1CSzOCHvBjQwndEwhP00GkhmHTaVnRaEejIiYiWRKK60VTFCj3qy5btWojJZ0TD80pVZRfIegtNFY21RV5VUSjVi5VGzmRpMlS6p7vTPjx/+H/6H37n3wY8PP/75pCLHpotrp5MrB5NnzdFZs2zdHnbte7/znR03K80kHK8xsQqgFQ2M1YhpWB6ASJQuYT9s42aAVKKxnCFZ5NVgkqqMZN0gKkFU+cre3mf3Pj06Orpx42YyvASQ9cRIi4mIcL5oTAfKFYiJ05Nn+S9l2CoZGw/spJy4aJMKE/Caym4emJbIde3Az2Kkadn57RWoamb7pscjc7N56MuJkLKdDAXxxpUeecSfCiziCzwTGvL4BivLCTX7bCiQaOQ5gCSP97SLcbMcVS+07wpoztdJZpWXHFEmfig2TTM21HAaPuAkVbTMvGl8BxPTAnAKIwCoSAbjbAqgVzY0Mj2vj+3aXrok6ZcSwQztxVpbQM+a4tWq+s239+4+bLoVffrs1Jpa2EZbAaOPHy+onkDgg31pdvbl128cz0NZMwSiVkgjOrWmAMvwhw0n3tzw+cTBLizjSFCwLRq/fvnm7Pbu44+XccsZYXBYAhNCCbYEa20lgRQFYJXZSmF0FMQverk8sqg8YMuJnR7w08csFSBkQG1TrispRnU
VDCgCvTEKcSLeWmUOITRMBkBZWOKtrm0UQRCYkWjMRAUzNU0rQb165gAWkFiqAAnBg4IE8dJYC+aaaQREoFj3PZHPrhC2Jbgtd6AUCBzjgVFEjSH2vp8rjhSLrn20bu42KIlGlrcLu1u5mSsrY5xlkIGqSBwM3olEJPmDE5GCxnW9bps//dP/VNf1u+9+9f79X+3v70nobVGkTVmqmrakJLHDOQeCN1k2JzrdHPOsD8b5TCcnu3SJzrvenMnTBU+ZckO9TIgQZxF8Tq8ywNCcr2q6+blpHhApGizqkEdHSRp1nqQzoyIhYLKRMWXTK+ahwVbkUciF9t2aQrMtEjMbgFTYJPQoS4HTqMoQTIxgV/qVPbz3INhq2R3vLheX5u2z/zy5d3xqfAiNQ1CQ0AgUsF40t978St3s/+R7P7tx5+bnH947evh8VO0c+XhtUjQGj705dvXtm+XDtdw9kaWtmGQShJs2tD33hLX4Xqkj1yOsgi7BnrGGWInbkb0qAoTVUwwhBmJvU8NbSAEf1QMuxg7cu4bDmCbz2BJXworggYll1NXScVAYNYoQKIC0fHRlS6+4MvJaWl9GVxoes5YwZUGVbVwhPB1/efdsDjsbXZ2fTU6fmqK8cmPWHX4yrZbv3ZqyPK1JBYtWyiXXrZZeixajmiclpovt7VBYsWkVkYpoEroD0AhVxTrJ9hRQEEQDEDkRI8BAXoLHbC0oVrWFIUSBZe0DEITIFKaiUSJCA5QG+6oSwaYoJbaGbFVNmUeoiCcUSuG6oi0NWz3vUrFfvDj94PBv/vn+9Vu//fZifbU7/PTo6LDbm11eLg+nYVxVs8fzIy2aByft54/cnX/8f2weRYlsIyCkniFAD+2dQpJhpyKALJMQF6pKmvyJbGIpK4SsiapJbsWGlUyUtBMNpHy2bG7denW6tRUlAhJCtNakO0HEnLch5CF5lMhMNDiZpLqSgGFhbiYeG8sxBhCLJB3AhVkUshVFMtJKVweZJnHOKqYoOV2pDvgvUorWgXyRBY4KgaR+NDWvm/SmAoFQXid6XpGzDpOnpOXKJtjId9qem9SpKjMHEQMCksf9ID4GooLV4EKxkTI3M5Fm2ouC0oYlZqtpMKURw4eQP4e0L2so5kGWiQELMiQGZFUdYCEOsCDHbIPCOgeCjBLoolxj3WhTVZMZQt9HZYCtBokOfYhRCuZjy//xo+Of/MXDr33lyueH0tJkIm3N04+O/JGYkkpfdHEZvvH1S+tQGVqHEJldHvcjEjSlBjAxcwhRAVtYZhN6n11QrSEYhXBClWMUjmtf/NZv7H/6vzzX+nLwQTFhKWAKwAFG1QlbFWNROVCoy66MbLktsHW1CEu/1rAaSVUxdsCF5eCDqFjDtt+9NhstfN/bbrWU0BFCadcRAWLHda3qAdgCIm01Tpy5GGK37qK12fwyCdGieMOWiGBtpMYwE8OCReC4NGxURWJHTL33RFw4y6qpAAbFLiyTV1qUACY2piq4rPaIrqiI1hFAG45DWMZ45vsHzeoTLF1lty0qV01cVVuumB2IiRSIxpC1Bgrv/eefP/js/r03Xn99b+/S//P/8X//9t/+7vZs92y5ZLb5VlHWQRAPW4SSeiCFPzl3qsro33AXUm4bwOQ0Xzq3rtzks4FyqMlcJodXKOd3C8rLGBV5uy/pINREYpcNEmEFmXzvLiDPw2VMhXqW8mbciwdX+3zDBotPM8SULMiP+aWYlZVoAJwNwAwGG4AiMSfmhLAFS1JZsw2xDMHwaPS8Xa8W7cskno/uffCzxXTCdoRVCME4i95EimE0mr50cKtyo91Q3/vxBw+e3CuNiVARnV623ZofPquBkbX29IVvjn0Aj1RuL3i3i9yTSPBtxZ2vIvVtoI65syCWHQgHBPSzSxGFDWLVxWdP7BmxI+kKNqJ9kEDcs3aK0tC6C7Y8K2sTd1fauWCIW5bggMgyLc/KqRqIiEPAMVfL126ZmV01c9+iqB1NmAvBOMJRZ6eraJcY99PZ1piq9RGap4U7c2erUlw8uPzOW5dubPmnp8cHzAutXNHZEFoatVQVKiwqoACeVzOUghpoQR0QoAkRcwSvMCqRCMl+JzJbSEjjmORRKiqp77NuaicHW5EjOUYPMEG5X/tCrC1tWAdjK1IE6dloAtwkBsOVqjg3YR4HF+FgZ8Sz6McROxp2/NU9//TnHy4/ePD5g0/a+vi7b1/57nv6y/dP7917dG1vtLf18qI/2hrLmaKe2evXJ2Fxj3duIUYJCsmUM2qhPQNsqYCygFIcUBWGEJFNA05OJSenm4VMGkouOWndQlTo4eFh71tjTOwDM9vCBu9tYUOIheEocVPBbmpSzdImXOAgJY2fkiVVCUGYEyCMRDORjZFWou1kBnG2vNChp9ZkA6XAuTwlA0+aJ4UYCJASQgbWmNMANaFoIB7K7USAvHDZcyIfEjCdC6KQ3w8oCfiH0ZuKbrbBIIeFgd7JzEMe1QvtfeKy5kRFwGCRm51LNuaYKXzk+Z9YyrUBAxDlgW/FROd9sFIJLciQrYzagIJjCWONQOsJPpuf+i0rWxE9G056UVbE4MWQmcX49ChcKekP/9mtWTX5BvP/+leP7j23dREePltbmlju214vV6tbL7/SdJ5BRJYoq055GNyngir5dJrCqqpIZGPTY1I05kE2KrFXZ5bSH1y58e4ry7963FbjquidcCFaiBqmKkanaolKLSmMGNzZymJs767Dve8/WGsXsJa+KEYcatPCa21LYdeS4Vju2FUUp45cjbbyrWcaTUe2axc6LA0TCVDHWahLriyYQrIACCEApIgp40Klj56yzJ9M3u2lCekRDaxsC5YoofciYgtrYaLEPKQACEGUNWpI1BAFG6uIgI7cLpX7nKf+3kvj/UJC24bjxfEhAMBa66wtGbZtu8Xp6enp2dOnT6fT6Zfu3Dk+Pvn+93/wO7/zd26/cevkZMnGYtgxxswiuqE9b5JnRns2Z+0C0pv92PMt2OiOcgUIgDLD7tfmR5svbHDsxHhSzk50GFAwztdk0Fedj4KS4o/yTTyvFTb5GAPaDNUUkZPdCVFihaQyncnIcO80s5qZwEpJqmcG1YCV9HVhY62KAciSBbGSNWojuNfCWubKHLdnrx/sbp8cn27Vz0Y1EUsQFi4MRRHh6KFfuvXmWKZH95/t3bh+9+HH8WhR7W5LiF17NrlpPvqlf/wLevN1R0fNk4fGn2xV8G+ssB+7rifnmVCv1xgHy8GKZ2phtfe2Z/TMpXz399wrb8F31rrw+Nj/6b8p1qcarWGDYLXzruFgAwpbtLb3kBUaM6ssW+kIJyJjZRYttrRUlNNqVRVt7ONI9cH1G+1rO27tl4Txti2ntoW3Y/Z2vJCi59E6jBbmUtH7Xbt2o9bNYHxThHXbPpzy5PLO7rRbBGZGKHTU+ZHj0CBU6BzVhgAYge21PBtV1BMqUEcIEFGU4EjwQA9EoxpFVYgNgcBEqZayBFH0qRGyl27uuCl5L8rgio23fduXhQtNiBSYiZVX86UGGk+qtvWAVJWLoQWcKosRVM
CYpCSpAm8RzYrJFOXywfMP/+wPvvGKWT0Nh599/J/e//o7o1cnonxmF9Icfjg5eOON2dVPn59Ky26++/LkreeYH0/H1JMGcAO0UKdgQbBkRGFJOU9GEsBCChjmlEo0ikCUCURsmJB6VkKIQUS7rr9167W//PF/ffTw4Z0331w2rWHYwkoIzBxC2Nw8zms6cylr2WyyjgwUNhA0Zt84ZpvsKSVIlMj58TjHnDQXR5kzMgSHVD5TwoiQm2cdquPNFIiENjpFnDe4eSOCDjrpEANn1Qxp8o7IQRkXRLo5wKhqCD0Zk9B7TauUE4yczTTS55txP00uJTmIpdxLiqFuGDic+aljerec7XkIACOtoFAGMxOLpgUFFpTAlQKwqqkDdiAHFHBQp+rAVbJZi1qQKPMkPgqdbo8xFg7WWPQiCIbEiwP5vilKVzdvf+VltOH50+XE2oPL5uPnPZWjghCDWIt4HL7yzl5V8mItrrCqMRUTGUIQHUQmlIDmYdzNSfPGxuAC74YIpigTzbHx+NbXr3/4+X3FDI4lGGZHUrIZiVgyBg5SgmtYx+oEYHa4OaPZ7i67UMH2Nqzb0Cz8k2U4nXsvMBMTK9aJIYmucIFld7zV+9iuj6azPd8uo1gAMTZga61K6KwlQWA2EnsAhS1m29ttt/a+U8S0m5nYEGfvl9T7B+kJnEWmLN57EIKIIws4AAoedMAwhR2KUgBgY11hQdyH3loJIRCzKjGPtuopACaO0YOkj20MIUrs+9i2XtTMZns7O3tdtz58+nSyNfnHf/yPy6o8njfOuRAkbx4TBbBZA7ihqdNgznKe+XCejBN8hQF20g06lBFq3QgtMNzB4R+HdYCbXJ0HzMMTbprrXGduioHc6ib6pA4OrJsMT5mQeVFdlNX5g+kVYTMuzrwKlqTxTZMqbFKvBWj4CwNWQWBLQlGNpYKpCDAKg4KVjPQxjjB9+fpvrA/HcoIyPNrePrIlIxgxKhKCt8yykBu33tgZ77lo9orrH/zwg+OHD6qS/Ys1jKsKOw7y9AODOR/9Kj74gNtW90P/Wu93QhTpC2+NurOuLbyYwAhRAlGIS0fObMOQXJrwnbf93OPJw0C0/MlP3epEYgEofOMFZAGppQks6NVbIMwQSeZbhUoZMIlU9TIPQORRJxSs2WIbi6avZotXb8kOSdltsyPLHUKg6qwaL/qSeOojGjurZTHidhvtyK7He1U3v7R81OBoxTN5esgnthlNQLBltbTOW0iFfq2lVU8aFBLIeCr7SR2UxQs8WJgBScsCK9JAFER7CxRAr7CEoJLXshOl5ikqsR3vVR49RjCGgxdVGDYSJfgAGIlakLqJU08iZG1FCCLCbFUtUaUl1CmPqDGNKys7Y5321+r5wx//5yv2ZLw6Xdz/4Y26XS67j/59O9t2l0fbW+Ws2tqay9zoYYfWUdU8+uDZ+39x7b3LTT3pPetaUIMaUqdagAKrMrMVFEnFLKBE/wOEE8AWk6cSG06ywqROEgGKouhVmLlwBRS7u7uphE+THmarQN4clS6Zgi9IBYatR3kbX776RDIMP0ViGpIZwzFKbgM3G09zmNZsYrwpzNMdFD6XuKhuNJGbTJlysL0QdeT80m5CDGgzLEre2MluJSHEkvhQgx7pPAowAzFt5N0sNBVJRpKkw0ok2kyUaQho50zvjQgrBxjlQQk1xI8Baqc8ImcogwyBVRM/04ItUJIawBEVghLWwgElUAKV9EVgx1RHNUwM2ZZHNhTXCxSBIqtRlagCDkoRsed41uDa+F998HS8PLl56dL9RfvpU9R7NixiFHbFlg+rseM3Xr18tq6sTYt9kNXOWWg+BGtKvjYDfztPbvhCOM0LyEjZx1DZuu1kd+/yt96Y/9mvYjUpBc5gBJQxOpTMFcQpTYgdY8R9GYjBZfj2794MvfZqYD0JiB1z/BpiWIR7D5cvFke0yyYtj1v62Wzr6aePfvgXf+nP+i9/6bW33nqjXZ8BsNZFaYBgHUsc9l6DrWHDzJys1433fUohzMYYLgoHlRglYTFRYgzROdfHINo5V2roY9SAkJxIVAUqUSSokILAyewM5GPlClvYooAKTDo7ClBMLHmJxhoGFUVdOoDYGL5y+RqxEBJXH8ayCNp12zSdK4oYY4JWVDXdr4R3nCdaHkTwenFTKW1O7IXUmDvW4S/Z3Z0GoAkD5jHMmC4UF+dz1+F0/9oV/IKxZP67KNK6sFQjyyD1T+XOr91iEEElcyIBUEKI7PCEnC33lQGjwpopQEwwRCa1vykxExcSiNkSnMBqASZEKz4oVdy76O684p/+VXn8/GS3vs/MFaOPEYoIVm6b9uD69du335xM6vXz5pcPHjx7+kuGD96xCb30k6l59rHOnxrbuuBD5enVZnE1UFD2impyEELXzk+UR9P9l5uluKs36Fe/ondfrl65HWa161arH37fvP+B279sr15FtcUfPxi/9CamV1bf/6+2dPXB7W7+uH/wg9HLL8cvv8HNs3Dygf3qN+X21eUv/oKrcdyb4umHEYhwQU622YlwRAWp29lXHm7dmDcyrrYKKwiYB4abBF8vtejdqNdqFo8m1MyMd7og38czrrspxjcWz7r4pJk7/MI371yrrY1+RWa6mpRrxsRybdAzqYJ7uJZGTVgsRlMesbQSfGDHcEwlqAUVIMeqTFIqEUPz+qLk6ZzOGDEgtqi57dvCOWExBmoR2wiGmxbih1OSDBUFaNmwE3hViLJQYMuoEJ1Uu5XZKXQSd7jZpeMP7v7ldv/wMty1KZYP2smLcsdty6kxy9EEB5fG05ktWg1Xbrtj+Mdherp+uIujI+HjrQM0hCVQAQ5UMSLEG4m6mZYQGyClWpLgmYmtBZFIDInhY1mDnGcMpqKwq6ZZrxtjk780RYkm2fEkyloWamKjoNmsABvK59xcIsFdOcEhxigSkRy4+CLGtRmZ0kbST1+8urk7zPE+UUJymjSDb8PgypMus26EQDKIKlJ+M2QHpnEyb09pY5ANp0iD7GZLRNZy0jvkZ5OIzCXRNLeVmL5n0/puGvvNJolEw04057waOgcbynyxVBsktFo0cgbNKOUthQVZFUeUsq8Dai4ZFVBCS9VKuRKui+CgFbOzrsJROz8d2eoyUYAygohCOUbuGQHaguoyLMMre/j9d96ZBPOjn312dxFKYSMSG5WAtrNv7k4vXdp9dtyWzgmCNSQSksMZNKbfWO6GE75eWAyenRfiu0ZNfAruQcbWEMsGiyV//e1XPvj0swZjsItSMFfihGtoDYwIY5KRwMGMnHFYqz/idrTrVn0LZmYv8CLMzlQT/tL+vsi0tT0j+jbOdrfuvf/JD/7rX/yzf/pPntyf/+qDX37VftWHFYCqqkgUFCV6VxYinUrHieBPvWo0xtZ1XY8QYgghDgdSibhwJhGJCy7Sj1mgqKqKCDFE7733fXI0NIaT5UYfAlPa9ZmEf1g3rbehsB5ErijS7gwV8bGzNiNJoolTwCLBdwGAihhbQDXEbH3DzNkHngjMGmOa7CZKZt5ocgF+Pj+e55XiecP5h
Xr2XN59kbdI57n54rVE7n15KL7l/AxcyJtfzL6b2TMRIJwXjuYqIUfgC1Lj/DLJLoyGQXUUTZJ6ERBbpIDNRoWJWIihTCBoIuBS7oOJQVaECcbCReVYgC0Zg047Yfi6sjA0M/z1d/s/f/hsymeVMyFqaSRGCWyJJ+Pptdde80X34NnhbOvK88/uh/WyHNngRWFNGZZPzOctVG3he+tkf9HuLk0LQAIf3JLf+HuLH/6AlydX//4/a4+betLym7cXL47Hb7wbnZXjo+UnHxRXdujSNPi2mGyvP/1YrCnfencZyL7zjr10g6fj+L0jV1+3v/kPzLX99eNP5Oi+u/q23Lzu1y2/8VI76rv/+fD6b/1R25/JRz9ieipPBWYZ7OVrv/s7713a+vDBycPj0yDOwoutA08XUolxIboJjifUTrmtwoJ9oAZxhablkbtE10fr0NGx/7hRrPuvXa6rKclpkBp1vbTwSiLEHq5BW+q65rFH37oCBbhijqwFIoQNsSUqQBESkvcCJVnLgEOGYVUGWWVRAzHCliEwylqo9gLLLEZtVIEa7dZdgaJwRWgDENkapqKNvipGtjLBBapUnKqT9vizVfNYVk+39Ozk/ok786v73Uv1DbuuH372+Y3rs8MX79/7iUxvHJhdG6cN7+nh8YdX33ulhLeyNC7EitURlQQHsgQLDUbFEFuVCIRUrYr0gBpm0ZBpjmCwiCJKQgRIRDIWzTg6OrLWgpJVjzJlFmVq+y5e0AvYKSHbECZkmDSmnlgsGyJEVeZkw01JHpod9fKlSnV5xqo2rWSe+SbfnNxsDWmbBqbIhWt+DoWBRJW+cOGRGtaIAIDBaVlyqrEpp/VNVBmuqiJ1vSqkg+PuYI6f30SCz3jAtXOwuUAfy2l3MLTauEykDVDC2X0+MT9FGEqSwn2aIICJGWqBAXlGBQc4RUWoQTXxiDCC1tHVjNq26rcvlz/96Mzvj7auRI1ITUMSO8MzWqVWi6CdhO0b1f3j+Z/86d2yms52R74JQYRDEQlYrN5761rri7IQlQBAECQttiIwjKBHHnzr5peiLCIZQ09gtaZqBCzKAi4ii8KyC1LYyfTbd07/7QftZHsniEENU7HW0Bo0Zh0p1aS1oVpQIQY+mypvCxthMQIrFi6AEb2Hb9tp5YR1tFNqxx/86K8/vvfxP/rf/+PZZPrv//R7r77yKgrDXAOIwWtaus4SxBsqyKhq0OwWsNk3rYYt2bywllOZxSQi1piEmibSOxEMG7YM1d4HEBXWJEW+MRZtAwWRFNaWVRlDXLfrKOJ9YLa+89a6uq6iCigE6Q25tCMo/X8IPUAiEkJkDnVd27x9EslZdpMEYUyyMQ8BzOmtZo9SlbQqDJvdpRdTY74d5+xofBGphiYVYqZh5IuxMQHH5ptS9kVSNXxBNJhJGIPMPfOlNzQxGkpw6PD4bFTwRYONzY02qeFPliGaLF05fRSUMLvMliDDCXNWo5p637QP2JISyIrYaAUlYCI5g7aXwvHEIAClTL50+/jR7cf9k74m6yPBcFRDrm/We1ev7r91s2JeLye/+vC+l9OirPpeil4ksjpnNdz5UqA+/PhvQnFcTfpoA/cUrHC4cSALdevtvTf/WI9X4d7P7D/8+8e/et/t2H6vkuNDloWcfWJefctemYqX9cmj9hd/Pn7znWYW+LDhmwfSITx/4h/fn/z27zYtuwfHtjNNO+WF7z6f11e/DJLjP/kPozffO7v1G271rBtN4vFn86dHp5Ev/d2/P3/pzdrr7Vf2Xf30Z4eNJW7tpBXjUQJ2Sqe7tJzq2VTPTCvaGn8a7ZJ1JWcQExwL69xYo/ePVnwSf+PmxGxzEFYJznmu5qQxAA2XY1QdRo0UTbnFI9ZOQxHIchqcSVBZp57HAgr4VFkNJKx0VJkgdtk009l261tSYWIhZWNE1MAKC7MaTohHFxELFF3nq8r4tmXrprtbTVyriHFGLLjSSGFivS7XYf5k/2C1NxNmHr998OJXT+J68uat+uTomPsxXrTP+4+rtSuDG7n6629cvf3unU+b06nbXsD3poLVPOawgAUpq1KaAUOZNq5+0JjpGVmzSQNHI/G9CSQSY4wiWhZ2a2srhLAhGiEz+3kY5WGw19jcveTiiLQgjxPhIr2gIdHsD6Z6XmbLoIcfAMvzwVKm9mzc6AmZwbRhTxM2/aXmxDogaWl2QBAV3QSR/E1Dq7npUZEJ0ol18oWKffiPwa4rr45htkRJgkWZgZWGoYlKndaE0xefJTGrTfKXkFy8SM78IUQefPCZLYOUmIjihgumDDGAIRjAAYUSqwONFCPVGjyB1owpY6SoRUoUNUIdPm7a6WXawZw4CiHCKBWqGhytxrV61mVXuOKFhDduuD/+p+9YH//Fv//cVROOwbDxx3pjWuxd2fPrwDYmY06RJNhIb40BFgmbD0sl2anpANSnED3UUsIAO5APIGYHG8Us1/zGW68e3H9w2huuWUrhmrVWGauZMo+IpxIq2NqgYqvQ7f7aa3W7bNUGExgWkYT6sOZptxTfLhoOh4f+5z/64PDjR//kH/13+9s79//m8alffvnrX14sVlxYALGHtapEqpLs0wEa7JRtFJ96S6gnJpt3YORIgOQzkho6GujBadWOMRZuVENErEmseyGFK1z+vJidKwMHYhKRoijY2M531hpnbSCUphTAkAnBEwEwIrHt1pspDwkvl4GZq6pkNswENkmtkZRgxuSCYJP4MgsxTU43eezXsOH8Rfo1lPi8SR0Ap/PREJT+G3h4uEME4uStzhe+nMt9wqBtHoiVep7TNycrfX+6gHRBLp8+csplOgHEZDRn4kT0S1orKLIEFbAKq4lFoTaNgQmWYKOyFIKSbUFSQMYUTk5DUdnRKMTIE1OPyu5vv3vy/twxZKR9Hww4+G7/2tXZ3uXPju+/tH/16fz53Yc/K8dRBNTbSKCo0sq49K8W1eG6nZ64NwIuWdd7YXGEWMnelu50b7/l7/3EP/jx9B/+9/2NHfnF0fT3vtpfs90koLc27MmBxaU1irGbFO3J5fF3v3b2eD65VrePl+XYnS1PZr/zbXvzlj1cyH6lbWsnu7661N07nL5zvbn3uCr2yjvffPZXDyav705feW8595Ob39759lv+2vXTpp9LtK7fu7r7ZjX/2YPjx7ES5xD4Cs9rzEsst3QpZ6oroInV0vqlyBIuUNDAvReyVYmG4r2Fr0Pz9quT2AfqwSM4hGm9ilo0WLaYtGgqKawtpaqoJhIiT+oJPcirWtEAimnOaBks2RMUGP6iEFu4Qgl9jGqJDRMRDCy72AhbMs70q16CuMo5OIpUlC7GABCDgkQ4RI7GGa5YbZwUcmtv/Mn3Pri5b779zsvPPvylK0bd4+6me+PeRw8a8lVVVa668frLk5e2H7afLuFHbvqd77x74mT97NTOlgV5KVxqfGFyu5QSV+LZDysUBlEB5/0yMQql3X+sacSa1asQNqxQNvbk+Dh1mMzMZMCZ5CwSedibltpB5MuZq/CUgEHnwt8QB9g5dRwpKlA2fuIsbKJN
tEhYnyh4WNYtqpl/RUOqSsEeSsqS3a0uBAaVhL9LnhLniRIASrvHNKuDMGCnqbwYoPj8ToaYlMI0pWVviQOS6KwDuJ1eNm9FVR0IYkNPkNSFmgUfiZcyxBNKcYIEFGNQJWPy3NdkDwo7QGcmGVMwlzoCTYAJYaJSKcaKLZGJmC0jY5GJTEbRP/zMNaf7u2d1v4RBCFHYIFoBB4ORTpqybqvKnNHK62oc16t2sWjvfMk9eBh9DIWiY//221tVOTn2c6cV4JkV0jGXIq1qTJsF0jEZ3DShiUgGBjSNMPKYgpNY2YYIW5RWrVdWa23HdrT9zS9f/Tc/bSZ7U6mVathtE0aCCXgMGVvsiBrIdseIh2fP608/W76IHk0vPFa11kynbmt799LuwaPDxYf3Pjw9jOq66SuT/89//J/KrvZn+O2/+x07tevGF7UFYLtaIqnCWiPSQAWIqlGS1TgbRZDkci7CnMcLeePyMLMYUk4+DDGKcACzcy4jJ6qWrQKMIqn3AITQg8i5go0FaYixqkpmlhBtUYR8a6QorAiMgTEj58p2vW67lmEBFIUrSweCxCAaLAprbbqe1trsp2Z4OKr5UnzBJOM8X57/XaAYQB3azJN1CIPpyxvLMyJizizCCy3zBX5VpjQkvWJK5MkgLWMJJpfFUcUMUlDkCXXuvEU3hnfZOCA9vQiZJCmUaKxDRiJYyKZIJIk2IUxsUiAhYsApGaKSyJJaMHMhYsGOUBvmIA60y4t7RzZGe/mKBOZaOsbJpGtnKKreelJW8fDL7uCdG3feeOf02Unv6dHyvtnulBx7QdvJyFBgiO5cLhfc/vVd7DR8IA1pFUwctevORn3/+2dHH8ZwTHxsb8zCJY2f/aDeOmmna20f2D2KLdu/+5vVl27EJ0/kZLn4/C8nU+nm9/Ds0bqchM+X5vJNllN77cbpT/5DvPdg9uXvtgZ2eh3PPX/w03j8hJsX8M/aw4dh+eD09O/wb70zeuPO+JvvdBM0TWutWOkYplGpt7fv3N47feiftmaXn4/gK6jhNTzgiXuSVkITtSPbMffw60hcBReb3ggR1N8l3p6Gg4LVAkbgYV0ora8QHHqHvuJQMbclR45qlS2TJS3AjqkiCYogEhQgJpvAURUhTts+owpsVVVRwmjkYIEePrTBixVr2UoPAkWN1hWFtdKKahSItTa0LMIao61cKHphUQZVxhmPrl3PH/9v//7vfPK9//DB957zSVWd0DsHO8sn97z3vl9Mph7Nzt1PPziSk93b9e7+fvM8Ftfb/R320BH8WcVwHDlSQVQgqhhjSayIMBk2RrRjptwJcq488zJRm4JExkrBQrCWxVmeHx+NRuXu7pUYAoNjjApJAxrLhohERIkYnCr9kKyKhUUCooAQolhr2VioeN9XVRVCSPfZWqMZKU6egxwVbFlFSGDYiEiSDmZLUIZms1YgqXspixNBnEhiqnQ+piJQQgWzQJBAkDQSSwnwfFadLeOVCMSqkk0xmECUlk6EBLtJQkWQtN7MlFXQkvB8EhFiJHcwEEQ0+1RGTYLmPAtNP4OCMCw2Tk4mKX6wS0PfZHgkxCl2MVkip+oEFrDCkRyRY64VNdEWaEKYQsdKk96VYUuOrxj5/mefbDscSFOhY+kBG0CeDQmFaCZo5qg62u6m4xcN/CjMDqqrl2bcre49eWS2tnrt92bt2+/cXizFsjWcdxcrK3EPLlJ5opJoQSGRytPIgzlXP5rofZRX/BhYVQaxRIlsoMYGB2uXZ3Lnzq2fPfrZQ4eq1jiJqC2PjUxC2Cl4om4Gq13NfsvOZb56+PysdsGRsdz3XnyPkxe+D5N6cu+bX79R3rqxvFkcPV7qPL71xnYlo23e3S5my+frYlqhUSQ0IxRAkKDMBSARkQygPashaNAYKTq2gABRMw8xpxtio3mNs+YDJmDDKip51H3eszFIkctSJiKGYe4lSAwMNpTOj4IoimYaclrkzQTVEAKYylFdj+rAGn1PIOusRLHkCIjI6xkSNp5VvEKqQTKuEzEU4XpxwjLojjZjEU1bvDWNS84nKptGV3OzmnmHg3Zwswcx8/6H9HteyaZzPDAthjY2F6iZWpLa1jwGSug0mDhdSkr+AcljgInSymFRIAbApaSLPCpKpSAzF1CjWgCGkHTzFmTVWC2AUsUU5Lo44sKor6xW62pCEp7q8bHYb2BKsNI5efrRIx0HbFXSixEbuu7GrduPFi/Ko0+uXDn46Z//+MXqYTGz8IAVATGs9EG8zC7zfInFwuzUGpaB1fNKQwk7tuqO0Ry7idOdCpOl/OJ/IqtaF/79fykOFVoH3fr6t8qnC1+E/s7LeGLw2Wf93c9sQ7okbgv/9APbo7mrxTqWltc/+Z+1Mbwzkvd/aIsz//gTM4pSBbu0toL1beUqO3ttLo1v1iDHIgXBiF8yUIg1O//gteNPn91dnD7Z1s7hdCY9+ajRxg7ixYFjE0PHaKMGFI51LQFirSWRcNbcfWx3JuORo9gjBIwCsw2G2kp7A+84FBTWBuQMexWrQpFAEQwoWyLDBFZtIxQhZs/ixDMiUDJZTUYI6Zq5qnSGQhcQIBALrid1bGPbthY29B7M1lqUDobJog++D95yZUoSEwvtpFm+efuVq5fs//fuve3RFjXs++WPf/DnriudKTQK1li+OPbFero7Cctw94MP3vmDO4TOQRx6i8zmI0vJX8QULCvJey1JNgpVSs6J+VKIYavZ/zlZDathEmiUAGERtc4ZY0IIqtLHaNhYYzOuKLnkj0PxnDjSMQqZQRBkLOf9SFAg7VBTVWtt7hYNq4hhSutkkRijCW6KIesnNuwLzcimTYuATP5CuqUM6CAz+v9X4QuS0wBlq8jNoIryJ6Iby4GsO6ShKMFGHK0J+2ImQxnS19zWpd2im8UDct5oaLYg2MQgPcfXFAoZlpxv+gdJU6tEQ8hiCRrY00aVCU4LRg0dQWuRCqjBY+gW0ZR0RiPTzsJiwmf18tnq6PHLldsLZxWtLLywC2yh5Nm0xNtxMtatuXbruMM2VPvTJw/bh0+Pnz2IdKnSVWwW4e985ZKbFeu2N9ZFCUyFUlRERUDKR2wNCwExjfSAtD5LBUGDAqob4bMZoHkmtaxWQYoC7ABnrZOKv/OdvX/+k5anldSI23BbUXY4TKKZxBktamom5MdhMXGrrerMCZhbChpLE200MI2ND4+OX+LqjTs3j9fu2Y2rGuvlo8Z1lk759Pi0qEpVoR4AtCDqmcQBEbBsVDWABGqIlcAsnBx10jKVvCdiQEiS2JaH1JWWBSlRpgbQME0ZfrubkbIkKoHmr2xoyRhQkWxCtXFQywVkHt9aBltIcgshYmMApYEMeX68NOefPGS9gDZvWt7sS5PTaPLYGab1yAr+DWJNQ/sO0KD9xvATZUcqzlsgcmGSJg8ybCBKhWficWo2xkmwlOTAcqEZzwTJvHBhIH8OP6BmFj4rqTFEZGhjaCXpnjPAqlaVhslcoVoABZHTgtUqKpCDuh41rKW+IrhVuTWZHz60hx+OLIfVfX7pNVhtzhbHfFR
fERva4NhLvPrqS7/1+3/vFz/5+dOnT9xe+WT9md2NHEtpW+kYlZN1YICnlndw+Ji39q6+/uYb+MmHePgxFWwq17vWlRVPWKdRtwJqyFZBJUyJitjatu57+/qXtl65Rhz60q6e/TI8v1di2UxmjWVwr0YLY3QpzGTJiCsiBac9FovoiCaVaUGlcb5qj5fy3e++8pu/185Dv+zsVjUvCraGWCV4Q8qhH4fuhj3Zsg/jR//6ar012tsuw4ksDK8ga7XCwVO/VNvZsA79qTfGUAAFsGEhYaiyPXm++GgiXx3vWhZ1iDZMKr9QgXpLwcJb7dROKPkGOaUSshZYFVaFGmPYsooDAtTmHm0zZgSsYQqQZOoUokgUTctQBWyZySznywIFMzPQrlsJsDUni38GO+dQCDNrVCJyzkgIdWl+/vP3l4vlVKcxCNRVlRop+i5e3rt+48bN4xfzZVy3zSlPUNnJbLp3HDix/EIImYcPYraC9I6IoYAgeUblTCy5fZNgjO29B2CtDTEaNhniYSJm3wUwWeu2trejBJM25BFE04CJkr2wtTbdEmZWETYco6Swkqrj5AidpEDDrDfV6SSa2I6k2Y4HIhqkZ2YiFoGqUDakzAEghaOYkuXgTpXC1mZOutmNoiBFdtNKVZQZJsdpZWnUZBdLREqD8kgya/o8tCg2pM2cXEPEEAmHKJbzbgbuLn6raJoJnI/BzsPZedjVlNuz8CivF6S0M5w0re9iwIoYwIEtKqAASqAGamACmQJTxbbOcDrF2Qwnr07OPv3wbtU2e3XYxaLEwiLtH7OebFD2ahc0HcGbvmmLNZvpj3788bOwa9iVsxjBneDGpfD2ewfLF4EsYEu0GhAITGmQBlZNEKgYBgYj0cH+HwSOKmDLKX+QJTIAiTKDiZ3CGnaAE6ngqKXw2pdv3nr2s88diu1Kp16mzDNXl82uLmYyn5qzCVZOmik3E2lt74XEQEwAVHoLLKsbCNfKvX7tFBNT7x4GN7056194Y41jivMgUahIYKiwZfhk5xk1h+xhx1RyK0wu6KzJxgbDqGI4ldnyYiAe6AY+1dSD5jVopJvpaxJHp29IzKONDWk624yM8RLSBh8dYF1VVeYQomUmaLzAk2KbymLJNXHaXJ20OF9IvRfqv7zKV9PFTxIlyizAX1MKpU5WhrxKF28AE2RzZUTTg/K0STVzSzZvIH0G52h4Rp3t4F2TPhdNwSp/DjSojNUkgnXidiTWm7GqUM3ZNyv3wJrWKiTnOBSEQmFVC4KDTfI20kpRwpbcVZGdovTlzriZnyz+y38cT8TbxnSPsf0aShw9f1D5x5fKhjRGO2pCe9VdfvHxj1+5sVu8fv3HP/0JjddcOgTAMXuEVpy1aoINkaZm8aC9cef6zpfuNLvXVj8aVR/+TUCgolKH3gSClFzAUk+9YYIG5oKtE8PWVv39j/3pE+6Bx0/K0Isrg3YMl1jcor1YgZTBAL61EQwj3ihgFiJTwVPy9fLkzXceLrj4yZOXXrsi0NBGrjlvorOVWOWJKcJybNsX9x8++fDuzr65VN9e+S0XCukCR6BRdEY99U3vT1vu2BoOXeDk6RkCKSlzcPzgcTutl6/erGkVUDCHvix8ha6i4DQUHAvWWLH0Cso728jCONYIiZpKNiZSgoJJk4OKAhYQKxgShoopjEL7rnfGgSEiy+U6R2pR733vQ+kq59x8Po9w27vTPvRlWUWJi8Wi3h0/Xx5eGcUQ4rOnj9LIli159Mzu9PisnrivvfOVs/WR758UdSu2atar3YmZjOsjEbXeeyHLCThOEYCZYBkFw6fGjXVYtMKU1ZnMTNlCXUUk3XeGhhDJMoiKwjXN+rXbr73/s5+enS33Ll3yMc2GlMDGsO/8IJM4B680O5sLG04sI4EyNMYE86q1BaDJ0lIVKmBLGjJYl0y0JAGVw6wnZbEM8YKYSTayiBwXkpB3wJNB54X3MIXFJqVKwq9T+ZwYanKxOaAkBxLoJmFrnooNXk6cPGvT9+Y3cIHXcqGCp3S+z9nOGUo/fzHioaFITKVzjx5Or6UpI6NgFIAFShEHx3BADR2pVooJ8v+mmGGxh5NJf7hbNJfax3/20f2XajuTs0lo0UCDWg5SIJRgSGCuuJ3rity06RfOxKrermh50pWh3gIU8+a996ZhTHKmVBk0Ecai6/NonQmctpobUEg4syqUhAfYQlQNMYiR/IkGCnTy5UzONrk7saROxcm6oO98a/bPP5zTdmUqJzOM7HybFrvhxWV7us3rCa8rPa1lMbWH051PrJaCFRAomhDoWy994+ho67IeB1Na8SToDY4dm22HCOrBwWhUDgxAOslmdmIVIpt12jmCZ3xFOZUUG4kdyHD2TdvoU88tXzbzy5zYRFhJCWo4Cd/zcwyXMXfVBCDTuVLGzdbsuZLJz5zx3iFXDfT75FNvTZqwnB8wXEyiQ+6jYeaSYfg0YWXeLO5S3aDKG46kJqJm4rUPCBANr5MxY9XBnllzM0vnj8llhibaWq5ZL3a8eekZ5Zu8uZJDyM1MnKTUAHHy0EtVayJ8gFjJUHZ7JlVStUQWMAoLcsSODGsJdaARqCYpNIwCj4z8/8j61yc7juxOEPyd4x4eceM+8oFEIgmCIAmCjyIpFkWV2KWaMrVebSNppttmxmZsZ8z2w/5X+2Vt9+OOjbWtWbdpu3slbUsqlaR6k8VisUiQBEG8CCSAzJv3ETfCw/2c/eAeN7NsYWUsVgHIvDdvuJ9zfuf3GMGN0Fl//C//qZSHennGBcdHv6yf7FcvH+H+J4d4eKAiRvr+7Pmd3dotv/nlD268+TbvHDR3fjGpyPMoeKAEWuss1Psg/Py+1hMVOzq8fk0MeGJn3/uTvt7DrV+QtKKojBHLDUdnURgSy5YNjJBlY6x8+rMQ1hU3UZjtDtsiBLagyEbADOptb9siaM+sVlmZo1UqwI2aCYenrbt6efK9vyivXVk9vPsvf/OT5dM333jrZmhFGokSyRAK0cvk0NamqWTxkx/9YGTbt65Nnn72wVl7effyO7QyFHs0wJr7RaenkddcEIdOjDXKCgkchGBtSaEhMH/xoNmduYMD7gKsV2N9hb7QrlBfSGuw6U3NBZEzVBI6FadIRHUDONWQFvap3UpuZenxZQtiywBhtV4766qisk4ZLFZMYZRUgxpn1Er0uru7U5qyWa6ZeTbZbX3b8qbAjg/9dDI+Xcwth2p3fOfOXb7/sCpLv2qdjKI3RSjefGf36LlJ54/bZr6/O+53Z/0kfHXW7O88LxFkvGGxhSFAogCsTDEECkTK0vesgYlUJe0uDRuQxCgmawA0iXGtNaqSzBSYIcniuO/HdfHFF19+8+hRPapEVaKwYSYyzCLiXDGwk1I0wvYYpdZfMcjyiIgZMUko85XExhhwds7CYLszMDnTvpDMgGaJpBkid+QsJMjWU0hZgYBmjVIGw9JLyzvXIf7l/JwP18LA6soAAghpw01bpypNxniZ6Z1MNJF9RxSSVmAIMftga45HVEApOXsO2zAAKXz0t8jRw6/kapnTafJLtU
DisRODlYioAAyn8bcCRkBNUhNqlYnyDBNe7WI+o/kuzV9yq5Pbt2I7f24GOUFckW1AgdSBLBxLsKSlXBqfVlUzQrfkXRYjstmELtqDk52i86MXrskb3762Og6YSOmLduZpoeCCEIgCUa9I9gWqsCRIQZ6ULG7iUJmS05mykkBtbn2YNLJSmgYt4FApVUQ1N9xde/PqSyeL2xxGl9yoWExptR+eHpj5JX46QzOTxUQXRReKJtazhnUVNLAwW9v6s4PJ3e+/996iXqzgoBsQh2iV7elkt29JPHFv4IUCA9CN6kbJEnyh6AlMZHXwriHNs15i50vyYEm7KMnQ6eC6nLiIg28PD4tfTamWeTSOUSQpd1MgDKUdCqV5enjkBqT2vIgmpZFA8uFiw3mnviVDJSPlmNYspMNYSReXG6nsp4Z0eO4AGihRudYqaMsU+61hGUqJlIHzpckw/NN2R6JsmCVBb6mdRu7RM/xEqflA/kmm46I6HMZhiZNORLohBNgO8SISY0ybXWYWIVVhpvRDTy9HKbHT0yonR5gkOJoMawFYhYOWhApaqIyEa2bH/Z4NX91ym0fFgQuu5SpM2q9Gv9bYvczzWwdmUQMMv/Th915/96U33lotuier9gc/+s+zsKxqtxL1ReWLkZheiIK4vaP2W9+yT+739dEL+8/vx2WwFQMw77/fXDvUL37M/psgHdy4tDDcSwmAWb0FVcWIjUpZkfNo62gLq+I1klWJDAFxFNZCWG1wgcWStymnhVSlYNOdrvm5l/b//C/d1cubfvPa4YtVufPpjz9t5/6d995So2xZIXoEFHDSHk3kix/9S3Ny58WJfH3r7mW7Vzx42MTn4KeBAy8EC8habM8cuG8jrHFewBR9r8QYEdawDqGSVaO37y9nO5cQOg1UoYP6gqMjKUisBhiBYTUpT14FCkPEokzMRCbvp4ZpBJnAimhFRAgWXFhLNisfIyIxgVHXtVdPoCgaQzDk+j6kzWXTNMXY7u7uBcRqPIrUi8jB/szy2fzpYzx9tk9kXSEe4zq8/3sFFsWvP1icHJ+QFrPxXrtodVfdrL75yg2JKmrYuhhVOLlJZvYyOOGkQoBybretSdc+MXGMAYgApUmUmXzIMDXbvLC1xhg2UeTg4EBEoLLd4yTYJ4Q+22oMThrbY8wJ4VVVkqHLJmOsiIQQtutSiIBYUtrFcCEkN8eoMTMxIJQTc7MEE9Aoen6tJJIVcsjhwCYhbAfS9Adzq5BJQBg8ZoHzFbOoQIkp35cZ3R5IvayMrWoiLSVo4P1ut2JMqSSnbz7Aihcw52HDdWFeTv/YXpDpgbMETpeTgiA0+A8pQLACC7XEllCCxsCYzJhK52dY72CxK4tDt9hZ3f2Hn3982b3oH/lJa8Jp0DOosHFQC7VghqmgE613l0XtRxQsNoauwDoGB0Vn/J/+yfWWSByMRVuKrhSsag3DEYIIJ2CAmUVYwcy5gImE1BExc6LjDhe9Zm2f2jxVpeprhRzEKVfEFYdp8e33Zl/cauIUs7jY1dPLmF/h45mc7GA1lZbXgjV45bBywiv0JasReDZ7d9t71yZv7bz4tBxZaCCqwC7EKpCbT6YUCB7YkLYAQAXBgYTUA2LArMoXhlrLJJo95NKQR8NnJpINLjIKkn+bc/eVnlsZPtT0uYcYSZU5B4BuR7pzV4tUAPM5yqkoF/bAw2QtooBJ/aCKgoxhUoQYiFj54uN0/gXzM5hp9zKARbk6DkjS+Z+VJNlmGuolBlXv+YMLSjCXYDB7VpxXcNLB3W7bhJJcOMs0jP1J8kmpa89Wdxm8UmQ4TGUI/iXibTc/vNTUhxMy0zFh0QVR0s0XyVFc1MIBDlwTasgIOlIeMU/AFYexiI3tw88m+yaOMIkri2ZS9/buR0/mv66tr2npEKrQvHD16untDw5tO7185Se/+nkxv3dUjU+jLahtdKdH19TFxtSW46tvYOP7x6f9c6+9YncMYt/6qv34x1VbTn/nve7GVf/4Vnf8axO+YQnEBQvBtqAgAqPG9h6xtSEw9+AIcY44BAW8ukpaUVW1lgOr4xA6V6hY1cpp471fu6tvXf53f6HleHPcl65Uyy+NuL78+ucff9U8Xr3//e/AIezFwho4lNzH9uTTjz+8Mik3T589fto+aze2tfLklweX31k5RyuhJnBbwiO2XoIrREIQL75ypXVl3wsK67teOrJVvHOiV55uro9NDNYiutiStoSOTbQqXBAMKwuMRkpCmNRCiYBYE7GHhkcn1y8o2TTeiaCsKoX2XQCU2eQIXJVRVbWLzsJWo5Ffdw6OmJ2z3oslG3xAD91IsVce7h56fnLa6qNV3He7Onvivd+/Xnz3zy9//sOzz34q0sf6kCDh0dmDUe12nh9feuHgwy8/Hb+/G6++d9q5jalacdqKNpBW2YOFEcRxIRBFBCKpgBBCx3mGA0AisSisMdaHkE6CcdncjpNQz9i+7bquG9e1AoUtJK+PRSHJVQv5AA98XRURyeYMbNLZS5MQDVkoGa+WFH2ohIHkDMr/LyeAHEFiqqvJ916gibdCNiv7MvCsEJJz1tO2yx8mAwx0mBSEti18iSiU0wZV072luewjWzwPoBgRUlDR+fyqqkCMgZkTxpr0VflyoqFnFyZGJidZ5gSnXDAVyvyvdMVm/nRq+kwy6gYzwDK0FwNErWopewkUihIOmxFtxtROYnNol1/fvbtZ7dUEWUi/6DB33DAkMUBFnTombnMkkCmjLZYlcxlaG1bTqjo97v7wrb3D63snt7tiZE3DvRFjSnAUFo2sSjJ0ZcOcSKop+0SZ2XChGlWZODGCzdaPN6VksXGASzxIOJABW6KCbS2N9K+8+dz1xW9aKUpd7fGzWhbTfrHPp1Xw1FA8I21UzorWurpQIyQsZIqZK5exmR8/2d29ujOa94oO9Qp1bTYtquVoJiuBY5SEZKjqOK0Ok1nV8LnytoVL70vEpMJzvqOkLYaa8haJiNKnldwRZatRS2VPlUA2GWek50I0bZQvFpL0PKStZzqtqnoBh9YULZRABVWQimGOhAjdVi/E1GkPT5UORhgXHrkt4TlX81yIs19A1GHPo9jC4tDMOqOhBwXAwxOv+eFPbLTk5sx8/raGAwNoNuBJLBXddseJ6oyB95gRhsyOTN6cwswJlYQgx5mxuUB4RKJIiuSBfHCsTvsORgEUgIM4kCOMlGpCBa5YprHadfP7X9vFI2NXOya4uBr5zdituA63jeygq+zSaeNEvnPz956t+n5xp+MlvvnoOUKLjWg9IleSbXUjjQPOrr195ayVe7fYzS4/f+M5CTHul+HT+3znx1jIZn5i/uj3Jt9+N1RvhPltWd3p5InBchIah01FvTxdw0TE3kGAOIZrCLDOcKwgUXtYshVC3zKx71GKo3UI3qOV3k13/vhPdt/5femCLrUoOfpYo7SjUg6oKMqv73/1d//5B3/w3/2ruh5JFTgud+r41ce/8avTerZ89dWq3t/RxnerdtaHp92a4YIDW6TdvGgnXTAGKG1VVgaWgEIQioiKbcGw2rP98
nR9+dqssggwYBIYQSIvJ5RUgMQiZDGKbUPIpFCRMBwf3TIGBNaKSFUViWSiCjaGFDHEpI+PIYhP0liSGJ1zGpht7jrXpyvnKtMz9SRN0DGwd/Dk+KunbXm4dz1udH+6+c633Id/+/j2HZldmzCK1bKbTIrf+6Od599+LcyasHvSlPHvf/Wzbx293xQ7q7Dj7UxWalviYLSPCEAA0AMBGpNzDih0rS8snHPGsDEmBPbehxCMtcjLHmjOaoBG0SjOub29vcLZtu1TC2ytjSFy9tRJ94gkntQA0qb6m7ko5y2qiDGmKAoRjbG3ho01UBVVYwaHemKVQLkzVxCJCmu230ohC6R55ExpSTB58zYwJFPVFdULkuSkgk44dNoagRIxjJQCBMMFitQOMJ03Xbm6AAI2nNjaSdua3hozVHK82rBLy9bWZFiH+pqsTYiJjdmqnraXYb5e08irihxImNbgJl+Kmj0pE9ACCyWlklCBKoKTEptCVk4XO+P15v7X//izJ5fKq/5pR3OK85Ibo3NRQSxUCrEWajVUYjRGQwKpD/ugTYuTqavX6+XNS/ij77z58CRWdSmrXhllYXvutTQmsAYGjDFF5CjSxcyGS8MVZ0/fYS9BbFNSDRMlsuPQQYCoULJAwQ4ogApaQEvLZbCT6jvvjn768wc703Xtw5TPpli4rpclsBJaASuWk0JshZnTCKiSNa3xOvWLB8dx/4Vy0k5G6xXWI2qKuHRcVbppXCmFwmjagBhjhQVI0GdyRStI+2QKIBpJJD02lHaieUO75eGmWqwD0gOirdBHiZLwL3OZsjujQCX7RwL5GmJjdKh1+dlTVeLsx4WMzyipqrKKzfCLAiCmRPKy1kgIlN1B018akJ9hD5u/Ui6WWY3EZghR0GStw0xK2xZXQWmSTeSr/M6zmv2CG3iSEybXHMmuzumwUcbx8zZX86mFnr/G9BWJWXKh3TalUBqc8fiiaklzYUUWgjGTwEChyoZYwaIKIQUj52cbSnyDBHsWQElwKqVyaWgmXz96QL/4yTWzAK2tj5UuC7epZdGAImEnruvYOlm98/a3Xzqavsj2pMEvfvHBns6ldKytjWHu605CaScv7ZsXX9tp7eLvfuZ43755/fVib4JNiMfP5Nf/7IqWa9d/8zP87R28945578367bcxeTNU3oQTd/ag8KdWuyqsaXNGcYX12nXr1rfaE28CswlsohRquTOCopQQjUFbsTnat/YIBy9cOrhej2Zx2RUN9yPRXgnk295G3jU75bSka7j16PN//pd/ef/y+zvVtCBfW3/85SeVzGl13MbN4v4zOTMuTp8+mi/mH8zefp+ntUhIabVsuBhbZ1kcG3Av0Vq2wp7VpnEEBVt57MODtX+NWdSIYZIhkGPAMNOAs82UzPeGJOpE4qYmGDVdzYZykg+xiITQG8MMZuI2dk6NKKxzfddbW1APYt4sNkaNK0zTnMVox9NdAAhYz1ejac0Rm9a46nmZvXjcz8eTyze+Fb/8+uvbxzp7fYKlbZaLN78zufnG5WZx9f7Cf3Nyb1P1s+v7D0L18X/50e/+xbsNj9vWmk7QAm0kr+SNCit7iIgGSExKh7quFb2oiCbPIokxGGOttUPitxjiPgRmIyxdj+euPf/FF7dW69YamxnIfUxXkCD9TDTlnfEARIOJyfS9T/U1ynYpyBJjDDFNtHGoZAms2v5KFU+3ziHZ+TIKwRpGJMOUIU1JCuJhzbUFphhRhJOpSPqIOa1mE+SJDBEmkaQKCJR2VpoPtWbDS9IBaFOBMFESDGUXsC1CRspDZ7+9ePMlJBmgjwqFYSJV5M2oSrLm3UIsmt+OIvN6Em0nqTEl2/4bEY4UrbXCgiIEwwYESyU2Y3QT9Xu232mf/O2nD8fusn+4kcZipbSIvCpoIVAqVPyIxBZiNuQ0ioEJXDBO4uzKqo1uLKuyk7/81y+ueuVSA3s4wFKMgZKGnITBmtnC1haQ2CiS39G5z69iQOtFOAWvghI8SSghRYhs2DAXmV5mwdngi0JFDcV3r+8ff/gr0/mZmU9pM9NlnMMsiVfEG2MWfXcSpaggqsIGpJYNIrQ6LU+XJ02x6+rqbCKjhsdTmna6cdQ1dckNiVNJLGgrsKQ2MW6JhRUC5sTkZTbZJYYpxl4VyWY1PbBZijrcBqlmRCiYBpeZBMZHYjaGRRMUAzYpL1iJYNmIikgiCg/7zxgV5JwZjKIkihDDEAeNPgSkIEQiMAcZ4ghEwJTtcUSgEIkJPRo6YQUGe3jKhzU/8VmPNyxZKJfPVDs1t6BgFQIUxETCmsTGKcOLQPnY0hYvpMxUTFHc5wz/YTecRYe5R2emdCiGCEUkLpjmQG8yMLmRUagFhEEMtQmuijF5EBmFRFgiS2nqFSaxAMOCDQsHYaHCBiNkpagslyx1LCbu9M7Dl/ypNWez2FgE1vlIFvvinzFPyc90U9PG2cXy0a3FzBwe3rjz6a313Z9N7Giz0Qk5VP65WVseuHK34Xo3WvzTL5a1P3r+9erSK0fUcVD2t35h40MeuaBchDL4E/PJP7Tzj/m1l4s3XzQ3j8b7e9XVmeOFE5lSa0KwFDi0HHvbR9tJ17PBtLc1ZLwOlWmFG2sWTMIlldqXdKa18tijX7TaU6eBNkzEUgAGvY+wKK07mO1j98UvVw9+8Iuf/N7zv/PtqxbL45OHd//dH7178suH9341p4elLKPtzoywmy/lwZ39t7+z3rVqDBvT2o0R26sWQkpqU8i8ITZkGGrQuFhZMZa/ni+f987ssPSFsAW5mNZWiW+TZlvKDFeBwICJVZDGZDY238eiADHEusKFGIipcEXyewoxOC6kD8kP3RqDTvo+yEZMYTlys16LCkikFw5MgUuquC3CPEQbn8I0fOmpn75+c/KwO3v0aDK9obrwm+jf/7PDK0fm7//6wWr+YHKtWrlVNTv6al59tXEP4qK4f8bPvyYrj43VJnKn0pG2kXxSKAgBIQZmAWKyqQt913atMVvFrXZtS8SusFHEh8CGkXiOhNK5w8tXqrL0fZ/II0SawoLYGhVECdkJaFuAFV58OrdRFIAxSXYSMfyUOeNddrilsT3qAAuyFzTzFroFqcS8ocrGwnnoSmDy9q7XxF8fEDPoxR3teZ1HZkoPjGpsqSLp7tHhhsp/Ls3c20ZBVQd3Z4Fe+Lv4rX/JK3NgEIASZZA/jRMJP6fhm6QWRySV9jQC5O/BSI4d+RYSCCyIrSWSGlTRWBSymdbtjpz8p7/7tFtWdoW4Ul2IXURaGpxp1zIDHNlu+t54Y01fgTiwK6LpFEVR6WTm752c/OX7s6OD+nazMWaKMq/NUAFGBRGEmI2FOZVdJguNgFGNQ35VAilYQRkXIKbkg58QBpDlkmA1GXgYIkuxADlw1VHtpNS9iX7nNfubjx9Ndhczf4JWsYKcRaxZlpAzhIWKWiiMcHKZgiVnxVvfn/r+mXVTMxs3K25raUY0rrBRO4NVWCQZkiS7TwYZot7kj1i2nyNn/oGG4WGwiYBMBGN4oN4Pi1JNjzmiZFsVAMZaDPArDU9b
RnjShgUMTqIayWeASJPflioRJ38ZgLbqnhCS9sOeUxiApEEYJoyBt5jPhLDm31QmycQXFZxj4IlqtiV0bPkOifw4sLGS9U1qbgePOVEBGcOaGGJDvDc4e8wNlEVVaIyat0Msw+YiES01JUkQZfmR6rC9Th35QFRDCpQa5nBkVWE6PazZnwB5O5KaHDYprDNnAFQcjNjaahWkglRxsufurp7NP/3g0LWjpnGyMLoaYzWJKzXdpijKdVPyyvWrwofls88+u/vlw4Ojebu6Ws1HdbW3i53nJnG0+aYdPTzZfXDHP15iFcOm3d+fPHnl8uvlZD869J99aR79mmv0zEUbxLJ1IGeK/iR+feznP6fb+2a3NLPaTeuijOS8VE4qy+TsyLItpdxB5QLVdTdCNW67Ao2xa0KlWPT9/Wf0YLF/6bByUy/K1mTjWnjAIDA8lBUWvenr56qjg+cqO/t8dfsnP/3Rwe8+P6mXVWluf/wRffNNoSMLgStiFwoP77n96iu58mL14uW+CWxD6Yz33hinCmFi4fR4q0NXACZYpsDEFZ+Iv7/xzwkCs6R8aKIgaagxaSaSIKTMOXmVND0FxhJGRGm7GJgZGqDBSgyFK4QFQO/7AoVlDl6MsejhN95pYYxVCdYZS7Z5tt60TV0zCKENpjHcMXvLXmWx0QJ05eC4L+bt3mY0vvWk0FGY2vvRH7/xJ3X9Uvsf/uqYa+dmTo56V99cmN3TODOz61YuffpN+9KlCitG423v/FIKgYpK7MGBTYaF2CjDioS+64rCEpEOWYSZSaLSbFqbZlGmtO6KITw7OdEo3vs+9IYNgBhj2kmRkAzpSdkPmRLnU9hyujt4SB4G0vcaqiQRnYeVkmIrpcyqhORkkISjQ4ec1MCsA8UqFemt+DCJF4c/mvv3vFy9cPWluyZ9bZFz843tDZHr7gCNZX0wBgPcoaBeUBYN/554Z5ope+klbevxVhq1xS0B2ho4pAk4827JseVhrWWYbB6wDKsmbZym+AupyBawFfrCW7RHExn5xX/8h0+4Mzse/VLN3MSlD0szmpv1pm+Mg4ozgUWNN6VvxQdrC3ZqmEIZdWK7uPjjV+r33tr/9Gw54lmJ0DgrLAmdhSMOiK0ohr1DmrvAKsRmy1eUAaHVZFwy/KAoUXfBrDCGbdpIqwUXiRqmcISS1fkx9/1m8+3r5aNfH48CZrSQhaVnWrejwvPZs45XqJZFhA0lCgGzteBYiLFopO1Pfb87QsPVuK3RVNiUaCrdlGhbV8GBHQHQArDIUi8QEUv+xAzIKKJms/+t0EWH/ouzXBdJCCRbH46tJj190Im+MDylw+efsV4MVL/8GBnKIEtyTcfAZx62sJKmXmJWQYxCDB6M2VN7un2Gt98we8eySALRRRMrXfJAfA6nD0vuXPlyv7ztPNNknM+LMpFKUgLpNnpyu6tD1uvl30L6CSuBlYGYoflBkkc0AOkDWXoYo5NFt3AWHhBlIbaopsEpya+2P2tiO5B3TAoiJLCqUauSOKpWqSQzMlIJjVgqcbOCxvjw737QzL+WWTW1QGjHerrD/W48exgil/y7Rye7la96V6klMV3Te75//Tma7dnpjhRjedqf/PMv66+PsRbeYLdwTU0E6/en1w53nwsUFs083vmnehKidUWATCgoOI6sUtCgSg7KqydmtVRqoW10hVgpnAWitTYU7GkiXDc9L9W21X4jEwkFGqJF3y96+WaOJR98/0+LvVm/6NUrWgdJo1enMKSksEYtMXNBEmJpyoNdF3ZfuB/w01/e0udlcnT0+UcfvuKm1UTKXbc+acQpheBm0/2DanHymT+q7GTShd45Y2AEkUAGrFaDQpm0SFHLaplDwQy0ZO80m0krdmQoc/VMHjZUaeDaGGKkD1OQLdZhbSJpq88tITHAVoJEeLLElitXSS+ILKFP24Z6VIdV6DtvjWVl7dUWhentullVo2o2cREkG+GKZS2Vq0KHCRf28KVe22826LvOt5t60918eXLlffrZv3TP//7rfbc6efpk8sKbx6vi8cacyR7VV5+s6vaJfsub5XxjVmV/FkxE3LDxlk0XpcvOG4TgO2ZhliiBghaFkaCGraiEmFF5awuVYKxV0SCRmauy8q2vKpco/oOqPp/s8xgTHparyeyIWdONnKfT3ypxmbiypVRyup62KloiIhVR0uRQneQWW0R2cGzP1tPnhlb5SiMmSt6hqmKMkfMVEwZkF8QZ2FLJBlrbqyd1zfmrDRV4yyFlupA3fn7D0XBnZoM9oswZ2A6/dKG08zYZly5setP9muVYlMJckrgzeacQ2+F6oqI0YiEsDAkVSx8LUx0U80cPv/mrD76Y4fDV6is6bW0DDpaiRaAYexbD2gZDbWAGdlkRUcZJgLZhYzhYrpYL/9ar+Fd/cOPLU/UWERzAGkGSHB6UBBAyZISJWQSsogQhCDHl1GMSJU0alPSBZucyBYZsgNQheRVSoVzhKPVLDEi0hCjoRcOl3ekLe1W3noOseDHGbrrer9WyY4iPHmwEiCBKQ5sHAhQqEmMvEkQCUSEMNSQMsUpsznnCbDnto9PDqbRlFdG2u4uq2SQxEbwRkcIVMjw7oDiZ5T08axKJCMRBAg9C1cRgyt0ZAQO7OAuOzzNOeCvZx/AHctKnqoow2226SmLvpy1o9l1HHjmtNQDFGEDJ/12QmmuiZLqRwAjKe6kUkJDldLkEpjcGpIwDybKlYZJPrqu5DVOk9f5v2X5QgrEoDcZDa5D2U2k0T2JnztHXQIYeB9fS7cIL+RAlI24Vobzaopx3qpwVF4OCINMlyAJWDcgxFSwcI4IxFlalUFtbM+b/8Ld/c3bn4+nUPIS/bn3Bm4p0oqsirmYv6b/7Xb46krO77sMfzjdzt5hzkJKqvqjZ1izGN514FDzDXtlVpFag/ezyoVkRz668crBXPlkfx5/8qMaTfjImWK1b9cQFS4xiwEU27+xMWVBvg7e7+6dXb/5m8U0VmpLEaXR9CBNZb+bF6OWaC719B+x0JXaF2DpZeGee2/sf/7vy0vPdcU9qTIAA6KESAAGCZtWmBchYC4j2yj1dvXIZ0EXV33n08c3Z1Vff/ZOzT/5f1XQV52zHppfYtv71t/HdP/resgkPV+buQhmdN0ac40hIwkqTc2ENiFnFkDBglEWLonjW9Q+WfrcyITWqqaGPLEFUWTPAARWRKCSkULLMcBL77LtABE2CfbYxirE2RaOICBNLiMkZqO+DtlqgALH0IqKIKIrCucI3yQvLh8aYylBHuiE1JnInc+xXR6cH8rB5Vo/Xp0SHs/HsLfroYfxGw+dP5hrr2eTmV4/ds75uzay+8q2vlmZeHLZ35r8Mv/zOy98+W8a4CtwBG9u1HXMLCCiIRpUgEkR6IFrDIXSpK9ffShQXY4itDTFaY9JhiCqudGU1GhpbykyHrVukInGXosa8NocKlESNMcj2FBkg3sp5AKhGETUmRw+BksrnApB7wZBOAclCfDUwaVWWLyzWC+60GY0mVRlYTkyQNBZcrJqaq2sS++YLNEMombFMyHv
cC39JkIgAlPHqxPQE0iAyoGbDGx64XOlN0xAbNHQjOYhYc4XP4l/aqrHT6m5AHaEKNlaFiagPUYUMc4So5/0dM2+X//EX9+bH6+vV0RhfRoRAtijQM4xlQDwI0ECWESaOQt9HMdawgReGdQS2reD5Sf8X//rlrzorzOCRj4iABskO2BezcUAiUZH667RrTO1EzLqwdIGnzaIm8wgQsrlUXu4lJitEIxA5nUIFg2FgCQIhtbQ8vuOKmsbWIABMhhVRRSkI2LJNu3RlaMjNFdkE/uQ6OrzmrIZKtLZc85K8LhdEIhFc/NTTCCh6/jRoBnE57aNINUYRCLNJ7m9KyYnTDJDp0LLlzxYXNLh5vwDAZO7WhW/NlIDUTIIeOrjEbgohEJNhc868T73mhfk3AbaclXLDt8yrF1B+9DWzG0k1WWOmJffwSpIt1tAuQAcvdJyTl5HuE8IQ05lO0UBgZSJjhnDuZIOTZId5xZLhgbQIT6oHSpw9KAYLWbbpMGk2dSWALJMRMMEQM2AEhFx6efsf1bzvIiYYRApsuSgLKaJa2JGlgv76H/7m4WcfXR2j9uu7WL1WhjeKzvZnY78aX+5ufr9EswwnRCu/R/Ty1WJVnz1dNZevugf34OdFX2Dqit6Ib1bG9SEUlejRC8ZMumeL164/f9htFsXHX+w9+XBe7trAgXsyUhS2s8EIhwAIq4gadupd7Pnw2t29/U/uf6qrrydFOR0faWzVAPPVKe++cHmPTxaBHMMprILFL4rrrz33h3+uvCdPuiICPYegHIVEkh+SkgcYSaQQST1pUOoJUTnwc4dXRnvYPTKrOx/ulpOb3/6TZ7/8L7HcUG8d2T/8bw/jSfGzX9/+1rfffu05t7rdPA1TFKDec+Rk8MRQCBlSriClyoikEjgNE6NliGy+XOKlXSu28lJ42B4WERwhPUjYZUNWhgEsokY4YWYEQwIKTIggI9IzyDrrFPCtt4UVFQ0aQ2BlCJjZFtZ7zwoQhT4wDKtUdQ0OhY3QUHAhHrpRsQoDY1nm8I914+396d7LzysfPLeQjx8bc+uL9Xrdgq5A25f2X9k5uLFTX/3xJ5/fW5gzunT2xclz7ujX//jJ6lf+/dd+n7wNy0gdGY4KD/X5YmOoSSEtIYWSdl1nbaEiGEzUDbP3vnDWmLSC5ShRVdquZWN6H2IQsmJMInREACqw1oQQADLGIvnZC5hNRlZVmZMYOubLggY8KvneabKP5oQmDxksw6lOFI2MGA+tsQgRWwLl0WGIL0x41HDFnF+XaclKqdBejGQcakLGrtNfPL/+Buzt/J4kcLK/puFPpqYtzy3neHOG2Uk1qZzzvncY0wehaPqKKZc1wwmJRMaZjY1cliXGNLzEaLgiQ8RkK9OFbmzLou5//OGzn66X+664Odmp/HFM2KEEUUOh52hF2bIECoDpxVXV6r2XZrfuLM68ZccuSoCIHVU0/+//zbW1nfgQwUZ6I8ZG2GR1r4FY0qyrSCtJEUp+C5w+67RUSDV16DLy1A4lM+wELDQqIpHY9DNTUeR6qQIVQAICMxMHimr+9fdv/vMPvgilWLBGygUjMcIFig2AKNJLNCALYsCwhaIoLHNGI0QimBhqEnlvqINZS8NpCzuUFB0egPQsZBruFoJOxVoJ+SwgGdxITLGAOdzZmPzlKe+Gc9neno7hG+WudAtT57q8xX3TxoYw7GtAJBgMI9NskLHjVDcptYaaOhvD4NxZCBMTJftVgUZSiDIrD3mfmi1Itl0tpbSD4YXgfOLdnpftqyYkKx2opq6XDA9MLspkq2zVgUTGTH3NEFiZHgVJNl80aEBTfAWLKEz6n9sORpI+HqDzLUf+mWWtHnFqqpAp1lQ4G1mEVYyyM1yZ//rDv/vNvU+vzlwdj0vtrLQraSfiRTqjC3+pf3Af+4bCWfnFxyGcjqZd0R5Prl1zbRNWz8A1hVbrGa5epmLXFQfx3rON2yv73bNPPt996a03Zgf08Pghbn1Y0aaS3dZILDo1HEotluKtsDJ6KWobRFHtrqaHX5Zy794nrjk5nD33ezdf29t46RsD6drQhFIefrk6WUWzp33Rh6V4e/D2v5589193TeSzjZDhnkxEIUSBBWE4kh4oQJEREQ1roRI0EoKIBwVzeO3a4os71994q/1qvtm4N373j+79+Ocb/uZP//jqpz9f3flRWx8tX6WXfvib26unwOGbmEjRcEy9ryorwCoCVCwTYAqaGlS9TIRrafjoyWbEjZvuz9owa1E3oTSNoGNtxfQ2tIEVGAEGGBH3gCcKUE8Iqh20t4gF0CsKm/J02bBGNWzati1soUFDiEZMeoass8EHQ8YoB4kMqarKmhiChxLaUqyALYxC2TuNJ8KQDfj2yezykc5G339i755Nn7R8iW1sVqvHD/y0j2tddKMXgep3dm7uXN2dhLLZ819+8PkXH355fe+lWguPDWwrEiBGpGMS0SASjDGGOMTAZKy1qkJMhm2QEKM466zhEIO1Nt0R1hgCX7v2QrNem4Rf5wh6Y4xBBkgxqD+FiW2CSROvnNLRUYUkYVJG0n5rDpastNna5XBOEwKQTBwTrI2sduAgkVSiMmUgL8VQJGZmPq9Zj3l+M2D736m0pfrIpNuaeXH0SOPvtsAOF02+bHjwob1IqbkwVySQVXXrl6DKW6g5OaTknJl0MwMZh7fbH4iSgSKqcEqiJZMS42OkqEHRGzJd200ulavm5K//6/qrmb38yt5+3HRhI8ZWUSmR6IIix3Sw8AZaWCaJ4dGKfvGwFVjLwZoi2CjWrudP/9f/9lpdz572wgrPNpAKrApRzP0RgiIJf4Oq9IwoEpDSjCkhrpoTLzmJNJE2FcQmASciYAqUyi0LS6+w6X2zpmEtYfI2oFWxyn2z4atXD/Zmn2uwsJ7YgjIIy0wCMrYHNEJYojJZslAQ2BlXOiMQpa2xBjMbA5GL9hbpaUkUeJWtT0u+yJNUnTgFjSDRhimzSJLIiAwZUIxxyAFKp0PYZGqwymBqlffDlCGYbeNH214uFfdMSlLK25pkGg8g5t0s87D6gWSdffbEyU9urvyZF0EUEyE4KiUnVwYELNsjkRlnRNuPLT/fafkK0PkaJe+4U8XbCo14S6WGakoTN5wtb3VYtST6FqCpC92yzJiJQWxtfuOiySmI0t3B2Y8usy6JAU5fidmkuKgkOBgyK7YHlxWG2CqTsYBBJKGCyJEUUs2KX3722cdffHpwqXLhKYk3vBzJ5jisVxRe4p6dX5OpRI7vYnUc50/o5Cv5/GmjwY6/lGWPYK0ZWSVZPvNnT+zR6/b5HT8by2K1/vIuXX/t3Rdfvnaybsxntyb+cWN2EFaBCjKKktFodFysiHqo137TFiMXxlfvjGh+8nUgh8ml2f6Lnz2Lq2fHQqEup6/NnpdHt9cheHuorekXS+MOj/78z6trL8eTvlig74zdEFqNrWqw3EE5AAEaVAToiQzQgUhaS7WyJ/HKfdGt/Khx/58ffvjO9f2/eO+7y1v6xdMPrr39wmsvTT/52cPbT8Peu64N4e8//+HzL5
U3X7sSEG4/q5ulV1IosSqpxjQm1YKaTE26o1La6LDBbKGzMxnfOrMvzUYNz1qZmNZIB7SgwFyA90FCCBCv8Eoe8KStIkA9xCq8UiB4g2AsKamqhVUSDeKsI6UgIR24EERC1F4tmSgiUQCBRpEYtM++DrGklsj2gQsW2BrxNO7ujNXEe78O+7/n2ivV3/98/vqrV7/5vF2vV5efe2WzXCzj7moFavT9d9657I7a4w4bdZvilcPXHn390Ud3Pn3tuX+1uzNq2x4AeM0AIDF6NhAJbdsSo7BsbZFMKDG0/yIRzNmghygpGFBgfnI6rut8fyXTaGSrjeEvKlRy3GtW+6ViyprMFzRfZKICzfNuquWDkYGQDs7BF0Ct8xlENcbszpFu6AGD4nN2CAEEjQk9o+HCVSbehiNju4JMr3MoxcNvnU+9GYrD0LKnziEnHeVXRUMvn1cXrOecFclRNgJY5oGSdGEVfbE1uOicOXxLyfMzskdhst0kEFlY0wc/2xl99fX9v/oqmherg12DvunRF0xRJEAVsKBexUQW9Vw4G8oAjjE6VquuWcUJCmOZo1Qj87QN77xdP/fc/qJri9E+uIRasUVAoR0QlAOl7BoSYpCkhEjWLYacrCfT9JqklyAQQ+WcRke8HdGgiARJ1uSKqGIYREERgF4kErON0rN1BdB7SFBjVWMgKVhEBTEIhFWU0afRS1lVyUNKMASVK4vCqrQSARZGhASgj9Lnj2G7dt2Om0wJulDNH1rKWAQlcCJhyEKK5DFqmIVIYgRgbZEmP6gakBrKIjTOlX9oQSLYpDkyr3Nz/keeQElZE/xKAMGSSRQtZhJVjZoMWpMCcOAaIpVM1WiMTbEo+ZvmIwZLLNCYh5WMXCinVY9km9dERVMoYBiJSZECFHnoFPNSacu8TjatlL+jpm6ZGaCYpFOU1kJEeWpXApGhqMqGUouNYeamwbx1AAnyghgJ0pB0q0Tks2CQ2RKcKXJIMmFmFKKsaiSRFVMvnG4UCypMQChGdrFufvLBT6tJXXBnOJQk4xiY+jas7mD5Esi6frxTfPWJ75+yNMtHnxWyqNG05HnZwlaugNImiIOyaTfhzu3waGX9qCKO451yrxLbLe3S8r2PHDqh8cQEHzZLMzMmUMqWthpErUdx+WZzaVqfrG9G8QdXWzncY7M2m48f3JK2iRpev3RzEa2EUcNsFxu/ovKN95///X8jvfWPOm4ltiW3LH2PIAgFBxLeECLQg4U0Ii20IKQhaqRe4kao5bAWMyrivL/xyjuffPkLFzd/+vvfXXF5584/uJMb93pX3DzetM0G8vt/+OKbN15/eLv5+B9/YfZfdFdvhGajKUNToUZICCUwRZyQsdK7el2Uizha6bjl+nhNdUtcVz44E1iAUEVXGHjhCSNCexhPslF0Cg+ypEHRgQ3BEiIJK3lYFSXmtm0tW2awsARhIWYbQiAlx0XajhEsGDZ56FAABaZI4BgaNKPEDA5tP56MDouD33x6+7t/9t6TzQ+/+EF4/bvL0Ppf/kJ2LttHtyfrNi43WrM9Go3ILzdf37+3ebrnrt+99fDhx3dL2S1DqM3tz35z/Dtv/bmrNYhPtyFBmCESYvCFs6n0JmljJomkBZihXDuTpB9QIIQwqmtNKeScDZnTuU7QYggYYhKS0JC3aFgqGJrLPNKKMyFYxANKl1prZgZrbqsFgwOdJmHl8Hok7YI5Vbv0nbJ5MinSjGKMVRVR2eLPw5qWhhQXHWbu5CKEcz+AYehJVTTPuanC5sFMw5bhsv3FAMGk7DYdLA4wTBWMIApVCyKwMFRpK8nEhW+aBhvNM1LaXGV0NtHYVVgNGVAncmmv+vCzh399r9198ZJw6H00XAbhKBQDe3DLtjZRa+gIJjD6qJXaIOPAnbKj6EiUhSzzSFqLGbo/eP/ayiiPXGReax1ggpQejiNRBAIogpUlcZ6FYCnjxSqQoNRTHu8klxJBGmIsc2JxJ5oQoIAYpNlJQKKqLEQCCaJBSUm8t96AqBc5Fbx86fDqYX18fDIpSlgKJsI5a6WoxW2U7cIICwXLzgaG1bVZ7db7k9kYo6iOURQBLpANarxQUIMApGIPpMCQzC+SAEBJkBcJiVEQoEIm7SCGtgsgQWquVQY6OxGT0WTqAjDbPImyJjSIAJuMo5O8TkFsUkiwDiz7pA3gLOfVVJW2pCRj8pNGaVmau1FNdtM6zK+pieBhhTy84kSS09xtp2UCAUTKgCCpFrN3VxJJbCUIwyohAgoxmoo7ZVoga0p8St9I0kFXzq9LVABjGMlaezhlms2wMjqtiigCzWwsTlzMTP3k3gcVFNYyWzCJJEG5SXtNAitZkGVyGhlk2VQQFrEibGwp2difAFEK1klRFf/fn/x82a53D0oTViWFSegtpNamZv8odJ/I2feOph/e0+ZpOBxXv/pgFFp2vWhrYyATmUDiRQvSLprScE1ccjh1hsWWLXB294sPAurdF97c+867Tz+4RYuV42KkaMsQ+mSeJ4GdtC1efOVT2rn36MF0Zqwxbxxc3Xv0sLsy/ujxcRMcYOud/cPdl/pPfgEPCZW79saiq47e+450JjzrJYAbgzaijWgELauP6JVIID0QwR7oAVUKQBAByJJ3Kqo9qIVptTnZuK7ef+7Vf/zgb3Zn/vXxFZLrP7u/PHz57cXtLxf4+k/+8ubxfPrD//2T/uyzvRL1vN29siOzSes9obBirAjAYcQ8Dt5ZT+ONVus4XqFeyWhOs+Mwkafu5rWxQOM0cFU44aBwESqgCOoELaMk6cAe1DG1QCEoCIWIV2sYnmzbtPW41iDRBssuhBCClFxKiEbJEItEKEIf14tmb3+HlfsQmECkIn0UiUGssdpUEgM53jztb1555dYnHz+785vXjib37y/u/MC++u6bT9crPNzMfLO45V1pr15rsJzPv+npyeJgPJt37of/6e8Pi/2739y1Xt57v+/608X6i6Ppdd+vGaoaQFFjYGKYNA8rg8AcQhAR55wO410qe2xM7Pu8hGVaLBbG8NBfp+O2tbXLeHL2kNIsBc6N+ZaojPMFp0l6jDRh5FFSc5BLshUgPh97Fcn5gZitZdWsVGI2aZrOnfiw8c2BgJTi1BLJ5wKrSzk5H6VlkGbEMdNRCOcQIIbXvRVnUK6VxDyMvLlk5+FOhsE9qShpwL9jUB3McLfMsKHV2P78aJBCZS+utEtLLYJoBES1Zwaj6kN/aTr56ONv/svdsP/CJVkHHQkajRusiqpCXfGkpp1am8n0uNiAG2VLsRA2pBWKRaj7wCW4rLjwoTLi7EKf/s//w7XJtelZ1cXR9ER2lzxbYKelSYsxNUQd0GseIAPQQ3oFBZBaonR1qjIQsB3H0jtRyrweGpIJKLGe0/uXARqIKko95XE6gKXs2obiuHEzh1UTq7nYFe9P9le6AghwASRxaRDieBRskSobQ1ULjVOZHe1UBxbTgAktMFthssa04Wkj4w1N0Cp1hDQJB0iv8KkeiyBAY3opipiIVIpohMBCTAwDxGF6JqPJiTNndg1Meh7aKpw/g
TZb24KBSByCfJjvSqpBpJS1hzhGG3rCg6FRYVqBDMtskrA/B2qKyqTekye2PYw4rM3RI1KpLEGKARYmNS9TmUp6NlSSVnHEI+iil4JWcDe0aHw2Kjl8ThBRLersiI8cMjJ+S0dHKWc6uCNCnVYb8pZf8JqAQhHo5PLoUhqjJATMO/aNb74CHKU/5AJzOz9N9sumXvRoZyOU2xs7nukJNhjRaJmdoZsJ3DCGeedv4VJ/1avtBWv4uoIrH1RMSqC80XAxj2CRJSBXLCvR4IM5J0kEREKtnNw3h9oskVymwsyZYjJcxOk6FlBx/29x9wIavuzueeLXuKzf56fOrwrdf+/sq1L33zC9958+O3tquzq/3uwHez+YPPfepLO6fO/Pyj/+eNB2c2t8OMlxht7FzcOQj97X79yY3DP/sPf/bug0nrS+HpTOpGNle6wUtgCVpCV5BG0AIt0AEtuGfhQCow9iAJkaioatAYmQygtHQiCSZJehlJDoKaROEmAYyaRvWcMWl7Dn1ueS1t29VlYlExgMVQCR0g/lxvWfAlQxWJiUtCAfVaAQWhVpTAiLSKUjFPHLYEU5nq4V7ZrR+8987P/uHoo9kGd4vXZ7ub8fkvPvvvv3f2r//6pbu3Y72xcbAOVbm9Na7W+4eXd8u4EbChrI5B0is60k6jFzhERBKT5TsRB9tOIQwW2LwXeIiLa8OKE9hJBtIVoGLbBpLSwqhcznbcIrnZWxOaBEJ2spIIPtW1Ji6yJwsph1m9KKUvLR8nAZ/BdszMyTZEVYzAYm5xw/NsSVeHWGH0EdU4UCApFwbG+EhibSak7bD8EA1l2HMKGgp6CxDOaOOSlFF0IjuEI/YQD3giDyqgDvAgM/cuAUcVoYArHTloAfGRShIvPHKucPAKBkoKHJiYC1TMLfWvfPiRbJYrXncbOztPPnr7xltjV6AG7bg3br39xm/eeOLCE5fOPXbjxluHt+9JHdmJdtC2YHiLPVVVeo/VeildsMzAIgKJAHFJIgqJYVUVHLjmlRTMTbd00n3rKy+88JUXfO3nR+vIDFkXSqFvpFvGeiPeeyCzQ9A6HM+O7+27nWl7+cyDTsMiHq+axVq6pj833ZpA2q2t6elTdYX9t96XM9v1md12XRzsL7uuD91qLotQakQnXm+/d0+kZ4iAOpKnuRt3O27GCOsW81rr+X2SqpfNiFnALHCAq0lnIWqQRYhHHa3o9nt362ISutXrr786GX9tb2+3C0HIrbtITExOYiCUzCIxEhyxKIQ4GKOWtAchI0M2wjVzBXHOeMRgMrE0ksKE2L5fradSG+kxsydEkSgSQJ6UQSb860VB6tKQUXPzZhMRieAoplcTsT5YiZhi0j2Zp1oqcE2NzqmsTS2cqQzsRW0FJYFtunZyCnJvkEvo5CihECg9ZGM/6D1SRLVEp+YlccICUWjag00JvafMvjKmjeBEp5u+JIG9FosHz5xhFpu6VcOE08tYLj3pbmOKRzIEdGKTGQ94l0mm0+tQssZLIUCh2VEgFRuZ9zP8yvQe2Fav2pjB8Hwd8vtDtPFEJdC80CZRV4bG5AQPHzSgqRE2ESZUzCOAJfnXIV1phvlPeB5J0KeevvK//S839++sT50LGzsyrrff+vXqrRv/9NwBrj5/9Suf+867n7y9cX7cHi7vdA+2di/tjC/epwvX5+7R3fOzainF7t529dEyvHd0+7//P/0Pn/DlB8wtlW03ajDpV0TLnlvHS9ZjwZxoCTSkjXILXUPRAZ2iB0XSCAnKQsmDNPVAZNqCNPwjJltDoBbB7Wk5eR6YkzwGZtZremeNcRCO583uGWMQ2K+j1O4l0ScyV5+gnK12ChVSccrgAjQi1KANkk3VDY0bggncVPb88ag9Ol+tfnv91e6jj3/4lSfcciaL9vaH7/7uJ59cunLu6e2xP9gP/T1gNxBmq/DodPzrf/hffvDf7fH2eQAijE5ICFElCPeMlSJ4IhAJKKhtw9EA4Qx4/NdfVrNhcNd56Mhquj5W5JlAyBgVNnVKsK45PaWqkEybfdJqa8x5K9topEVm6Ynlk6AAIJvpDU932phiASS3vMONGdzX7Qei0axyFjcfdB1OdjrNEBY20VEa/Cig7JIiVsR0iZIYy2CCo+RMmSpfUiawCinYeQeU1u0CJTyTAyqoV6pJPXhM8I5r9KxSRq49vKqDq8ix05ICwqT2b75++3bf8imPfv3At+cfPzdvb4XFylful7/9WWjDYjmrDhgl/+r1f/Yi5dijBTsfIdqpomKialSPKlKNKg1BVDgU0QtK5iCt+Qy5olyoop8XgZfd6tGL5/7oB3987vSZw9Wy/XBVa2iO5iqh2Npp3n0XFYLw7Ppby27edavl6igwy+a0+7Aab+1NxpvjyeTi6e3pxnQ62S58RVS4euJG1U9/hlf+5eVy4kPsyLNAi6qgiedSXcGudt39pS88FN4777C4O6uLwnWlrjs5DuNy061LRMhx5xbw65oi+rbvx53f8MtmsTw6LlD0nfTr+eZk2gd+78btR5/4FNg7LqFRokrskPX8TCYWElXjM1tfyxn7kERoteGHSrQFdNaiJUdvm1Sm3AtRhZHPjR0kCeO05bfmpkAusS+IIT6fEgBOxYoAsbaBc8qIsZeEVYumlSjqmZCMhHOWAwYImShbbQDD6NdS4TB6TfPrlBf05MgNnL10em1mM/TTOccPgYGBZFyZqJ6/138Pcp0MH1qBnoruoUZP5HNzOtFsSGhFAxFy96t0knpziZ/SGmftnMUpdkZVT1mf0m80ID6xSwY04OFXRf5FqaDK/8F6U2u4SHObDjKeF7P5C55QvhmDUwFMISIqRKRs4lcIVGIcIHSIKqu9TTUzTkr4CcMsxoKiAAtx27e8sz05dzG+887HGl+o/MZbN64fLqtxsTW7d7+bXz5a3Do+flCPi09d+PS8O+qa4kazmFz56oH/wIfxclKOz5SHrnjpv/ziOz/8D/3us7dXZc9+HWvp4Vp2x9ClyEp0odSAG+gCZJPgtRICKDCCoPccY3aiYRZjt7mkdrRpOJu3vQ23mVnFJMMGs6cizINU8iY2S90gb2dxMAKlRIlNsg3Ngrpsf0JkR8slF08whEFsRtAKRyW0UCoJNXSkGKnbKngHsimbMh91989Vjd7/4ObLf/fZ83FPPnrztz8/O9HzTDGA7jadFE/tjFSrueff3Xrz4u6Td1v5aH/pFh/snd3d70p0ymPmoOjAPWurVDHWiAbhgokdwws8KFg7BCAfhJMHOB9j/N5fszo+BjPVUWaydT9EabySn3+1LiEd4qQlTwlvMIoZKIqmAwwJDkotgc1Z8jlI7A+jEIqcbENJVWbS+iO7d6VIQkwZTrTePv1vrk1Pbrew0SwIhLxIgWGq6OFIGTIf06YOs+0COYLJzLwIA6xSKljVQwsuGRW0UKpIK7OdUqkZtQSvrnahDMEHVzvlGErl2gWO1Ua1Pzt++c5td2VSKNBx0zfHYzr/3JNv/epXF8+e/fIL31jN2uWDeRnHXtynmqv7N2+vZ60ryrgI2gcmol
Agou8kBhO8cAydCBoRREEMIFcWo7Ioi/XyXF1P9nY2ivHuqb0LT1yhew/ufPxJc+8wTKp7VXH0q5f9+fMH77fxvTfb6akIGZWu9L46dfriqSd2plvVaDQ6tT2aTsuy9vCdshIjeIiPcKJBWnz9Cy8c3N2/devmxpndngMg6IUXJAW7ktHCF1UkVYYvEV03u/EJc8HtmKVe3esCLet+Gtfx9vt3u6W4sEVKodVuIUqu7bpInd9w6kgkzkOj6K9//MGzy889sne6Waw8F65gxBKIUfoYg/MVEIHI7JMjBwJISXsbRCA52Rn0xTHakq40b0qu8rBlh5qN5m3woZaw2dZKk2MSVRva9pTYAyBK2pjUkIETgAoxQjVIVNWxbXxhkQBNIxuvghMm00MVq+UASVKdhGhlXHfYDZd/wHp1K6aN5ZvSL+WqGycAdPqBDEsb4Ju7xFzzUu4Hf7/btX8yr30dIkmGixXDlcNJvqScp80PPqt18mvZvNuE9nZEHfMwtg4xUsbYh1RNCaQ2dpoCygNQffJm03wghQljiSvnaTvoZFEyIZVfRInJDgCpnbCRV27sKTkJpeugGXY+KXQIkEQsIqZMfGUAbOY5IQCC0iG4EY9vXP9FOf7wD//oh5cfe+b9d+9O/JXr+6+eefzcD7/9g9fee3N/sR9KHBeLR05d/Pj27fnBg3oy4TDtx6cPcVb3NvaunT3cP37mO4+ff/7LHzYu9MJdwWtBD12CGmDJsQlowA1LI2iJGqYWiB0QiFtCIAGTpOkuJdTQsE4CqUYr2Ngl4aboAACkR3CY7ocsDx2aKrVlFGRXXpI/uV3FxPPK5ROzKucRYlYBaMrBBvAyFUoOHijJtL9aK40pTiJNeYuX27LY6o/PTRe/+se/d/P3r5zevVgWHzWrOB9J0022JzX0aP+e29y+9NjFR07pI7tPV2ee/HDe/vrWbFP2nc6P6+l6MpJW0IBLqCdznQUD4qAOICgJDw0kD6L53KCmZ3EAgZGzshW+UeNw3rNyFhKjBZqThAggj0jsXmRQHw85G5hkj0zYSyBnbmT56aT8QOeRWnobyK27IvtP5WY6ubQhCeiBwZ9ssK96SGKQErS93yFpU37lRE8hHiAx62Yce7ZgqiAoG1c4t1AMFARW8xv0Ci/mMKqVogaNCAVQi5ZArRiDK2c1GXmOJKEKriSu4yu/vt1MebLltQv9El7Dg7i6sLt15uqVsqi2HtmaTKenz+2tZyE2/Qtf/+p707d/+9JrrNAAIi+rKF0vnc7XqxAb0Y5dYA5lXVyqt6rtrd3p7pm90+PtzQ0RunmzXCwWq5Z3JscXd1/9Lz8Ls8Omjt1sWe3t9W3jvYwfHO7tTMbf/vZ0PB6f3a036hIoSlZEMIlKFyCgpg3A2sNHccolGEFlRNAgrhp/7w//8K9+9KPb+/vVdKwiVLCCgK6P4MJTB2Ymx7IORVUxYlDSvowiEI5dz/AIvpFFgZI6lgCNQShI0S/DLPJaC081S1TR4NmFRXvn3p0Ll0+XXEBIegEB4lTU+zJKRyBiB8RU9CECBTQkeh08WQgVUbX1l1b/BWZieBj30yY6iXcAkECJyHGK/aYkUJh6GMJwqsMpS4Qb1bRH20xTzbI4yxMikTLbXgMFIBK9zVFPBo5IPN50VGwgPPhESaqECaSsGbdD7jfYIsOQxi3N5u/CSZohDPzE34OpTjAtsjGl2i6Gh/M0Tkhd6TdliJpzLpLM2U4Zzmga1uBYF27deJKYDr9dVYXActK6AwMkn1t6m/h6dpIpVoaFBonmw/df1xrJFE3JuNlEQ2zLkouE8bk8wkuXLffiD1+fgW6ayy0FcXJ+zg4JBiKmQYBIhhps366oGRWJ974qy7B76vLnxlfffu/WT3/2f9ncOP3stW+eGp+5897tv/h//c9SllK3GEeJZ8+fmz8yOn+4uqtdN7t17/JoVMvpixcuXKwvuquI4IM7YKUiSGw6DiU6yFJ0CbTKLcO63gW0AxrRKNAW2kF65h7oogTRFlDmKCpOJO1NYDU44CFoxTjotvM1VVQW4lWidV5s1wcEUNCoqi65KWanChWNmd2WqOguOW8IJAr7FI5BED3ZZgjvU+ProQ6oQTXRiGhCNbcbstiQ+bmNdnHrrTtv/PLKlixvvf6gufD0uSdvvn4jzL2Hax70rq3Xi8X1++9OLm9cvBbAVRWaz16+8tLf/8N3/+wzE/h+MpaFoCZZKwqoZ/agklgK1aDqBAGSWZFDvhqukBX7zAPhMHlBmz2qpGLO+wLQCDXrSom21ROAVSxI8EPa6SaqqrZNm4mZQ8gLMFKXmctJTUCMqDKpJqxQJbOeUrhQW1wSFWkfsOoJKdoyvc1rk7gQwsTOeXulAVOyfxaB44eekxQgEhaZCNywwbdNbYYVLMzsCF7hFETqhBhcQApjQbPzWgEj4ppQI1ailaIWjMA1aExu7KUCalABLZU8F05K7aYjfPTh/uzWB5d3NyEzeGkrwfq491t35+35T1+++/57f/6f/nxUjMc8evyRx70WZazr7ckSKxcjiZAwgcuRH23UG8Wp8YYbbRQ7pyanz0y3pjW5zvVFI3o8O5y99dqN27e6cd3Cy6LB/du4/sp4e7pxdXenrB+5fJmZp/W43t4VdJ4Rwb10LOSUVnG1XPTOJWCJ4AmeyIuywimb0W3nPffrdenqvl9MJuWf/ukf/fgv/393796fTDdDF9CLeOKSZR1V4QsIRSpIV9qxkIqLDmAJQFARkHApHiLwhJEjEYwFdZi39zASPy57JwyHGBVgpdsP7gT/WZQkIlyydAIBB68BDiNAFX0IHcwZkEvVwPAEAiTRpLNG1XpTaKrtwJp5nQoopaFWJE07rGCbNBXmaGIWyqQM6YlIAJVo40lLJuxyKhSbM0bApqpKoGADQUQAxORBNBCBcTJGHgzQsombsJqSXh+S3uRkogkAPKlkhzRiDJFh8EZEkjHhDAPqsOAWGHpW8yJU4lwV5yZvYCYn2nDKnEjK/FxeG2ybZQ7WYrJB5ppZSKoqDMPdes2bzB/qzJELDYkyvAH76kOkvI7QpMDWvya5sA5XIPPxSEXD4MmcPlcqnATJLsAg1yy3+L2GOykeQTIkZsc84IOJeDYMIpwDECRB6KYqVg1Erix9WRYO8Y23fvOb1949vXee2BPjU4891s7xk5/8+XR67tLuufWxPP35venFeYPFy6/ceFXoG3/w1ddevz1b3n7hm1cunH7i1Okn64lffLzmmrQo4po0Rm3AKEMXpAX3rI3ElfCasYa2yh31jbooTJ1on1TAIkQRFEQEENWo1p+pBkROUnCyihQn9z4PF9gMsMw3CR4cVYQ0OfU7RlCVCO+BE6YeDV6mw021etDAe2ak4QKsaIymaQBpANdKRGYMCw8qFRXEUY12zN2GLHbK41+9/vJu2Vbtfjj0b713vepGesSTQmtxs7vzelKe2tmOoiTxxu/eubN6vd/6dH3h3NnJZKuSWbPy1GtdUAktRApERACsFCPIfCNBIrYGCshJbTCQtC+CDM2lQFQ0gbAEd
mypNEZxeds0EatIkEBgLoiS21ZEfrAo9Q0iQZltR2TCIezY2h+McwAom2EW0pDGCqesgRIC28MpIgSnJ1QQsg0OSVcoicClCc6IOUPnijg1y3TClwCI1NhhGZXi3LtoqtXUErvB6k4BiQolZi/qHBcKA6K9OpVCURIq4pp5xDSGjBSVxlJoBJmwVIIR+1JqhAJdJauxl7Bevv3qq3s1KokhhKLe7MqloO+7FepdaW4FfXDuyiUEPr7X7M/u7N8+4JYnfnL+6pnd8VYdqppGk2K6u7EdFzEu+65tF4uDKO39/YMPr8/aZrHql04VsW9Du31+Z9vVu5Od6cUzriw3RqPxqIYDMUmHFYMEbTcXCKsP2nrxwSY+RqAOobTHg0URKYKZI4IQhciefAgA+RWih+/7db05+uG//t5f/OWPDg8fTOutvhfyLq4iCnXM2kaQoCwiASWTKHoREVaWSBBhD5QllSoeUkIih0oWaBZ6LBONpYAVRq4RsHd35vvLcLy1Oe3WidaPAHjlyFDSDhRK7xzIqXQqvSKoFsm+AgCVQAdlsD1dDGdDFLWnThCZAAijUI55CqMEbw6lIIaarjZtNDTUhZQVnYEtRpZm562/S4NePfGEYuZoE2KFSPCOPUDM5rmakFV7ywoQmVTKenFbCaRktkOU00PCro1bm+GcVDkIBOyZnEt+6llnA1W4NLhL4FJOwVGTywclXQ0AZaaQmDJsyBVR3lGffn3+7envDBMuWHVgQ1OjVeHEzsExW2nPac0YQGztxAkOYTtGlTjB8mZ/QWanQ0gvi8y7TjPXh3pXi+qOnELgoGo01ASSJeMeTm6ZJ5QzALldVgFUE2SX9spIGoobXQ0OzNCY5dBJrMaskYmEVYL3FXsluH7d7e+/91c/+um6C1W5/bs7d7/2ze9euvQYkf+f/6f/91e+9vVbt+7d2z/843/94t3F37/9cqgmZ568cOatdz5+ef1S1y2+8dWvTWTy4P2wfnDn7uzOzbdvhSgvfvNrpx+5sFwEEgprlL2XGKVTWhMtIa3QmtAhtAIRQRBtCWsmidJ5UkIPiCfJeVQQBSQ2CwAQY0zD3lRtwVIvsuzKeLiqOe8SwTuRCFHvve3jsymDZkWSAGBEicmaW6AMdkxCBAfiGNNSL4Zn9kIFUUXkhUVJ2QMFoVQpgVJL3xdxVdLR1LdYHBzeem+T5md3x5t7bkO26479spxW1dvvfnTzYI2li9xdunpxUcy2NqtHn/K/25+/+cbPH/vCH1bUei1r1/feoyD1IMB7VSZ1EBKo8WIdJV6YGzpas4YBC2Fo2S2vQa3yH7SCAgKiKDkHKInJHyJAnr2IhD547x0T4G2gkfB/qKqwp6hpgu7Y4pFZc6S4AUEC4DIUraqJhwFVMf2X0ZTF2TojR0ZdT7SstJaYjfltQA7Y2f2i3AA7c00zFj0SzK0WopMbHiiDJFZ6KEBCUCEyoyuD8h0RETlmj+hUWJRB3tcsXrhkFIwihFJQgWvHG05HwChgLFIzj3Sqq1qXzoVSZITFtpeXf/0+FvcujKddO9d61DeHvlnHGEuU041w/eaN77zwpXNPPXvUV9R5zENzPJcj3qByQyfdvJvfna3n0s0XH965udpfhEUnnRB3sW/KEr6icjKu4UPbHs3aa5/57NWrV7wHVLpAUA1dP1+uzQxEkVjCBDiIoiuYlVsGbHclE+rSaWauMYltaATIkxcSoejglHuyJagRErvptP5XP3zxRz/68fHxQV1vhl49vKygLOxZQCiFWKklUUUAhAUET1wXqJlLqGNXw7kg7Dvu5sd3ddTWjlCCEYQpgBHEoVg1zeHiePvMVDQwFxQUAWbVjABbMYC1t9DNzDFIjEtFKLxnOJWeqAYHoFcJqeeFS1ALi2fWKEoapQXI0hxDoVHAEGI2tBiKPiPMGIpfAaVtHwxbI0uZ5zU8mKIq0hkv1LMLGvp18ClFDJ2fkYwp+84aOdLcIQYHduQOMUNcw1zzoeybBjwiwprg16HnU8quEbl8xvCq5iaSnKJsTXpCvqKaOQ2IEmHGUqYVAimR51+eFUwq1qgrFBJFk6mOxaahHUraWyQyDghCiqii5DgPA4mNyiMiAl96sXmijbPIMIZE0D2Jh7mT1kwpITJvLx1alYfmX5T6Y0qzrkQ/QQqixrpSdea5yQSQV5gsweoAlWgBS2NYC1EZnCuJamobXTy4d9y8ulrfaleLTmVn9+q1z1z9zW/fYl4/eHD3zTfe296ePPP0pcW8uXH9w9+98sblxy8+eu7+u++stsen+/t367P61FOf279+0J0K9w+OQqGnH909u3Hmw7dv/sX//Uf//s/+D1vjvXYNr9C1ciRpRTtgBawFPWsPCb3zgbBWCYq1cgSiaAR6xDVM0GtEdLVkrCAiZwPDdJWS/5fdhnzt0sw7VStm6kSekzOOISL0EJkOeTTgPUNUBIJA8DYHMljFrm+eFtiOH28dqBYiDDhBSSjBIx5hNabjGutN7o4+/hCLGxvl7JHpTntfZ3ePmzvS7xcHN+87X2z4Ka3Dnfdmy0OpL29OLmzEunnu6V2educu7fnQ1Vg7WVE91grkSQtlz1LA9jhJQB/Meh9MykxpIR6g6MSIOURGBU80BQiBhiVj9mnsnKhktgbSaQUSrB9FRJJlrCVRK+8ELoFEApEItQVHNvG1QjwZsWd6pA1fyVZ4Gdxt/Sdk+BmrvOwoafrZdGchIcQYmNkV3jsnMbmx6KDEI44q7HLBLvTwGnIhmOWRbS8TsDoCFQQmYccFhEGOqLSWl50X8lDP3sM7rTiWwY8RavIFaMw6VqnXqEgnnsZ+7JcVrTbRlbr02ntdbY/0k5u33v71W9tj79tl5cEB3uvWI5Uj3j6zzaP6+WefL7cuHscjAc3AHbtWmnXo9g+Wq/11XETMKTZwvfrAqAgde/j1ousC5sdHXXvUd4vj+SeM1Te+9Y3Hr5wLXdM2yiBh+IR1DFMy09Alaq5GUQQYEYJtrh9FlJLLouEE4tmJCEMURhw3tY9TDcwO0ncdTp/Z/qPvvfijH/2471fOlyKdWg3YMbM3Fo+kGStQAqXCE2rQCDQCWLlm8SzQlVtK6N10RDKG8wIGmbYIti/zXnfw6Oi8qEIiCkIAR4aotAIGBSIQdYVGaBRRp1QQQcyP5YQRTIBLQ1kSBpktXg4ldsaTBCKBvqb+FLOXTxQjUTMMT5ods00kUF4IQpnxCWYGkUhksimOJrdqCDN8GptqVpEiY9kJ0UngKvK8zOa/Ce3Nxo046VYzDcS0NsMQVhWkiDm9/d6XmgFUmmzntfYJhyZG1n+KZceItCc0Y0knI9V0lu34WfNv8yWDpiQBtZSIN9YWqK2lG9JsXs7INGyEJ5hWFEwO3i4aTjYhpvErJUGm5hKAhtueq5OHY78mUW76CeP0Ui7lzddHVNWYoqn3symvhyUgnJDFVYS9ioo5aQO+LF05Eul5djA/OLjTdjOuZuI+ZHY7W6c+/Znqn/7helUWLzz/hf/4V3996fKjp/cuPXrp4utvvDk+9eHnvuTeee3G
1t7eucnZR8912tYvvvCN8WR68OHB1s6uzHC6PtWFVXej29iaXNo8/86d9uW/fvlf/fBPmvu9JydBIIQVOELXrGtrjYLwWkKrCMw9ECltXouwREJRoRpt1bZwIssYqcBSBgEJvgfzwJJNz2CalQiQDktaf6xKljrsUrFmQD//bBayKqBRBEwwerC3IYIqiyop9xDv1aQmKM0PGCihhdboN9CNZb5bN69/8KrO727vXf35//eNjX7XNWW4p1OqLo6e/ODjD8qNVojHk/H8VrvmrpPRwUcz/uDGt/7bP9u6+tSH60XN44oClVBbbmj2Dw5mNcycNpFCO1UVCPFD5H5k/V4iGJ88IPmp5oGelnA20mSFbd9gLJF0agTI+8rTD6bC1zE/zO5XSRa25DjLkpH5XWp8RUmRLF11qxK8AwExmmmltdOJu6AAkhGZLfUiiQIGu1RgUVYdMjOnfd8JH8mkFlYoS8bmgyoicbLYJ2UWG/064gLwDE/wUWuWkgoXS5Wy50q5jFr5YgwZgUbixhQ2CSNx4zD27QSzEcLUNSO0XhYjj/5odv/dl79ymS6cqSbjyWR7WhWoqkkgbVfr2WJ+v2vuLfzNjz9cdDzvaKH1Els9b/ugRVUV2xtVrS21bdscN00369ujdXvYyryXJgpWzF3t0SwOz53f++MffPvs+d3ForFqSQVAgm6Y7CqRqoRgBAAmVk7mrqoaVA1JcoDRh8iluUAgCQo26Cg9DwKyHRPpQMb1url8+dy3vvX1n/zk75yfIOFMzOyi9AQHYcQKhdLYjgyoBEaEMUuhWgoqoHRciJbS+nWxOZHMsycIJMDM7Asc6gw7xCPWAATRXtEDQZVVTS8Q2LFD60nG3jl1TNSJBGhkm8gpoMocB7BUAWN9igopkzlFmzLuZHZp6lARJSBAyfhDcXAzzdeDQI5dcgOxgRpITfJOJooylyRv8xDHzltmHZDO5AqExN7SlOBSrmLyUSU1CPkxz3zmE0DYsmGe3OZMmU88E5llXeq5B9sPBpTNK8cYNJztK+xMsq2ksw3FDE1ddGrfEys2vYvU7WCYPD+UWXN5zemDpYvlTgQRQ/NqA2mLJWZxxQmptiuQMNET4ylONcl/9UXZLCJTNzXlcGR/ysG5K39FyYNy+r3XU4QQnVchIfWUvJwADjEoE4pCnPd9F48XR/du3miWbUHjjemH21vVquuWq42qjL3oE0+P193may+9fbB/78rlJ4+Xh/Py/my2eP7ZZ2fNwfmz56ZVvX+3aY4OH3/iU11bbpfn5ocHcd4vF4s1gqt5d3sqx4vVbO64Po3drz75lcUn6lesMUAdxKyemRDJDGbRC9ZMa1vyRWRLf0Pa+cURpiPlVLGndVZRkpORUJaipchPw3OXmto8RslXyrzEAVvdE9NZ03RdgfyEJ4yImTyYVVgkSfPN1Y0Yqk4BjRoRAIYHlZw4saUW6ArtCm1K6cvQzvdv/Zvvfu/5iy/8X1//v8mhSodu2dUbuxu8I8c3p1vbVHDXtk9fuXi3u1uFUclH25PxZx6/+lG7qHVccajQO+21KOGVSxYWMATCadEfm3WkvVUSu5gARQBEnigDNVCX91IDic88KHwobeMG2KKPIUBIGp90mAlQUWRyZ1pGBIkGDlsxeSLyNYMa1rSrRFXNeZeTUj+N3SlpI1QhJgNiT6DkjM9s635hZC5rJYhiFBHx3lvTYQklvWkFOFO58klJaIrttiKY8zuYoawBa6fMkCjel4QiRsdcqpZMZXReK+KauBAZq9TEJctY/ZiwTbrt3CZxFSfhsAoPNmW2wbHWeY3jMvYcuhGaP/nS5Q3fop3Nl4eHB9eP1qtmvl6s1kDstF66nVBsPzI6pZu7Wp/pi5057bz6zt1bRwftvdAdhG7WYA5aCLXQZaG9+oL9uPBcAWUMi+Xx4Wefu/bii18rSpkvGsCDRaHk0v1IURDWDFkA66FMkgzDTNWnogHRYjKYRKKEwOQL70QIIjEGWztB7IgEysTMXEiw5Q+yaptnrj11NH/wLy+9Mq7HoqIaRJi5ADw5ryW0BCqgFioZNWhMqKElUc1Sgscgz6hBnkSIEOHMeiGJg1SgNc3dUZhG1KZetJ3ZQAcqiTpCK+hEzQJgzdDSagw+aYLZYrR5LikCQEA0dUquU09iMBtTix1EmITIM9nqu8w8ENIE8DNUQrDHmPPOnpiaKJEYJXkAw4RBkWDIZkwyJKLEPEkBLct1LVKJpP6M06LvZEpHSbRnyCullKUPWWLZ58hdb3KyIGsWh9z0EI+DWbOR8YnwVjPMbf8vR82Tf9QUeim345Q7Jvs5OqmKATASQ1hzq5pC0eDH+RDD8gQkT2QWSSqX1HINadjilCTay6AzzmA00dCU549t10uQc3NuyRImT6nMd+RsdUwOXgqwCpg4RnHOW8NXlL4ec+jDwf3DB7P9amO+OO7u3emOj+5u75wpxnsrubls70zGWw6bsRvfvekvXrx0Zu/4Fz/96PbdxcWLT3AhZaGz4wPCk7OD7vS53TPnyrZFDCPy8fBoUVB9evKIr/0iNtF17WwlnSwP2gtPnHv+29fqcjI/DAXAQjGCAQQFByCItEAQ7RSdaFAEhZD9XxUCHGnOlbZDzsBSZcAVHkDMdClDOCgxDImNTSUJUB2Mm+yOpBex2tPICUMpkyFOe2DyqjzJnZ5TwGy1lAtoeoaZXSbV2SCfyBEYTOoRCq814vzB7enYP//553/5F/98cPe+zMI4bJdU3f3wzkfHN0YjJ8cdnIs+hNWqHtXz2QxTufrY0yNXE8gzSILEFhxABQgQW+sEx44cIZKNYgBiZoVLxWE+c8yp4zd6oDlp52X1pJqs+ShxEFI3ykzGNMx1v2U/BInEjLyDwQSGnP2fBylBsmNTIPkBpnPOLiFpqhqDFYtZcDEU3unvVj0xOwMy0tIQGwMbRdK+MQ2QY9QozrNZcsgw9xlObipclQsvZiCkibpFxOzZ5HmkYDEIKdoR7AW+FCpVSsLYuZHzOz5M4TfRjSSWfd88CG0znXA5wZYGH5ezT2669b0qNGiOEGbTcnXY3teudVHJU1HUnv25csyj8WGzWHXtevGxd+srFz8Vy1NH0S8I7Trcfe+Vg1Y8dqnkctMTnCKCERAq8qENyo68b48ble573//e555/vG3n61XH5NkxNABREVWjJd1kEUgwMU1q7DSokjn0JxYBKAYRCcyemWwLRohC7JRUBBQ7JkdQEVIB+SKBnhwVLoaO4L/4pefvHxzcuPFhXY8kgLgSOOYCJVAoFdBSUIJGwJhpBIzAlecxoRbUUE/qwTWL9KmhBKAeqrYTgbxrq3Vfh2pSxFa1E+qhK4LTZAvKSDaUhaooWiK4KETsDBAzAyzVmKaT6pVC/rOAnCk2c6FplTps05cqiGJSJ5KYZCk58ikr2aocESFjWoGYweYaZZh1CPYe2DFLGj2q2kYlMbUP2xICwIT5ompWVgoGSfJ4sB5lSG8GNBO5dCowJMeBEJVcrZAuKpGKWWH+ngoowasmpx2yFmx9iR1zEhG7Oql+59T
v53o9oZB4KPFajWAkkSywUsWwpiulW+8sB2PYy5S+L32axIX+31dJeOh1ACJWztbV9p8NJz5xyDvhKacfZfNKGfSLwz+yeTNKCIGy0smSr91qZiaNEPIF+Rrzo/kH1/fvHxw2TX/p8unF0Wj/TmjX8tjVyx988J7I6Xo8qqpnZofTyRRNdyPGmTSx9Jt/+n98Nq5PFe482IdYgn3XyVY97XoRRV2Fvj+K6xJQosIXZVXXY7dVlVrUJdy4KgpHJCrNTDiycCB4pxJhfIeoCEAAAlEHBEUgCkRCTmByBARAk5NcbnqQkZIokQyVVxGVbBHKfAJBJ9MkszHXGGFRxzh7lJK2II92c0tHGSMhZzNlEgk2slQSz17VKTuyFcICYmVick49lKEu7aqBI0/qWRlUOw7rQLG7d/vgFz/7uQO/8MI3XvvZa91qzWDv/OZkevnKlVt3b33q6jOz1fLimSf25frdcFfWHjE46ZmloOAIBWsngTKDwvAf8iRdTCmTHMCkrMT+oe1D5jsLCCcpkUaJEHDeyZCmI1ncY3WkRHWOlRCDqGRIIHsU2DhABmYUDViUiogNdTUdBBKVQW2Y3qeqKrxLDvNZNjUkVJy426ajyxbgLTwSnORlkYkkqoZfiUgy7jAhHqVhQjqSxgmT1IxHR0XeuAomtjqG2Ad1IM9FxVw5N67GFGuS0jwb2n6+4NVK9peH4XgVDht/LOc3p2c2uvvd4fwDXt08JYuqPZzgkEe8U0lBfbnWcu1H5YVmFVuJTVcsmtXeKdz+8DcHiwM3diuQTi60W9sP+vGRjtvy1L22PDv1xbj6ZIUYlMDSBVQKYQouLKOvPFQeHB6c2dz9/vf++JHLp9rmSMDMXmMnEEgkUnIgZpGgRvwhU6lZBLUbElPxZB61iWpqIXrQGXC6G0TOaRqtIRqnPYY1vAcY4kHRe+7Dynv/3e9+48///GD2oKlHu7FnplKTX42iRtqZPQJtACPGGKgJI+KxC2XgirRk7zqf2e7QADH6PqLzRcetdOtJNxpXoYnoQB2bO42shDpKH8WQSyUIqK+IzC4jqqojUoQYWcxKb+AMmVaGJPVgBCiiaAbblfKoRRJRMfVVySKJIhJbh7L7RmRIbgMszgt7ZkBEYowi6kvv2MUQfRqaGulnEKFm7U7e7Sa5nLQ8nOqTQaXwsEkWIQHogyhiyMikxGwTBOSmmWARD6SCvIbPhD2sJy486ZMkDwrTLqsmiX2GnYemM1V+NFxkG7XaayQWLCVsxrhMdsHsLanKibdWeueZZS25bWbiYdyk2XWTORkHpb5KcyeP9JHTH06cBzSQUd5zpje4XVSh3jlRxMyLEWHnOKoX6Rlcec+1Ozw8XB0uBMt7+/PCV/UG+2LyYH67qrb29nbLavdgtj/dPHv39ofnzpzvmomf3G26gvXiuNqebJeb050YSHzdNI1Q1yxLz+V4KhJ97c6Q5z7oRlnXp8aK2jM7XwUpCN6r91RHlr7vYy9aIAbRDlQF5yDqidaAKnqVSBRBUaUHRccBHAlKJIJeVW2cPbDP1JpgtVslnIH/E65aMmDRdK/xe70PmWjPLibDjAZTLCZkiyxL0eknsz8cMTuyHd8qIYYMTTgRUmFSSV5YeSQJJrAVWAG65tjVdWjbed/OPr758WI2n/KZj9/92AfP4kiVHI4P25fv/gq+2L978Ogz5yeYLo76lhZ722dKD40tOBC0LNJuF2ZlR0KqRoAOtgnTKVy0j2BtYnK6cICHRjM+g/mZsIHVZsAcSch5ziBwEmSJwPq/5PKiPEyekDw3ALV8ljA80cgpQ6ZraBeWszTYxmqDUYa1x0ogHQik6SykMyepU7dMLHnjiKqYfbp3zorQoMpmzMJOCDHtG1YmUhooJlY8p0CZaNcICiYUzJ7Z+bIWePZl0FJ6lpWu53Pq5ySNhia2sxgOe16sfHtUd8eT2G7EtorNXlF+/NHeQXt2wuc3uh3fPL7rr+xevrj3eHN4+zcvvbI5eeKT266ZdfJgDnJRArXH5x7ZuX3zg2Z+f1x7RsG+fexC/fQzpw666pi2H8TxDNMXPv/5V949+E8v/barNzVQOXJh3bEgNFxuFH3brprFtc8+/Z2v/kFdu8Vi7tmLMDu2LWfsECSqMrS30CMcQZzsIxKAxNCBJ2oXWDxRDFEg3pXm9GkTOpHAoGHQqUrOFUTkPEvsiDwYTF40AJBIm1vTH/zgT/7yL/9KApyvxUUtlGqgUtTgmmRMGJGOiDagE6AWGamOSGt4LxuYe2md2B4uhXZWTYkgkO9L7iSgbrG7qaVgLbJSE2+TI7QgIvIkIhCQkjJkDuaKORgkH9KCWi8iZjaYHWw4VR7kbf+cZElqXkBETJJHsoL8PCNbyuctJebVYwknzcIUUI3M5NmpROcShBu6zspYn/jGIPu9nFxCxIZwRKLWCapmnkr+TrLT6hNVSDJQm7OgfSsNJjZysvsWBHLO+g/NWjylNGdN504Ty9XOa5pppPSXgeyHMi+xzZ0kWd0xkKB/mCjQkGYxwCFluhS5rE8dsjy7h3YfmnaMkTksGaYmiEgU9dmeKYoZLPPDM+CMtuVKik4ysU2CIcgGtellNWfoEINNvErvQ5Cu66AMx/XII+q9g/22WXx8a78oR6OJrluljcW5R+PBnXo8unD9/Xu7O+7g3mJ/f/+5L1yrxzvvvPfa1mb3xOlrjPF0e8P7uA7tqlk2jSJ2dVX6cjLdnIxq71zt3YQY5EZMDiDpnRAgvo/iKPQxqusELfUEV8RCnDChc6UyuSCtJw/tiKAaxEjaZHMbI2xHQUBaFqvWmikwuBwmrNmIOExMpJJLweHhEsm1UyribCCipgwWHQJ3Iruz0Tk1P0upF7Y7IgqyDAWAA3LTZs0hYBo6Mk2OcgJnEnBBypACkNAXTo6aWXM8v/HeHUaNgE/273BHXuqw7ufzvgA+/eyj5y+dv3Dhal9JdcX729X+Ox/snT7ftjou+YH0Ia7VB5FO4KGR4JVU2To545DZpRHn0igv42aJ/5zESiJpiR8xO7JVCwBCMLOCE+KkZdlUVCgjzbokdceWfU2pZdiBxakYjTWVoH62klj05GSqIg+bzBEjc99Sl6sp0UMN8LCcoDCv51Spsxm6aRYZs+GDanvBUqsuUUzV+zALgBPCRL708F6ZRZwKhU5CJzg41kWHwwXNW1mEdtGumqZH17jQVKEZodlw7djz3thvj8uzVX1hvHtx85ldvjQOlyY4u4mzG+u9+njazbZ5huP5j3/y8kev3yyubPNql2fBLyT2UUTO7Yzkk3vzu/slE7yThdKYFx8vDq7/bi6TOW93frfHmeNi/mB/MXbHVBYrHYUuYswhaDmR1aohh+/+4YsvPP35Zh6btiMuBB05l4ytRYIEV7qogTVt8tFhzAKwLTyhlFSsyUpXStTEaKpB1Nk5EjEjC2NsOSISlShRoUwuI97GDGCgBPFqFc6fv/DiH3z/r//TP5a1wCvXLIVQIVQzRkQ1dES0oZgAE8Q60gS+wBRhhNW0WIy073tijUpRKToS53yIsh
ZeCYfop2F/inJVoR+P+xpYklakCzHzXcdOg1rfDAWPCKsiCJjhnRdpYzQ+dI68yWlHyHidlgXIEWkyi8hRfCAbMbmEqGW0Bkgep2JCcpB1jzmhiLFHQ+ydpbsUhZIu0rM3rjkZa9wOQPKUSXNeSvFOLTtBxGBkHSQKaYdAsmlOCIb5e9hpsWOS9hMQRMSlEiMRtILJOohNHU3sbCQtolFELc+lGIrB1iMpTIwhBgUDSQOZT3ICM20hgSrEMQ/UPiN/ugS1mU2lgeFpR5vEmIJ4HnI7UFbLCRF7ytZ6YqIxjThZ4UQMsv3nmsQfRJlyMoShXCgN0dApg1iJRdaTjXq9Wtx459aFS2fH4602AFi8+/Yth/kS3f4t99mrj77/0c353b6ux93azw+qu7c+GdeQ/niy/cjexdNbB/V7N6+HdX/1sWfOnz/nCxbplNsYylExquvxub0N9jXBKwHwIRolkqMQYi/S2dTPOLhKpEKk4sh7VApFWCkgjtmcWiCsAupEA6COiTlCeyJ4F0VEk1mrqAROSc9W2OaqLT0Uakk1GoAzcJbTHBhKRLbr1pR/QPbo1xAjiNhxiMIAKbzjYE+OY6gOoKchGzQ010xgFgEldXgmwKlGFSBCg0RXcDGw0NkxnHhWB4x9TzHcP9ifH86O919z6qXrPDw6tItGFvTktXNf/FK94XebxXR+vAxLvbO63/mj82cubY0n7bzViZi6mCQA4stCWZ1nIgkQ44mBEIKYAFfimokB57wdls4I4soBAvKIImpWaJrgYAJF41T1af9t+s8+4Qaas2MivmquUWDGsKwitmOGPPchELF37Ngb9VPJriDspKuqRGUCwyGN5iURKWBeoJLMR83owI4AsYgICymcY7PjMVRaoSwqnjQogqQsHoTIPJDgPcFBvVMiCGKAdk03X8qiDbOFzhsczXV2LMu2XcdFFxchNuJXKFtUa79B03F1aq/aGW8+Mj1zfnv6yNb0wtbGpWlxtubznjd418+2MJusD6puLu1x18z8+Jjb1Y//17+/9d7tqqjb/cPJZGe+pHKl3azf2R759frmR9e9Y/EsEtgXruTDj+/em91alWVbn34QNx+E6aLYOnanx8Vup9bbEaCld8tmtjWZfu+PfnB590Jz0IkoAwQH9oQO6hUdzNUkqgcJSJJdSYYlTlABsZKHiVMiMEYOpaJG04yfmVlOqqRoha25BCHAOLqivYDtgVUwkV824eqTT32jwT/+8z+MxmMpFBVpoVQCI9ExUIublGHcu0lBtdQ+jnU1ofUI64m2I1kKme4mEEVAC3WK2KCoyPVOpnK0rVMKYR07Kccdld5x0n8qC6L2aeSlQckzPHMoobbMgwEPBGVHDCPtK7Ejr9qriq26IRVHFNNCPjXUHbkOtX0MFtFtN0gyj4EaoxFK6ljEnD3TxFcgpuaJIoAws/cFFKLqJcSEB5tSkElVYojOu8x/Mk8Mr1Ad/NNzk6ER5Nh5a63JxstJ50904phof9WUcRxlJpT9tygxigDeOxG1RaIZIbHWOQao957M9mhoMZP60JKnfffg0Ji1E9mC0Q62sS6JnUaxXBijsLUXKiB2jhUUY/ZREtAJipzSApECSbee3OSzsa01CZIVYGRLw5AcuTRXUnmhOlmRoTk4KTTbhrRl4W999MEvf/4vjuiVV3/1re98oZNDkeMujI4Xo6c/NV0eLG7NDk+f3a3K+uO7x7t7a3Tl9vaFnfOjPdq+c/OD/V/fHlejRy9fOPfIbln50An7uiqnjjfgbWeWC0rSqiBaGV2WLNEkC0Xe5c5JIkLexMjesyKIivdOYrQRu9qoNg2cRDWoBHt0o/TQyAzvWNJqqTyVSLN5SZ7bw2XmRDwb2GsGpaaCFMIJBk6yUdG0Sj6KWs0T1eK+sXZ10P9K7n0zDY6JIVFsn7TzzOSgyHumo6pXjarObhVrEfvIgSAkXdSOuceq9GvyDU2aGG7eXUg99ZNjmgXRmiWuFmHnwvi7f3aqkPjqbw4//OhAfbl9ZuPilQt+pzhY3Tv7+OnNM9O7aBqpGimi31j0vncFelBE6ITVsU17BMyAJwKrigqEI3O+7BLYqarEKMQGqPBJaZPdYbx35joiUULonWNXeMlyAwLSHCyPlszmwk5XlsjB6J82Awqqlv4c04m6wRAmWMGjQDB6HTS3ERDRCFVVBptNQXTOQaWTwMyRPalGiQwBUyCoBCccydg/hNIVIAYCc+fBQaSTdtZ2y7kuF7xY0OGhzB+EZd+v2r5dtetVq5iD51yuyAe34evNanK2nuztTM9Mt89t75wbn90sT42KrbLYq2gDMkIca9iWdqxOuWqPRedRFgUvN7mZcLc37jbQ/uTHf3fjtzc2eKSNHDUPqt3FZF3M51xqtenLO9ff5za4wsOR92xSXSmIR74YcVm7ELjvII56B79uHI0dVYFQ1W52/8GVxy//8Gs/HIfRYn/FYHhGQBTJ0njJF5oBjScmQmLsXAtKBMnkHYvikidhJkhhTptVdeCRZ46tAUygBEIx0lzD/lKYX7mqFyGG7zr53Bc+2/rmn3/zUlXUXJF6aAUthMeMMcko8tS7cTdGO6Fuk5oNtBVWm7KsdUVQZS4keI1RI6ABNEYZ4FtH5/yslrpgWlNYaN+UGwtUISqJOarYqBoalJUlKHpAQMEDYgp2VVH0zAxRib1oSCu/E7gZRETT3kCDkNTQHcsCdugI7IgVsDSR8GqDYxhsjQoAGO/QYg1D1RFnRn7KYd4YhjZ2VRLOcS6GYPkjpbWTFo5OOsskFUlTtMRq1wRnWTZMrrxiOPCA+ik7NxAd2Xl2yYcuz4VT9rUEAFECW7dkwLSRkpI/RqqAxAZ+ROAh7VnUIHLkbTodQgQACeydd2xEFFUD0JIxXoYlJaEZSAE7vbOTSX86ATayyIh2ZmaniXZilg7A8lCPUsq6mk0mKNUorFDEyFWNw8PVeLL9w3/zub/561+8+ruXXnjhyuK4OnM6PpjFxezS2b39o7Xsf7yA3vVFuTF5/KhtmubuR795oxB/9vTe17/2pe3NKcGL+qqaoGawjyIClq6nNJkzy7fkGr5eyXhj2vfB5twhRu8dFCrBOUlgLvsokdmJ9oAqIYokhgyZmWq07dP2nAO9RWFRsbVPMIPWdEmTlHOYu6ggmmSPTAg61IHpQtFwJ2x2z5QZP/BkGLc9PTw8/ym7K7I4x9vLZPdjEJkjeyRDg0ihbGWjc46I8kpZNqEZC0GBHtrqeuxbVzT9uHXtJ4vGrcefvlw20oYDXS7Xj35u8gdf33vnnw5e/vsVirHfBtUtJiVve94pLjz6zEfzD68fvD15/IWmHTW0MeuLwBsrrWglLjiOHLqoHSiChZlEEYlEoexBQiJij5ARNAElVscuarBiMB1bVhqKHxvS5BbXDCaNyUUJbIdgqGWHeiXPZVSDuV3YWE0kiii7ovAW2jTD3XaiTYggyogB5pjhXMob7MzxxoGdQVxEzF5EKHapfCDbSURUOHiCQwwUe5G265pVt5jr0ayYLXC86Jp5WBzHdr1et01oF
zG04JalhZdq7E6fHU13xpPdi9s7m9u7p6anq8lONd7x5YRQq5ZRvWyIVKylNutenPiCGaab1cih5q7WMHF9Hde1LHZHzSg2//lHf/vWy29OZbNbEDfUL1dHxx/tTh5vG39qo7z/3vVucVSPaulEnESnkJ49oSZeIfgeesgQhuO+JFmUrqxi26HWqpzfP3rms0/94Gt/zHNqPmld4cFmYcCOvWQtGeCQ5gKmEODM08lqFkBhg8V0K9UCWIpESRNGaXAYQQD5PIXTHKocACU1Q0+FQHzSEJOHcpJIRadOv/YHX12X3a/ffnVcjWMRUSlvsowVI/AEvu620Yy52aLVGO0EixGaDaw2aMVkpPTITlTEGA1rVCsugrpt3J9SsSDXSOep9Yie69nmWLxTQnSCCOqIIgHgQOgB81UIXjUyIpQT0RienVnDBVJh9qqByasTpCc41RnMLBKMi5PNeWyGbgpeW3iTZqAmDaAhrtnEFGTyXc5ejCmXgLyNXcFpF70k/+jEYXBpdUMKW8P6IQyjHQxch4GjlPwATLRX+rRQOlNYCdDhtwBGXUyCfQU8ucTCTE0hHMh5Lyk0WOBIFZiKptJt+H7lh7ulvGEuAd32/QoJXXAqXJQAMVOQwHoCc5JddAy0zP/qS5OzykMjLQvzxqMzO8jEg0hDAitHNI2eNX36rIFOODrScg2ICmtoF8W1a1fef/fNmzfuMOJ4vA3lTrqSdi+dk/l8JkyItHW6mp45d+/g1j/98ufarU9vn3n+6c+dOrPpy00ADp7YMRddNGm4iAqReK6GOygSc7UhqhRDJzG262Y8mhSe+75l9iLaxb4oyigSxTG5GHvOrDOG8dBsGCggMCUjQomdSEj6URUmRyxZmWaPjE0ohtLOfFg4918xj2w1Dwg5KwCS7IhtHzNURYNtYrCpv1k3WMYhRJP3CiiR+NLZNBEPs8t2cYEsdOXHnWCuNxjGS9pJ7JQjaxDtIKhXsrlTLGetf/Zrf/Lyj/7HWMv04vjD/Qff/dMvPXpp/6f/6/4Hr3WnHpv2unZVGQgfz2/Lg/i173x9Uc6e2T79Dy/96vndz64nu4uw2WjVaK0tuIWsRBrx0RuII0GgnSIt2JAoKsHKdgDGMGfHBIrSD4CCmg1YftRCjI7IRrXe+yj5PKbmNe3aSrcz/Xgey9uUi5STY0OqhWwOHaMJlYmUKI27LBok5ZjzHkCIokGcOVeJgaUkIlGEvSeTNxBKL52qkkNEt+5Cs9JlE4+XYXaP5w3ms7CYx9Wib9u279cSV0wr6IpZqppGZTU+VW9vTSaT87u748nu1vbOxsZWXY+5KAUE9l2oRHzbsSw6FSgClzXqxKvjkrhi8SJOuSJMaOoXvm+8a5wcV1hu+K7qm//y1z9+/5X3tnkq84jWaQPueHF0WG9Or2w/ce/m9eb+QT2qQ4B4KCs5CCmtiApFBfao6rbkdYmu4s7F1tPKY+ype7BeP//F51788rfbe7HrurIqZa1mFE8OJkQ0R+vEo7UNuOlxz+4ClpshhqYmbioy+gykSWc6WEQpDwAarFtIVBn7XitJ2ZlJRpqhCikY7ElLVe9GPjoRlW//0bdWVfvGB29Mp5OuClQqRuBN8qN+G82WzjbRTdBsotnAYoxmimaMlgFmc92KQGCQMHWo1iiOGuDg7TPn61rcHNEjeBGPCTmZT6adQFk5sAbVoAjgklHnkB4Z4kCeKBgtPxrjiZ2IqJKIQIU9k03LSRLok3CjzDdRNd8oZlsarZScTKw8JUuXw1lDlsVIIiravDlP2gheJKZO13j91jMSPDvTGDgQgbLYDjpIQTA0JIYEI1uvIxFfTG4mwo45u/VY8WX43kPcaSA3u+aTbJMpo3SYVS+JOYG4RGI2dX6GMZPtOlkITfUJ6EQCgYRci1045zjGSNR7X4QYQoyFg4gyO4PBJbPdOPFwTkDobDlpawBw0iFAjZttowCQaaYNVM5Ef9XBhcuC3OC3ggzwpB6Za0BEyu9//3v/8st/WRyOLl+51IXNypeH9xdlgXJPZwdL6dz+fP7qa796ZPuRZz/1zKXLj5Z1HWIIkUUcs/YhCLTwAhXv8pJd1UgBGQ+HOfU59s71XQihY+bSuxDaEAMz+7RtKcZkDyIx9sNoiNiUmqIxGCzpnFHug+EwzJT59GpAKqUCMRcx+R6dPAs56aYS3W4DnXxPqi1hdIRoBE8HTsWnWrGVcH7VPMcAOe9SM6fJr99UeQxiZlKOJuZhDw2qhb0aUQQKsEgQCtAISJo2UQD33BYIZX377nx1qBifavCgHM+vPl89+ZXwl//j7fm8Pvv0ZHHQ+qpaNHGyE//4T69Ozzm3F2bdvNyVbZz+5W/efOa7X1jMfOc2Wx058dJHrIEV9cvOxcKsso2DLBJEelEhFWKOMVgC9h6AxhhFI5tuanAvQboynNEp5NnK4I1j3FgbuQ7erqllSBOoLEww5iTI3E7Ye/u3NIxH6rlzqE/QidVDDhRFYwxwXJZFIBCRFeoEhLbvmmbdtrpY6HxBs3mcz7v5LDTLdbNcti2AY4RjDT1IfIGyrrem1WS6MZ2enW5v7m5vTKfluB5PJmVVs0MQAD4GXnfargP3BPM1cYWqahA29TEKhgOTlEbGUXj4guGSLp2jjn2sdT1Ct1EKt+3f/PjH995+b7ecrO8HXbB2kRpFX/q1zI/vyv0w2993TCGqlMSelexJg45Ea3DvOLJGHblurF0odBSxIiolNvMHL3z5D178wjeb/T5Cy7rUFnDEfqiKTgyMbDCv0MQwN2FFAtgkA5ZJBTxwSK0ZSPTok7Nn8ShH9pynH+5ENNEkHREDSRiQlPZwNmlVh8DynT/89oO/Obw3PxhN6r7sUBHGmHAzltmmX4xjO9FmkxYTaiau3cRiQ1oRgISjspIYW4F0POqEyoL13vu/+cynLm2jYtvSYDgw+V6LfrJBII6M3mpUYA1UQAQLIyi6EgDQE3sAkIjEj2BRhvYOLMmNPgw05oe/jP2c9DoihtaYNQlwouths99Qa6gSzQKicGnCmNidpKpkGILJtk2ag9w+A9AoEgWOnXNsg3slm/4qKVNid6QhQbpPuTsc/HeQYiVOtEpWT6sOit7sopHeXCrZk29kHkqkqGlQSapKLE9mIXB6KA1Xzb/MXs+o45mYY1gAUZCY57jJWEDTIlo8NDUchs46fJbBhcAQMitrBOrTU46Ex8JWEKYX06EMyZcrbWJD1qRplriABaET1OONZ5+/9uqrv713sJyv4uLouNMVa3l4eK8sq73pqeevfnr6/Benu9MQZL3i1SqQ46hKrhdbLwDtYzC8HaIE55ijvd/0W611VWIUhQfEKEghxK5t61HddYu2XZdFVVYVswudiIhzHEIQYiKw8wpl4yxIANjGe0TEPjPbJRBYJQKabyuQTLx/7ylPQ36Jkqn3ZoxmOcFYcqmzgiI3wxJErMBSMU8Jc/4WBYhYyXlvJRWG+QGd/FIbQ9rBcezAJsAQ88oNMQK97fvSICyMCO2SAZ6spas2j0M3Gp++M1t80kip+MxX9s7v
Hv35n790v+VLV8+O4qiT+0eL42vfnHz+G6euXz9667Vq5n8hW/Hg1SLuXj79zM5srS1vdG7Sh1ob0Jqp19hFaVVboo4IEWRet71Ib45YBvgD8M4TQyUA6p0PsRNEZvbewYYymp1YIeQS/5/A7FhEyNm0KCny85FNRxWpXEq0LAKp6uBNxuyQjXzSk251qCZsy8od7wv2BFJHCnDsw6pd9926WyylWYajeX8064+PpWmaxbJdN53EReiaEDqgKxyqqtydTop6tDU9vbWzvbU12d0eT8aT8bjcqJlKWzUQOg0hhiAhdCGCnWhggJyr2HkDasHOyGLEhmtEIkUB9mDvtVDxqoVKCakVNaEmL51Hx/1yOm5pce8nP/rz7t5b02rS3OmwYr920kRdEnfAgtDKrNvvpC/YOSh1EK+A+qIU7mhENGJ0rK1irWW1Ln2sZFXSRs1o5w++8sXvPP/iN5s7PTyTl2gevQywsidldVqI9gmHzLRFInONGmZaJxC01ZxJAI4hq2qySbDvlnyXbT2lIonnf28OxzD9AZGhqgSnxFAGsTApInmCR+CwsVP/wQ++85d/9aOOOzdxmGjNzTgcbfvjTTnapNVUF1NaTfV4Im3ZBjTEEQnHBUHZRwJExuCy2xtx+8k7/SfvnL14mfs1k3rA+0jCNshcTMcQoAf1xMroQD0hQKGIBCEEBxS2FI4ZmbQcmbKSJbaqAlYbpKSeUIWzOk4TOQvMFCSag0QmNquV86DUf6rCVJAKBYTZSWLAGdIAqPqhtwaljGEiVBVhSnxjVYkR5NiWTiSWr6ZMQ5RkAyrWkGYpnr1TZ3S7iKwNethmgV2ygNbMyTMxQxYNJqT7YYvk1JZmypXRrOwz2rYie944mWqlKVZGrMU+rIg67yCIEr13DBdiNCA1xoefWsoOTEOCUNNfc648HkrNIEoWD5SNMa1BJ2IdJgIP1ZqJG/PQVwLzCawBWnoOXSdb26e/8Y2vvvQvr8iqCaHfqTdP7Z3+9KNPXDh/wVc+CDqR2TyAAQ22i6bgIkQIdybOsksOskUDKgjZXCt/ttSzEHsbk0vXRVUtSx/6jhlAWDVr6DgEEYmbm1uqQiRAjAqNweAGZgcWtVW+RmIIRmGwB2NQmENEUxkvSH7XoJNiTg0SQ8LIRAnJ49+QhFyak2ODbtCHECXU47EIyLPLsUlVKOMhhHS8bYiRlHEpMFHCVcBZDenSWDtZVgToml0NIQRoD+2UWpJG0CDWZVOPxxunX/vg9rTaaXirH8nvrh/ea6qdK5P54v7R3Dfovv3fTc4/Xv/oL28dHPbllLEr5ejco5994ZfvHL7/xkIur/z5U4um0KWgZW01NuKiI7B0omsV7eA6mHeLqkhQRIU6xwCcp+SGz8zMHi5GgRpxMuVGyuVNolhJOg4S7TXz85AsOhSJXAJORgPelH5EHAe2MyhKVDUTWQP3nWewK7wzVAoQ7TS27XJ5f9EcL9aLRZgvuqN5v2xiu1DV1bpdNKsAaVQ6gnrv6noy2pqMNs5Mp1u7O9tbW1ubm5PJpBzX3hUgimLeCAhBl6167XtRAN6nmbgSXKkURJk17UASZ2CZSLAoZi7QXDFKNStxEmGIV5CyA3vAc43OU1tK2Jnw4mD/P//Hv9AHH5wtRut7LS1LNKFvxC1LWYrRArq160hQjlZd5yBVdOgEnoJEFMBauRVulCpCYEZgCk762sV2cfCF57793Le+9slqLcwC9d6JeY+qsGNlNWINyAyCeLibORDFbD+Wu5AMPjKxDpMFSUcjPfycD57Y6E4GqHIgP+a45c3zQomNdUTqQWUUciWo8pEje6KCV9pfeOKRr3zrK3//yt9NJptcrjbQTGgxkeNNzKa+28JiC4uJLHjBuoBbmHOyQ69pjRBBiXkNLUAbLCqH7733zKVpQOnZmy1sgAvwAV6Yl3WFElSTBoUHCqAABCiBAEgB9IwSujaZIaDmoc2MEDrAMSNRD+0SqyTphJphh2HBEBHnCzOCTCtPokoUW/RpySARoQRqRNs8bSGijPipz1SgFPsNs4NtAcoXPROelZnJ5RWKVk2pZgdHAsg5NjKe5Im/iDjnSJ1lJvtFUVWCMDs2T4ABA7GoZ3Qp63xyftLcyhqL2fJJfr7ULDecLW40TnRGUYgSkweaAGuFem9eg2oMI1OwuGxEoooo0U7yYEs5pHm7KCdeYJKnXQlzTl5Nw6OvCjCQDODT58y8LVU56b+t9rS7G3RNqEUIHFdBCj/+4pe/0Lda+drVEVwQ5HjZS4B3gMKDRSJYRES5BAshFFIGEUh0rBIELMxOCUQuSszNEFJ5DDF1lbU1BmYyu7Zt+17KouCyEjFGCsfQsfMWrzhx+iUKokZm05bQQ3UVrOSjh2uO39vLYWP/7OCZLx0RZXuY1IOp2TmL5rrM5gFgZuedI4+YzNokxiDivQchQh1IgiQtSxoXpOczubuoIm2NVNvZ6cjuS1Qh4sLyUtc1pR8jgDp2gaRTWQZUihLLenK4/w5tP1nouOuat/YPJ/rIUSlnz1ftga6x+Hf/4Xkplv/T/+dNlPXm0yNXYL/XydalM0+/+MIV/Md/eutv/vnGl3/4NeVNLAO10DVzZHSsHaQXzyoaRQLQM0dVUY1KVFelaAcgSiRV70gyl9B5VoFpDCwrG8pEBI2RmMj4/yIgSiwNmwzZmY8n5aWwZkmeBWcBEBUgKpyzs1gUzppsk623q0WzXM7n88X8uGkW7XwRmrZrFl3bhtAFCZ3GoCplNd4Yl5OR351OJ5vnp9Pd7d3t6dZ0e7se1XVdGjwWQX2ARGmCoF0LKbk8TwMXCmF4EiIWQWAGixmlqPoIBryDZ7CHswkmkZDCCLSkBMdKigLm6UHJfcTqbImIpHE85nu3Zz/70V9Pu+Nzk3FczAuuwVDmwEIsXDD6qI7AWjofAWbnRTkqPIsLWkTyLAVQEmpYz63CEC9uY7ZYX7n62S9/97v3eg5p2w+kUxKSaG5XAkGM0VtSsApTkSVcQ9xLwFryksmZWdKSC+UEJzGb85EmAI+S9j0hGRjW3+XYbXWfalY9IE0yyLAEEVa2Zp09BQrrGJ974dkbqxvXZ9fPTVBJW6Ir0YyoLdGOsByhQUNomRaqCyuKjdLsjPavUOqISijgXX3w4aw9CJtb3TqMNlD2Qh18h3JN4x6hqWvUQAt1ihJwpqkEUXJkwskGaI3RCg/HMKs40/NqjIEQhh0nIQTvkuMpJYQSAMMcUlWDRCM3xSgq4gpvExqk6tXq11S9cOqk7S6QZxqyylDgKLEZe1tMtEVILkoMEm3jYqKnGJVD8+3NxTLIGNKqQGF9joLJXDXBSYbgJEGdJ2wpUKLqWT+U/8w4SWi59TF2uC0vs50ngA05csepDxUW+WdTcqGcRG3DBTnn+hgMr88oAYPIzIOgyo4H46AokVOpDyIjqZ2Yb0gKfdnTJ+qJMdBg9qEabed8pvYP1G5NJbyTUCVA1Va3rjt2pa+pF+1aAoKqsHW7QgCERKJKQFkWIEgUUrK4bKs9zfAoxN54FNFMdJwjgmi0D0s
MpO1B9jZ1vW5FpKpKVQkhiERVp6rNqvHeO2ekY2KGSBTAM2W3NIBYtCMCyEtqvk7QhVQmpqqPh2cPeaxotz5dRsPPaWjRCIwYoQoyyDyCQI4kIHsQgByRSLp3ZjuoUdlbYGIiDJ6qg0MMW8+b5phBwUTOUHaAQuggrB20VXURLfGasQKWJJVg4t+5w/tdQdVZLY/evBO+/Hms3ohvH67Gsvp3/+dHWuz/1V/dw5nTKOMyStByWZ7p6PTfvvzGuae//pmv/uuX3pv93SvvP/fYDrcc5uJar6tO1owOEC/asC3iRBejKIIjhldNZmqw0U3UkF3kbEYVVJMHe5QoqmmzYgYImNN2L2SsxDAtU11kSI2gziijhXPsnWMPl1TSfR/Wq3WzbBaLxfx4vjg+Xi6Wy8Vi3Tbrru3a1nK8nVp2XJZ1PZ1ujTe2trem0+l0c3u6tT2ZTEb1yHlXeAYhRmmDBtXjVS8W9TKTjInUu5RFc3wQwjAUBACJ5sGlBHh4OBEFArjsEYgKKNsiOqMUi0TYMpPorOvnHCXY3HuEtjZHtz746Bc/+otpaOpRIWZ6mnIesXoFwAJPvAaTBhERdaIOIAdlIcdErJ7ZCxcQEngOLAGFcrlomtPnnv7qd78/j+iCeLBEYYGIciCSzAEUMFwIa2YxOoXzdga6IfGKRM52eypihEOXDU5yFhUgufJS1rvDQHkrxswwWIbnpCKYTwOZCZyBfKbfVuuZRYSBoatyLFCUePH737n713elu1+WoQx9SatKVxuyrt2KQ8AKWIgsCQ0j2OzHAWbhpcmotwZ7zz7M9hfN4Woy2hy7446rkfCKVp7GhbYFypJHsa5QaHQRMIoUwGkaSAQVkrwPjX0h0rPjJMSwmiL/jWBeubY3m8UZacVE2MMg0Uy3EqDPtnHQhsGUEo4Ve6YGo/ygUtJtkLcNDfYYnWAYiUxhzTA48Xo52kyYkr8dDYbTSTGWtwVkeecA3OZiDarqcjo0LFcUSXOkdrk5daQD33pgw+f+0e6ISnYnACHvGs+EHs2Dbgxf+TkbPmWy8heREEOyZUNMYCWnbjS/EKIqVBx7HggM5lACNeWX6uCdQnlOSUwshhSSDosW7SKJKiC+KHOJoGKmemRcFSKQGfMSwTkXY4whe3YT7BmNIRIpe280HCQFjlpatkKJwVECwysrQna+VhEJfb92zrN3THCeRYTJJg6BOe13897+yFFDDEos3htKgRA7qJBz3jvvOInZVJxzohFQ53jYfOWYVWhY3JsqNB2wYWguxTkBppZX3cBlGKgAVrI45713IfSGhRI5hTN7JmaOUR4aEqhGo7kn8AYQYpYYmRwTBYnGJxCJzB5qdGkFApMHRWgPKHPpKweKuiZllcIFH9jBj5wcBzele7cO/KknF6s7KtQcd7cb3vwUbr5697vfOdvt6d/86MO7sjOeTlYr6rniUX3YjZ++8pWuPv/GnfVM52cff66bhVtvfHC62CtXo3DcccvaQTrhrgFINaoEYke2FJ2UxayhB2FD9tIjBTkoHJfs82IGa4sZ2dnNZj1RoCJqcr4YwETee6tBfeG80faYwQjBd11o2/Xx/GC9PDo6mh8v5ovFfLlYrtv1umv70EkSP8IxV2U5ntiUdjzd3Z5Ot7a3tzc2NkbjcV2PnCND5kTR9d1aorZ9ooWy8yk0wg3FNzliVjELWYsNMgx1kuu8ESjgUjhSBYTJJJjIvAdhEtEaqYkWYqfw5CPQszgJkDW0ZmhkknpUuBG/+us3fv3T/3gqxo1xLWGmAsfO4B0w4EmNIZpgHIWIA7xlXw84SvsrfeCClQEPOHElt1wfN4HrzRe//4O+Gq3W6OGlU+mUAzs4VUVQCYIAc1JnBmxhF0TEdtNrmtCbJzYk+RgOIcmCEqVQyMmKz94tJ7l4AiNtZZYx8RKhXWMAewvbOjB0yGVtC4NZMmJNzj4p4Gkt3e6ZrW+9+I1f/af/R1mg5rZGP6bViLqaWmoYDfRY9RiYC0Q8HIFZWWPCV1ErdQwRN4Jw395vN6Yb1bQdcRPIN1qN0K7Qluo9Rr33VCi8wBMKQo/8ToAAUkdwBgMJBDDEmDNFzWZQSJyixPVBjMHEwETJz5yTx7iQOcDamEpM2Jn3UefdwKkJNg32SUaCiHh56A7lAZyZIQsRE7HE3lxlvWPvvcRgQpokLDpJb9YKnfSplnFTq5wmFJo7WVJIplDZzreET56glMpGbU2vF7M71e//yoybZ8EP8l5hZAjpIfTETikzG58suT8yICkrkYHC6WtIxSl/i6iyKbPphDoOQy+T9XQXU6GuosyG6SAnFVKjzyW0n8w0AYkQbnZdxMxRAiXWERGlKW5MyL+1JJLOBiFGAUW27TcQEslLglJoUighbTVgq0lFybGKFL4Uka5dGUCpqkVRcsp2hr2CiEU0aATIFQyFaLTLaneNjZerqcsh21MdYtqDZ4MTstowcX8ol2yS4mMa65vfYS6bGEjUZc5yFyvTiG0PQnd4MNucbm5vb7ZtF4KIwBGHEG3TOsFJjKELReE1ioAK76JEI253XVd4D6hEldArM3unxEGC4Ql2z533phE0WWDUjpQhLCS+JCo0egkcOLJOe8yLD/avX3tuu593q0V45/b2569dfPdmVz968aX3rp/5ytUPfto8WEz95jj0/njePv/iD8488aXDOC5vN76vax6Py1rL6JqCG47HXVxR2Y1IwjpAsSZ0lMYaSRDUr9fewxUm4bXzTYpelVK8UAkhwmBV5wBoNIacpAYPlHYBlsFxYRMU5xiKruvabrU8Xi0Wi6Oj2Xw+Xy6b4+Ojrls17ZyCV5Wu7+zceubReLSzPZ1MJtPN6XRre2t7e2trqx6NyrIqy8IU1BI1RBHRpu3trGm2O/+9/YYqLIOk0OK9FRcxu/xxrqcVYKg+HDaHCKEwACZmoYQAzsR44BAFBVUgH6NAe4aHetEeJSvH0vlyVK4pvvXm+2+881J3+7ULGzSqqxiOGGBfxLBiz0rRcHsbyJIDSmhPLiqI2AO2usPygQeX4ILEKxxToeKli9QKv/i9H4629h50EsmrG2mvEIKwdoIIEhrsDa2jcVDAGi4LfpLeiTASIQdIi9lFRZO5lSEjRnEni35D3UDDHNJuB+zKpqrG/s2gSh2AK+O6mCei7dtTVWVlz1RAvHBFTR+fu/Z4896Vw/d+NZqsa+kqbSptsQiy8HzMugCOFQtCb0ZJSI7UgcgDAhKwIHbiyZdtgQXI82hzGcmPtWp0XGFVw5dYN8WGeqCEeGVP8Ektle5OCs/GaHBAqdraVCvCAjt5LpG8sUCSVSsiSsnAleCIk08iAIl5O5JtJRGYNaZmcF6GfJuuYfaNtOXtqTfXdBwBEmhUtZUOueGjhOEYUJl5vilf284yQnZutg0c9lCoEAY8BCAT63DadX8CLlsOI9vqOfSvJ0RKDCazpkBLMMigKLWnJhFsk028jbuSHiKTXNl8GzTnWoLj5FWZX2dI8bmUAJxnEuLEO0/UNTpBL9OH8Ozsc5GzNj3XPsPBAQZPyqTXyPk4tb
YMEo4x5N9ijn3wDHiKIVqREGAe9ATrg5358HlmR5wisf1zDGnVgSGPIgJPSIxlA/md8y6GGENvb8M7J+BE+hAr8Snt+zbD5ROjfdgkAqrZ2EskUUbSDhtkcZgJtxLlPRMCLYdnJqHZspLECCJiRlCRZEhuJagoFHGyUb/8Ly/99rXfnNo7fXrv9FPPfHo0mjB720YOaCJoWzAR4zGiD8G63yhBFSEEx9441X3oKJLzXkSdmSwDCqPyWYIIiqDKzlcSOh8qXWkxLrq2o4IUwseFW7hwP545da3dufve/i/lYOJvy+e+98wH8+7nv5h87rnxI1+Uf/jbdntjr5HgxzvV+RduLqb763q/qZtPuoBDXtW75emDGw/ae7PdYs+vfdsvq3bsfa1YAB0pS+xUO3AklrLwokFjBBAtSDuCeEAlSTmNnkymKhSRGILz7J1nR1XpnSMlxBi7Nc8Xy+VicTyfz+ezxXIxPz5eLI67tgtdF0IbQmBmm5aNRnU5rjc2xru7u5vT6cbG5vb2zng8ruuqGo3IAYIgav4PIUi36kijVbWJJUYMkKg4ZiRkSPhkEpdoJOaSa1k5LzJmNb/QdPCMD2B7J4Gs9LHTiUGbiKgCYsNXDWqPjssonQornONCRDT28Kg8/Lg8XM6vf3DznVtv74dPJmfk4vY2x4O+78klZ15RRAlku6s5+Zvaig4qIJG8Lbn0YoQgLQgMLnzwAV65JDgC+0UT//CHPzh35bGbrQTyQmUblCOROOmEg1JPiNnl2I6gRqWQ3O/JAm1EIlKoWeJmHRjMnSkpMIw7K5kfKpkYm05mIlsMFzC7vSaCpIANv6Y0qtAcrDXjUKScMnrKfB5UkdfuD7/11Z/ef51X92puRrqqwlIXnuaiC9CS9JhoroiEqLaDhmzKwIQA7QDh2Ibtvd0JTXQBLqmueymbMUYTrFpadzoq0ZcudKVHyVQqeVJWGBBh4zoZ7PwFUOdE1QHROQ8FYPZTUZWInI0YUozKsHoKgWo3PMlGkf+cZ76AaT1SejJakA4Rk1NvTZ4yfm2NjTlRpfEYMRNzYQKG7KTBmXdlAKtFKeaYyJWRT5g0CcegQfeKlHDNDjqnLT3JellBm3PywNmxPiu35fm7c3Y8AVpy1uGYroRCbQWDQZpCZI5aGXxRzZZXGqOqCtkSCmszErvg/8/WvzZJll3XgeBa+5x7/bqHZ2Tko7IeKBQKhSIIFB4EwacefIqS2BQlU6tHsp4xm4/zh+brPGw+aMxmRmbdY209akkjsdkUm6TEF0gU8SwAhUIhKysrKzMywsP9+r3n7D0f9j43At2TBIGqjAgP93vP3Y+111rbVJ3Da26Yp4Y+9Q1G9peSRjcVNfj6h4VZFcaBFtyFxjr0vQ8urdHA8NVUq5er1zWAtOE22vxdWLXWUlMSCd1ZkHvRKhYQAqlqyUUmpKdtadYtQbaSrFmmaQKQc2eqxYxgyikzV1OYxq7lwDCZfEelX9JYYGcpJ4SFc3w6j9e5Zy1u0iZeg9+4Ya2JbnhAA8d8/Va4aOSc3By41moGJ+5dXO4/99bnVut+PazHcfz+99955ZVXT2/dmUtlSuLSE5WUBMmbeFWtkpOXwVYhKWmtWopqlfYeXDUgYIWKZJ9LmCq86Q4t95xSrmWSeTU+HwViGYl5fDaPT/dyJR+/c/GFX/niX/zN2+Mo59+oZbj3yt2VvNT92z86/+1/+on7X/74ve/d0nW/H+v/7b/72l71cpRX7q9WpV48K+lwkW9/+pt/crF7VD/36ud2j+bd0w9efXD702/8UuqPtYwQAYuwKmotRxcBO1lKBLDiPjwp0cLc0wyWs3QpA6aq0m1qwTiO+/OL8TBeXl48v7jY7S725/vL3W6exlLnaT7UMjMxJ5Fkm/V2s3mw2WzPTm+fnt0+2W5vnZ5ut9vNZuMFpTlPqRYzXF4dEEQVb5SWajV5BaYKU3W9pLRCmz6Wk2WBSmy1MmPsO6KH99ygLIaEJmJjck69+3u3gli8IGyBh77WPA4/O1Xni+dSrOqcU1qdrtDjvR8/evt//tYHz9/f50vexfYTp7mfxvJ844YTnmoVyYwi2vZGkEACKoykUDqgABlMRDJ2MJplzLScRXuVXmZOV2P5zX/4T1/5/OcejTDmYrkw59yDVoqv5BOrwPIfgMunjOWkNcBFcTG2W3UHvBidHBnEKTYya0M6b/bVZjeDr2cKdzDQ1i1JIFKNTKNGqioVIkY1Nac+VauuqqjJILMVfXD/5K3PvvmDP357OJ26etRJZQIOajvYleBA7I2TAaJOpfQdBf5JJ2jSUufTvO2Rx32xlaVtWvfjiseeU4+55zxgzqil6y0bszC7KNmcPR4VnFd7ABxGsurkTi/g4gKqijBJ8uXlwQkgI+l4jKpaq5KURELUmgO3H0+PJLG3jc4TbomuWU3B8jXGG02L+lHvuuwxqzW6MTVy6xA1b6p89yfJ5BtYYuYSra2ZL8ISQGP3eqs/fYbnRCfzFCvCGx3tkpxrzBqaoBNA9RlENFftt0UxZlCCzaKzzYH9qjFJLIHQ8MW065mwe/XJcv6srbbw66lWrrOIoZTa6KL+nepOqxpwGtCmsK2qEFDbWQ7euN8e8a7O2gBbFQgpqveybOYdtWrK4sQxIX0CkGJrRfGewZ1P4vlxXoG3nk7tVq1mpExlhtnJZjPPs6nl3JGstbgt9lyKV85mEBF1o+Y2EkJADRCRokagwqz4gFrcl0pVASulqK/+TkKgqrJlX8eAvBlhlF1+NCMIxAmW5B8liGQOgWitKoL+zZ/+sqn2fVbFPJV5LqnLPtSHQaHTHBw6B48Aqtacmg8lIUw+e87S+a0iZLEFEQpMwtQ6CVnFCORqo+ReJ+Qu4whZybwr/VnPg/Vj/8G3H/7cz371n/yd//o//uUffvTk/Pf/Yvrbv7x58cv94+npf/t7+W//5mfHp8/efzhIlnyyf/Xl4aVJLn98vDyfnz8sL24fHOWZlouvfvGf3B2233n0ztOHZbv+4HL3nbtnn5jKZOK2XHOpU9EJOVtbsli1mtaUU5dzlpRP3EUPZdJ5LruL/fPzi8uLi4/PH+93+8vdxWF/NR3H4/Hgs39QJeehH7Ynm+3Jve329Nbp7Vunt26fna3X22G1Wq/DPU0rStGith+LBLBcQZqpiHQpLcRHH7j4/VS4uCsGUu6aGXZnAZj5YwBt0vp4DjXIGM2n1sOyRAVsi6WPYyveE/sv8hhFhkXREgq8Vy2UjshWtEt5WA+X0/i9t3/49g+/+9Hzh3arDC9uhvXGqDoVXatBFFIsF6NKp8iWTbqEQXAAeliwDoIkDFEkYQZ7IhO9IhszuYGt0N/qR9lzXf7+P/oX9z775ff3GK07slPpR82YgAJxy5fS7nAD4gMElACfKC4KJR2IV5rzW69DGczdgOMl/A74smlrN4uBGTSe4+L00eZ4kcN9MGZhVuKJG2bmy7iaFtSYCEKpKeeaNNmEMv3Cz32x//ivd9//o25QHMWOyiNlhs1mI+wA1kBqjVJhJiqG1YEwlr7MZcZkG
AGFFMERaa1dqhnl5n/QgR2dLuWtJJLznWgZVAlcGyAn9UaiVqOCypCvRpAWEQ/dGtYa6iNvAySlNuTzplIslk3HDDWCe7sL2hhSaMoOheXosZq7lTbPpzC4caN3M1hB5D+DW8UJoahu7K0l5xzPWYgmzUfazrrzDlxkedQCBXEw2/d7AGiboVpT7J/YzIfF7Vk134KT4JzZduTj/UV+WDDhNrpo/xVP6pJ143GuN5vOJi8m6DotYxNHewuYfHnDkt79t/skV9rlbnwHhKbbayA6Xd/MvBpPotFcWjwWKcOs1goVSvtLd/KVtjWZZgZJcq2SUhixaMdi3KCiWkspOeeUBGallFrqauhFuN8dupxTShqwledpJaXWMs1TqM4ozR4Eja4cn1wNAkrKFK2lavC/SlXtJIE8TpNN2vd9z54Clw95SyIpBdHU/KVi6TKalBqAxOAD7j4T91V9G6yScjxOQhyniaBISl2QAzXigtVS5jLXY92sN1MpOWczncbDehhArLoESLbOwZ5aSlU1rVUNBtUK60RgJo6OUuYkPVDM3Y8VOGyQoCi2rleP8fOf/7l3v/f++28//Jf/x//HZ7785rwrGDHl/vd+v7z18w/ufUm2V5/6/a/9+K3P35a1bk8mG/XpezI+f3onb1+80336bFUv98enfP3BT58NZ4/ffziw//JbX3r4+D/M05MuvzZrMhTVqnpU1JQIsst97hKAJGZWy1yO+8PT/X6321/tLp+fP7u4uNhfXeyudofDvtbS51RqhWG1Gtbr4e7Z7Vu3Tk9Pt5vTe6enp6e3bw3r9WpY5ezgPKpB1cpcD8eywML+ICD2A6a+X6kGVh/moc65ZRMBAIgt4RZ7qwhD8uUuDXaO7c9NjRt9lqEtIwPMv0czYli8cH0RtqiBsjqdEMawEdUmFfSYoS5V16o2roezMstf/tk3//xvvnZen+Wz1J+t8mqlh2pQ9oKjqiblatKhYDVhvcdBdFwPW5QJs1BhZtwDIzAIR+UImwQF1oMdkKGdIZMrcG1ymxf16d2Xzn7zn/7u8Mpb7+1lh+0oJyNP9toXWetYeUVMwBFWgGKY4A6LKA4n15Yl1NECH/slb2PUc4i6ehstxt4EFCO1EgtkGv6sYc8X/Gj1Ba8OpcTPB5HN5Z2JvvJNDKCLKqvPJ02EWlVLQWVNdpzx4HT76mtvfPOdPwQF7kshBhfbh2TKxU0wqgqX7bIgtRZR3fQrVAe7RVVFAGG4LYBBUl46zdZfoVjY/5ifD8dHi6nvBEvG4hwUUFQApZnVWkip0KXyu06UBrigAA12dm+SYu58RbcyVo1ZO9qaUF/O3ZJT9pzZEP+gLgd3GqbubiGEuVW9b3wTx5DNsPCknOlq8WqtO21rdBt82wh0RM45WmqSpIYiiWqldaZB93UwHGaQmC/CLDHFODMujNqy0QnBiLZm/OuocqAB2lYlO6RFLyCsqcQc9Yo0XrU60yk4CE6zT4ki2V0zG1zmIaCwJjbOZqhhnOPilaUt0AMQIUVrbKOM1Om2jaquFHKuL+hFromw7deAYakYDACTWNHi2HJMxTyxaddnNnmGiCDbPM1K9Kv+cNgPw5Bz5114KUWSWNWcc3GD6IpqVUCEi2m7mzEu0mvi+wIlGADMpVKiXi6lGqYud4jGxp/mWLYEByUZIJe0xb0xe/YdCWZNMxa/izbbLBRRQRIhMwHV0hgB8UDmruv6TlW1aNdlU+ty/sE734Hq9tZWTU9Pz6ZacspZcs7p9PRW7vvUixVMk2pN5KpWm+a56kT1bioD2Zw6qzOvElRBHnW/vX3/v/oH/+g/fe2/vzhe/Of/4ff67f3bL2+1L1rLu3943N7f3r779HMvDeUDfbCBTpwu8FP31sOd0935Bxg3Q33x+Yc/mi563a82r29euvsqNrrfPXq2u9dJn6SQdbXqBao61HKstU7T8Xx/eXFxDuBqt7+8eHpx8Xy/u9rvr8bxqpTiJ3HV96tV//KDFzab9emd+5vN9vTW6fb0dHuyHdaDpOQ4ctGqqkX1eCzHsfilFhVQU0JV5JxVUXVmrJ5BeNCXQopRTJVMEGqtraj1cljonD5/7Fu8FXbxUDhQrSAzTIUJrF5pOoQqPjxSMMzg5SZDK5AjNpA1gDarFSk1rUZY/olRoKiqfZLE4f0f/vgP/uOfPv7oMp+cbO/dZi5UlP0kfqYmzdbVYse+X3E4lOEoJ3s9ZClXWs82hZMQYDHN1XpjIXrY4DZMhgSsiKRIJivKKk1pnrB/65c/+yu/+/eOqxff33XH7mxvm72tR6xHuaUjZEqYwUItVWdH9LwOdRmBClnKJFI80AEVoISSrIAqjkebkXH54qm4FkNGflr+22sd94QDNPwC28xucS2UAPyxBLPlNgQCwYY0OMHYlJKSJKoej3jppfs/3N6dy49WIpr8572uVpgEuF1iXlt94bxPGsy6Lt+/e9crZGhpazIi4URZYe0YZELCkE3VUJxFgMgrFrix4+gw38WnjiNLAHYw02Ww5nGreM5OCUtq84wXhzg2L0skE1U1kkzeFbhZZcw3CWbqIv9URahcPOk61AyD1oUvBa2xc0MaIrL0nDfyugKMwaE15ZizJrwxrorcuntTI5MX84BIcumPQs0kSfZj4cYostRugEKvPYPVi5+Yu1arCy4Nc/GxqcRILPiTZs7risYVMGs+Hg76O2WpTbDVl8JGundimU9kLO6+k3qTznMVYcrZlzZA3YCmurGZRgXgHLSAkYNjsmS4wPG1ob2xs8kLAXrVExYaQTwRH6r5dkxf+CMJQE4SUij/NZL8Gh6ngwD9MMylaFVJqapRoNVCtk4ptQIq3gQ3BIZkdsy8qoA1NGf+nAaUJWAVP1C0JBDOqmU8CsBhYDONF8K9vn0SEfdSfSRDA7QuIINYbRMsr7ak8yZGq0q1nAl/qJI7IIYAwxAnkUlgCqFW++KXvwo1rbUUt04yrWUc91/7i689fvTw/v27prrZnLzw0osnJ3cl98P69qrfiKS+H/JKwKxVahUzlHkulXoUqAx3+nKpp5vTV077N+7kL37+/uOPpqfPr55eTKcvnfb77tObL/7hv/+T9TDeO9188vUX5t3lxdMnu+Mxz+uLi7Uc5OkHPzx/OJddkenrD/rXdMaTj96/f+dBrXzv4eNXP/XmdD4+f7a7unx6fv7xxcXHF7sn4+XF5cXFcRoBt3TQlGWz2aw33f37r56enp3e2p6dnW1vnW5v3epXQ9/3ENdto5Raaj3WanPxbikemajqrHENXWEIg87TlFKsLXI6rK9LVa1utC4REy0k3JF9fdYibf4gbG73WuecOscwXfjQqjQDOphrIZyKQyVTdrq+aAmKfoRURrgjqVBIZ2LwQT5KRlalZctOuRKz1GfVcjlevvPNv/6zbz1S3jq9a8g6HrEKqq8apEpKSShF5xm5QKbUjegGrPdmA6dBpmEzmUI2FEmaFZMxQybWiaaGXi0DonmVLZWL8emLr539nd/9jTd/6TNPyvajY97L+qL0I1YT+5GrCb0UogJVbQaKJPMm
GEnFjGazWVGdXLbQ1tPCW16jOmECVhsNPoaTwvhQrU6+OckKu1w1mjlCKqWoqIJJciZQqgotiVSNsXtaojrgiB6qiVIVrDFjcOqn2zgBuUJffunl+/cfzA9/GKHeKU+guL+6OZZnCakuilPWlKTovLnb333prmqSGIZrrR79HTlT98iEz0abWgp+9EQcPVaURQALUFEd1q3aRrjGCKYxYg96nWoVwC0hyjxlySLSzIZMorWNyZ9LjrwhrITbsZEqFEo4PRosB6PVdwcbmjjHPE8IF39qp/42/WADXJcpgLBtxAUsmmLE8gNc/3FScpbsYCWgXs8K3AeglWcCQWJTr3lJYmZtl4c1zR+Se26IQxHeRZrzpkA6Py3kb06CpGjMk8IwyMtDbbxteh1ni6mILYzuFHU3SlVqERFJzuR0kAxEit9XLawzw8vSQKmqFGeTOUQD1SopWfRrCfBZGgiqWi1Kuk28j6JNmCQKrxjRtNkNgRy1rBiDuxtMgHgMPEr66wiThKo7STK1aTr6x/WhvrOpYVpqFZWckqQcA7zl5LrxjfB6pN2gHx8QmYj5wM1pZDQzO46j4+E0CflJDO9xfX6s1duIStH5Nm2CFYW2P2b+z6WUoP1EkbZMi70CtfiSGcjxcAAJNRG6ZyYkn21f+JVf/415OpZpKtP0r//Nf/fH/+mPX3vtjVdefu3tt795enZnu70NyduTO9vt7e329mq17fvtrdMHItuep/0mP/rB9/IgGFaPf7DJj8evfOU3dum/ffCZu2+8+b+ZbHr68eM/+r3fP1F9+rCcl4uT+c3X3/jl7337P6AeHr7zfe7TOh/PTtavv3Dr9qc247PytT/6E9OxlvE9sddef+HJo4f/z//7v9yPF1e7p9NxpzrlhNU6D8P2xVdeOtlsAJzcOj29dXqy3a43wzAMeeiHfhABTEotHkr2x6M/eK2hkXA7WarZOEbtwW43ug2vGknHRwRtPNjKcDVtPgxoM4MGA8H8VTzRh7+CW984P8tLKK3xnJoBCI+tNurxWi259a0PRFQjGCl8kY6LjLwdTgSQpNTai5QyIQ+VumJ3KAer6/H774x//bU9un51Vg0oBYOhQieTQbKKGnQyG5UrzOthtHmw/QH9WtYZuscma5FuGk7N1DhAiuihyixlX8SSzZr6jB7Sy/540Z/I3/l7v/Czf+tnN68Oj/f9x1gf89m+3Lqs/V7WO673uiozuTceiVk4m01qE+GU4OroHBqA7xCxCaNzADT4mKpm1QO2szThiKBhKfQdhrUbrJfWKQV4mJM4cx6lijA7a73UBrFhoUogENb4+7iTIAxWqs7WSW9mCitz6Tbp3oMHP3y3nqZkObmWI5ANhydqlBN+xrwBdmOKzWazGgZLcIMcOAF92bZitWiprBGpmklU8Mi8lXfadwMk42OHbLr1kA6UtH5Scopr562pBjVfzWopAFJy/x5bDjfo5SsaC8kWyE9Nm70f4SSsSLFN1MtFZIPGiovOXkutuZFioAaxNk7m8nb9k/ubsObba2YtKrbBrRsVqhONRcWXHUUijEDgAZdAuJEEbuvPok+ray2ITU2mzmQzLaqduO4ecQVi6m3aHDE9DXuIcFwbpLiJRwiSvKX2uG1qbRTiYwuRBZINuoLFJ045WcDV5oBqUZWcFtmOg9beBDrY0GjSy/Wrquo0dL8PIhSmxagLjcAFM6vq0wtt+LnW0sRYEkmXRDO1AVDmklJOYn1OqiaJyWSeC1Mi7TiO01Rzzggff5hqsdn/uWhxF1n3uwgRYUMWr59kJ8OFlFAEMLJSfVVzrYVJ3GshmJiC8GxG4PXxaEQctyjafYgQl4NunOmgvcPT/kgQSE4Qj8GLClKcQ4aLtwkUSAojTe1wGCki0lsvw2b7X/2L/91ffe2vb29PX3v19Yvnux++96Pd5UWp7PLjMmtKa6Kfi/b9Vk02m7tDd+fh4w/yppdBXn7t1YdPP/7eX/5/apIvfvWVb330dcVYrW7L5uHDw2a1VdW/+L0/vs/NP/57/3jTb589+uH54/N53B0vn8oh7c6PFyMky7jfSy7T8eJvvv5+mcfbd7rbp8PLD167e+f07OxsWHV9fyttcLLZZjcONZRZqxbVAqJMZV/H4OUE3zf5Ll5baOgRcNWRFv+bhiC2xyAiigfVUJCFq0k8QBH+zWBNcYtWEFl78q7nLzHQdahTIj5p9FRkfJYgHGh7NOgyYfeWd49BLz6jAqRr+aHiqvSmH8OskL6KAiuopQ0vPnicfvjR/ODF8f33Bk6aNmoGmVSylISqKIBCi+rRpKcdYUeUGaOkUftRhisrQluzlMTRVNKu2856NFFBL1as2/R1KhkZGVM5juXwxpde+7u//rdeeu2FA6bHU77A9lJP9nUYsdH+zr5u9tiMGHCETWaj4QBOFBUthkqrPhGM2Za2GBaFEQGraub9Tkowy4CDx+q8LDrDQxtPpsKNoTyOibg2EkGItFAMShshe8Mj9F1mBITMvgrJrJoVsoMmbXGG7qAmZOI8TdYlmDH1s8rd+3cfDafzeCnoWq0mFEOCTcaUXGsoPqgwVQULNJXNepOylFoMYcXp2Hr0INbynKrX92II/6qI+h4sADi5xltRWgstcNqwzybjr64pyykltgY1wCLC3NBRQ59THe9UZZAU1CDBpW1NQVVHb4WUvHiT+WMYdazLvCAiy/swL0S0mUcGI0xi6LawG4Cw3SckMeBQmP+ltMQMGqK783PUEu+iJHaMuBVkMATi6M+zxPzTdUkRQ9tzS480AaAzklHrn8zX0EWsiZ4VdBx6qYkkYIiWUJyaQMe7JGVI8J8BuyaIKGDmidObYIF3fnDJUKmlau1ylyiuNYqb5CVby2JeS0oYtba/8U2ubr4DM7PrIxKnxloLGucfcZ+cdoAk9FXnpRQYV6uV1dg9h6rOHwbQ931OWSQVrT5ENEMpcymzSCLCjJPG4/GYmhCqwQQthrONjIgELr264+GhPlTVWkVSEtHauKPRmrVKfKlZbPEXa0N+l8T5gWy3IKwA1Eqt4l/yxGyxNNvfotc3iCcJFEkWGjAC0zjnhF/4xV+ai15cXP6d3/y1zz/5+PzZ+bOnF++//37XdwDmadqe3hJ0KW9q1Y+ePjw7vctUa8kf/eiD9fZkOo698J0/+0YxlVVGwrBd6eVx4PpnvvAL//k//eFQ7na77rA/cHrw13/yrd0Hj6aDlQmCWbA7OZEHL9829udPy/0XNl/96i8+OX//s599oxQlpNZCmed5D6RxOmKa/HOVefLnNhSEICkpSa1eZZvW2nQG9BDpZ6+NBSIPL2n4epTjdV+EisaKuu6Al6N3zZvzjirMClzgFyq8uCmkBBwNWswARSQBVtU32zlq7RpbJ7dkusGgGeiObCGMRMUyBzOm6BtQK9FBrBQYpl7y0/Pjn/5Vf/9sc2c76lxS7Y7HnDeY5oS+liI1UQ2z1qOhMy2CWXQCR0xDP3FV0M/W761spJ/ERj2i1jtb08H0CFkJitpBZZVR5t3x+atvvvyVr/7qZ7/4U0V0Z8c
5r/e2OXA7YntVh3062dVusn5iryU58UpH9UVbKIRCCqHLamwFqgfrcLpjwF2IdFIAr8hDmeK5iQQt1rEETBn7sawFryWCLOofLGcFFB/atb/z3kRid6MhOhfHcavPIenTB7cGkpSt6DzVL33prfzknW/88f+0ko4xdVAo3fTY0GwEnMXr2YDQqpvNIGBVjRXgrtM3DZsMiIBexJGofl4icsCqspo7GpLBYNA4gQDcvEoM1Sfa7e9MmpNGC0VoUKlfAIOaVk0Jvu/I2jjNXElJg/dOzi4DXUzvGdQtYMIPIQjI/hvU27OWgXytnZjVGoZvjeTeKmLD9cOI9mwCCCK0mtUa6iCvPJrAWZu5mb+SQiUwwyY1YyvPETXv0lgji2gzu2DzoG9/GQW8u4kFk8kpcL6MOQDiKJ/i9Hk3ZjdzdWDfN6Dp62PogYyEGZdFJVZNGOHeDGjH3lS1VJOEdG3Ge/1KiCNrMVh1SnKTePsF0uofP0pS0ox0poDTE4UJ2Rs/Nd9yQ0FytzXA1fEEWWt1pmut6hsVIJLcSL2hUj4dMFrOPZwdJlxJNqCqppQ8ZzdNURzTKKkRZdbSzWZIcfgxB9qhRlWVCOy2rDrys95Mdxbrl5/gnQU5Q2s1c+ZzaKIBiouLtNbqymkmlwhEEyyh3VKDJUnRIJLmS4FgVvP5+SFlGTa3VPHiy6996lNvqto0Hstcj8f5xw8ff+ub37m4fDLNyHn1ymuf/ujRU0E369zVk/r8vBtEU57GETmX+aCY765f2Q4Pvv/N97579fV0WL379jsv5VeRJe1GuTger44rHPrMLNztL9TWd+/dfvXVVx++9+6f/PH/9+7dfhhO/+ov/+KnPvu5/eGKAiL3g6F5jHjMzXllhlprUGGzOEppzQa1qma3zmtFToPpXAYXFzc8YhbWRHvQwwtFrzfdLVHJwWpH87xv8QG+P+bCxiTw/bXh2ydk9nOnQt9urBBxJmNoI+jmH/7omyZK28bj2ne0ZzMeoxI9SvJHqWRI1YNIL0ox7t9/nJ8+k1fvjR9f2PHYg7dSfa+UIWebCyw501hHlU6kSDmo9Iqe2FsR2ffrjlWIasyqOuWt6In0ausuzemk9KhDVk56eH4h1N/8u7/25a9+Qfr+YtRj7S2f7jDseHqhm6vu7lVdn9ftTs52PN3rynaGHbinHMWb4HqoMgpmsEAKm8w3Tr6IGhSmvqPETN16BNCqlUQQ2wLlbQxFEGCiBPzsYUXDRT8U/najmpeIGnGrg5Glvq7AfTpAgUnb9E2nZ6LCiomlOmnZ5Mlk5jBh9fGuvPfoGdbCSq7JNWw0DMAkXl2gA6oT15SJWEPWZlZXtwdsSAgHYCBWmKQ76mpCP1ma2B2RJ02Yqs4S8i1H7yO3GFQhZlbM1KwYCzCLk9yCbStR36CNoH180qajgYG2+ag/R7IAosJW1QPROrrha7haAkGy8acsY9HDeKHakqt5BdJcp2I1IomUaq0Go8rSBS6kJidfL++zJZIYFsvyrGukd1iNQllI9xlilLHXTHnCgQYNNqQwGkBFm0diida8trLEjUo84oeFaRvMQkQd7zPW58UrRTOtZvBNkQ0naTx8oA0P/as/Cb+iGT0hpqaelX1iKiLBTRXHM8y5hhocRZKoqmombujlE5XYNiGRUL0DjU9s2tDCRR593eNcv63wP6kaa4lEpNbqpd4CbNz448ZrJJiFKcduu8bOw6rPplZKqbW6Mmq5SteaBMYDbG3l3yJFUzNCcoJWt8gQU5ecSavELarjuAMBvAPwF7NFNhBdjwpEDbUWgjl3OWfz1k9VDTks0KBaKRlmSbIr6fwKQKBUEzFSoGvX4egs4DSO42giHWXFvuty99Nf+sKDV195+uxZv1ofxmPu17dffVxQwH7dnSYB0/0q/UBI7orZ5pY8/tEFDCf29OE3HlH17JW3upHH6fmjR0/Ott39uy9P9XhxfDZOj85Upv3Vww/efvedvyil9Ovyp3/yBy++9ODe/ZfK8fL1V1+8OowKPR6LqqP6PuKtjv1QmJLEAxmu+0G5yYsFd+NEXN/u5Zxbq3F9ZLgUms0stuXuG4BLK5sozlA0KgOwEGMsU5AAkiC+9IwQX+GuyFHsGU1FKa05Rmt//Xclg0ATKWBmoORoT5N3HwmgWrViRhXmTjpkQcXcl/nJI3v3fc4TJlk/uH91ZzP/8Ml9IzGJqiZByTYpEy0Tk9gIGLAidj5F455rdKgiW+kNebbLCfmgq6NsOtGVTFkUx4t62L38yqu/+Ru/cu+llx8ea9kReThKrtLvS7eT2zvZPivbKZ8+5/bKTkZd89JkD90DB+AAjMAITsRodlQ9GEaKzN5dGhSoi/QLqLHjLhyhI6O0sAU4vaPZPlyDHGSbl8fQ0r9/KYJv9AmOpkYUN3NVf4VUX8fQaL/RT6KJpub9nG/lCf2IzYbjEcPetuelK7IdTmaMZpNg9rpCzZhMUEA1KwwwcgOuzQyrByueAlDdQLbA1uVbt452MmEzcTXbapbejrBRZRJMZkUxCQvFhx25oBagAMWiSfGprAIqNHf9X4JnYKuxtj2um1nrD5XWvK9IhKf6EnbJNn+H3ViebRqwBCE55WTqCz6parGMN6ZxdDKXG2cgFk15CWzOgJEW8JU/+ThG0HR4JN6u470NuQV8g3Eg6f5/NeWucXAWtxAKG4vIzIKrZkt+obMrgzWWAGgMwzVaWInW1t+JUEJSZRBf+eqzKA9ALSLRYswuvvMnFE1e4wfdOlo0Z4ShAeVJogww919Fszo3oWSh1sVWhmZ1nmdSck4wmlZzy/l8bWCpjcJM76qb6UnARzE6u24OHQYJU9+c0cIng56OxJQSs4gBtVSYy0s0O68SrVMUprDpZq0q0YUj5xwyuCS5fRVA8gfRj6cahMtekKXpzKGSo09CQsYZOrRW8TVKSXTP0ajdsEDj8oT4BEJN0UYvCBtO8w0oQlNXUqmqahW/UgFkFCTxVh5mUBOI758xxt4Iv2R931c1iLAoaALOV89P15t7p3cPowJdQnpx+6oqiB7Sj3M1U9FkMhJ7SUWP/de+8f7TD5++9ML9ff/xg9fu1f3uhz/+q3e++417pyddr/Nx/sVf/sL3v/e9qdx5cv7js9c2r7z0KqyC9dHDR7uPn+8PH1/s9v/+3/+PJyfbk5Ozey+uP/GJV3Pqlj0cAErRlAFYVQ2T+DC78GLZsyyxROs26HEIx6+7LMi8178R1umVZjhVNcuq5Y8A1x5zcROdnUJzNaMXYJKinYKEaZdkQ2x0N5VWmktm1ygm/v0IY19zzC4vPYu6tDw+VHFugqJCK6Gz6GrqD9NuevLj8o23+4vzAurFoZK2WtHKbWEvY9FbpiMFduyZCFKlUkmldQYBc3SH+9OhUop1xWRkN9bdLekrj2Kqh109Xt4/e/BzP/drX/rCT1uWH+5VZVUkTUWY81xZ5NZ5We3znV23vZj6MZ8dp8wrcm/YUfame+AKtjdO5ERMtIPpaOIJDXPViaKSFuGJebEiUgHTWhocYNfGNkstewPk8Pvod8
trWe9cHBD0yaVqaeIYN59W0u2VEaFCTTmbkZKhRBEUsFLVODt5O+vRyqRTv9prX/vN+UX+8LKcyHZK52ljHA0FqFAYCsJsvwRNhkIOxo0RHB6suAVBuwU7wRW2e5zssd5zc+Rm4kmxwaY+VTUllVC1Qitmtamo1YVJ3rUC5voOdWtgY4k1YrgmIImItWAfodjbNQuUsiXshvvdoDE5kzr2FAQUxMZqUIDZIsKZqprbTZMWAiI1mDSsWaG1VK9ykzg5OEhYZkCIqbmob8HWOXsCDk09XOpV2q6a9nWjsEtdDXWB5/hGZ7XQJPmn8+osGhsynHJgDYRqcVqY0CC4hY7ZxlrSRBKR1a4H9f59hPg+wsDQNGAKiWIBIJGC2iPRUzjD0Lw5E/P5QDWleg1fTWlIKQPhqykSm2pKQWqLCuDs0IgrC6c3mnVHkGO2bWEVbtTwCvWlAWYGesSlbwlWc/QjqPEWHtFO1IIa3VEuiKlcFG5q6s23kUbzHjo5w1krISllN/n3k1FqncvsIw1XH4Z9lrvF+PeRbIMJDfcAoiEHTlgA6B82QJDGG/SzZBZC91YUiQhLqaa1T8kVGmUuIkiSkvTRuonUWkiU6kMIQVUaWduYwSthIi2fPwGEWo0kkF3xQMkZqMd5LFpKwaR16G5B8rGcl0mS9cOqV61zOY7H86O+//ETffWTp2/+lOz2T/PlrVc++YLV/K2v//ndu7c/+9XDx48f/8G/u3z4wTdOtxsrcv/+7WHq3/6zr+12z+7dX//c3xp0PpT57njY7cf9bvdoHi8uPt7eu90N2y2QcxoACDs3r3BKmiZLbv4FB68y4SczBojLYbsGcNokwIxOZSBBRa0l+C6BJbkZbjBVlxfhcjxbncToZc2UjWuHdnidLJuqkhAyI4iY2Z2LtXg9nSgOOJNMvtoDBiC7iYenfrjyJDk+UwBNKZvUUmbQDtO4e/sbw4/e7epxEkglLp6OH37Yv3B//l53ovWV4+G9ru+Zi6kodFRz8FOADDsoMizTqbwiPN4aTDpFKuwgkvLJ8fDxPO5fuvfSL/3a3//sm6+dDvnZOGEuNffFxJgn5qKq0h3r6iAnO5xe6GafNtM+cQ/uFHvD3rAzXgmOlCKcwQlaSIUIaNVsKnU2lOw4EbwDNrMKGimm5m4zQb9qgc5nsTd7pOuA4HvPAMKpUAuAaM5F0QKtKlIR3E8NUJICiAZxS00L/G0WWLE6qgxIJVkhCqTImFcrdCOHd7//6OOr0m9P9nU6XV/ZxlDACawkqAYxWqGz6CwZV2aDmVg6TbaFkLYV7WSv/R55ktWEYUI/WXdEhwItYIEWiElwf1S1GAvEqtLFvkXUCGVDniN/GNql8kTGql7gekhbfPZb50hJqXW5oLhffqS+6ASC42UKd8cXiR4Tlq0UUAyNRu485MjEsuQkr0VMnEoN+vJhU6aUJPn1l5S1FhiqanZeTlh5sbHnYaC5J2Jr76xlleiLU5DxggNnmiAki5kWjfblBrdWpXU2EpScnxjyxTAXQuhNRVGzCNBmdtiazZgLRtXi4+RSm00HnfYbVAb/gQATGNqYmFs1LqAhJTFzQVeht48wkHWuHhtT32czrU6lizFzKzIhKRc/2VStrnT04kVymDyH+2PbL8bIcUACTdwb4WZpEkVXUwxTciQ3xgjIhK4q9SJQtI3XEhdjI4bSKMJf+w61TJHUq6iaUoPw1vx4gBgVGbOgaAITaAq0/U5+caQ1ZWi1oE9AboDPIkm0KKKZrZQsmaYZiVpVa4U7UKoipygWTZPkVrZprUW6ztSCNuhKGTWam465lU9o4xTQMJM1ST0IVUuCTVrNU03FTEcyr7quz54VVJhPhjvDcG+a3vjki+tSjuN0JeT7Dx/Nu+O9e3cg9o1vff3Jk3tdJ7/6659//NGjaSebExEmpnqyGW7ffvHJh0+fPXtYpserIaOz0w3vPIBhEpTp+PY4baAv+R3sV9ssdxW9j6Q6dsUmF6qg+fQkKtEXK6S4S6jLtt041aAi0GopJa1qtOQIgbMQAoNMbtUbvIxgf5gtGsJaXUpBkSilHDQigM5TqdtkQRKMwt4UZpkUIJu/W8nozCyZkpZS6i2eWhGBipvzimtGvXbMhlktr0TqCiNqPUJE+gxi951vp+8/1Kwwcs6S8nj+/Lb2+srd8eRv1s+fvCLlXZvMMmSqJjL3IJCs0kLKk1BREmRYr3DEjKLrbrdJkldltGe78wenL/7c3/rSV770+WHFj/aHD3dzAlKCQmYkVRqziUzIE7udbva6OeoJJ8l7YAfsIRN0b7rTulffM20T7Agcq02kFKKoVslKgCiqE1BaOWVmCveAcCovdKmMzGdsjYbrT1QgZo6MeHtHJcQlgq708OYuZ4ZbUXSFrFbFy99kyKI+1aoVUDsqB3JC7sEqLAY1TLBRxiGvsL6ox7/+3qMiw5XmZMe+O/a3JgBWhUIU9ZkEARQP0MoTaAcmwSns1Cogt2XHW6NtD3Wzq6sDu4Os9lhNHDBD5gALdDIUokIsJRBUbRCKD9HNJ9WYQQhrg9/cfylbWjBOZ/I4qrgA+VGGLC0wfZF4+BY1eML7QFlysWsqCYGqZYepF9FlIEiNIBo2iDBqGIxLYqmlLm4StSqYnOtRa5mL2y6KBPJZrNlMuj0w4FVVC9gBG0Vcj9paGnGAAKqZVU3ZReha1ByUJpxHjixJGTZJiA2fUd+FcWmgxEtBAMeNzQChUzmi572BFTSYHJIbSuNXqGGqS29NYfI9kFqg/4tJatQVGt7Idcle4uviJSok/4Xe+JYyk13f9zoXM5WUVVUovgHJDKaKhLmoUGot6i4c/lltqWpIwkq1BVFBA6AYk/GgtqmZmTHAFosvRYnBAKTIxitts+bofWLC18prU0BMdOl1ok5pJirRwgqc2Bc1Qa0xmnRXHcfIWkXAawZEVEIxinSTZ+d1qyobqYTiW33Dvk2qPx6tp5UkpHuQVa3emwdsH78eRSsk2PwuJ1OtsAlMBBWTy2C8QkiZPVPLHK6vDFxDtVAwrFOQz7Xkvnvj06/mjHEcv/LVz9X6mctnz548fnRx/nS/u7z4+Pjo0cVLL79kHD549P3XXnv913775Y8v3u36zTQZYPMxSruu20/z4eriM/O0h/QAhs3zvt/lvm42twSrsXbrk7Msa6AaDqo9SEUUbQpAIUnKXETEWGlGiJaZAtUiyd/zrJTkN993lGv1rrcdkNYyewNAVF1Kbp+6Z68X/dq3ElWaLiMDQopZVvOvZmECMgmtkYxVxaCWodmYiOSQaZCFKBST2SpVqh40db32ChFD7tZPvv+9/p0foJdjQVcVaT0r8/xs3h02r947brffevzkna6DqNW96KA6VaiUQQ8akkzGuTSFdEnAMpZ6NWtfn/R6/5OvffGnf/VnP//66a3h8TiWyynzlogmVJopk7rLsIqmHpJH5CsMdgUc1EbgADmABykHxQg5Jk7AERjNRkMBjzQ7VD0KFSyCQlZYhRXSVAt8cxDN1KSReE2bMQMbB2aZ3LvOdcEpBHSTCJiq1
doEqbLc4KDcGmBWgN63mpmp1SpSBSk8dr0q87a8iM2w0ZAVg6CglGQn24cfPXr48cVZPr0se5FNRjldX/Y+8EkIfyUVU0ilexBjA1kBgrE/cksSe2522O6wPXA7ya2Dnex1NSJNk3FUHCGTcFIWl/iES1Lrk5rtYPi9iO/QqLHfw9UYFqM0H6O2qW6tFdYKV/jRW+jIS33iX2mJegmkBk8EFsUQmSVLdJON7RWv48WuS5lV4JwO/+HWRTXgm20Q4AERqlYDO42fwsLpMBpoQidKMkoLx5z9G8wW5kWD4eGuU05iblt+w5LUxW1Bf41ZNaKfN2VQpsJPNKDKKGYURiXaIjw6ndzJOFwG6Wq6rOFUNU91BtOWovxmVnOTS9HWOv+v/sTUzUlVzsNywnfY4SbRufoLp5Sq2TRNIuI2LI5MJNKES6OcUoJBslhB1SqIrU9Ls0uhVceL2ECPuH5BDtBI/NcIfUt/0dCjzdni0eX1vW9/vA9qZWKwA4UxD4FXSiElNHGv0AatLHdZUq5aXZa3HDAGZuHcW8dtzH3gGxsLSm2pnICpHx0JYEgN7EkQqmUukt3hQYUiORyenXPgb2ipnm6yk2BuoeNnQhGibSVF1aqBzGQiFSgMSxv67pWiR6Izq2ZpsxFwZaaqZZpKzrrdbrP0n/7UJ0W/PE3HqVzt9082m9PvvfPtH773/c2m/8EPvrM+zbfvnPV52m5FpAKYylGrlBliq5PTi37z7rR/CcDVx2/td6PK0yd6/tKDN1PePz/fiZ4O/ckwrLoh++o/xVQV83Rg6mDFKVxJqMkkJVQn1kqpTLnzytVIN9UJcRkFMRUGEabuanA+YCP/q1ZFGMkEecdrFKftuz0jkMQEzJRMy2aZzGbiXbtkNyoo6FSyqEA6MsGEEINAFlWrGWGaMUwrnVm6OdcES88f/jj/9V9PuGLNadoQqvNUutWaG3zre+987a++9fziort1FA4GwAomx8lVJ5l6EUBNmAoNinU32GU9f/5c+3rvpXufeuOTb771xoM3Xh5O84h6/nxi2jgoJNSkJRHqayMNxmSVVoAJGKPrtcmwR92Do6ISe+hBMcJGcAQmn7kWYM4oQFGdDMVX9CRxDNkUqrUEiVbols7Xcae5lCysXaDhEcsj7I3EcuwVXKgnDAGwiAC5cQK8eCLMezNb6LcErDIGrhVajIUYK4+UYypnmx8+3D0dbT2cXik7WIYJcLo5ZJ2CWqyFEBQJmy8aVmprViuXuJiHvJuHKzu9xMmVbfbYXNpwwGpmP3HDQkzAZDoWmcUmtRE8AsUdv9TfcTR3po6oqaFNnILU5qCaVx4RkoQx7TW415NLWxuNpV0XAG7N1F4zwqo04LHNDw0ws9ysggAAbYuOxuIimGmplWDKDsRQ20J730/gisNlk4FIMjUTTUlqrbXqorz0T2ytH4rhcduhFG+I7gbusTzcrmRJBCJWqy0yxojTqma+Jlli4Ylj/tamztrm3HALbv8I3oWCoFLamjzPRHQll0UP3a4/ETZnManV4P7QdczuXhlWIfHmrjOxkKFKjXzmjLbkVtTqJUSICZBSNmAu85AyGSXMdcfh9k/qBYblnFU0O0wWbF4yvC1Zm9Cei2mKH5F45MwgKbbwBbULsFDf2ULnXlrPGx+M5A26AbzMMropGg3J0QttG1f8J/U6x0tcKr9lzEzWnm9t9wKxhjHkLF51uXUJAJOWEFIS53OKehGkFrBKM00SEXXTB0e0b1xR+GuWUqtqTpJSrss4XE3adLz9qXEWmszMwqkskpEZRDQlARKtajVKhs6O7auh67pVTnOBsajtn53vKKBMXV5v7z4ok33xyz/71he/eLU7f/+9H/3B7/+PV7vLO3fvDuuyvT2f3e22W9meYrPRW7f7Lu1U74z5AwDj+NH6xC6f3df9cH7+I9WzfrvbnL19Od56/OS2Sb51+6zrtqvVybDq1qsTs1KKClcysNSRoNYi7EioWZc4HvciItLHs8OfONLRPxibhI3x+PjoivQZIUzM3LInATRkxvhIgA7IZonoYB2QKRnIdBcdgYpKZuqzQpHJZJZMBchAjjob7shuPm6p2vc6m5hWdOPT59M3v7YqT/Kqw2GG6ow8JEidznX49pOn7wH11j2ZL2nTpJWpT1WqFhGpWgCxOVGIYlKQkQ7Pd3qob37x01/55a+8+MkH/UlnPcbd9Hx/xSGllD1aFLe9YL9wiFEhgBVoURTIUTiCI2ymzcAEHmGzwZPuBFaimBU1TGYTWXwI4IAdRAnVUiG1SW6XPzf/2buEprD3+2ctV1xH5RsmwkQztLJ0bUbhEKEYFZbC91GLoxQx/WVB8xBllTBBmcFJbFT0Zocka714Uv/8W+/vZXuJbgau5/eabp9c5jTDkKJhdd0sDWZr5K2Uff/4mC9weom85/YKty5weuD2gJO9bfbYTCU39jgx0SbYbDabTeAsVoxaDLOILnr2MIQwT2UL9+QmvdACMXCxq4N30aHJTzCK0Wa+gWTLItgJy6lm4eCNpn9zDkHLsnYmcO2kpcLcDVbUrJQqYjktCkJ3YfRY6QQohmrVu9JglVkOfpM66u22RAoThA+3l07Lrhs0W5DrDj7wvIpsFvbK152KE19VVX0IKhAkM6UF8W8Rp/qF8oS9eFSwBU1t++TdbM8xWBdD+1ojDZyczjzR4MQ5jM2UfIdGEEqB5ffFe63OZRep6h1bil5cw2e9lOIbamGN1Cu+H1CqhfRIi8aVhIuIijAVr6GKSi/BEmjwCa69t9rjF1gDkohVVffHb2iKf3wL3rpf7DijGsZwrU32SxRCkaX2jd9o9LE3skFFVQ1aPZ+1R98rJKItpDMY2+pBQ9ijqFkSmN9frSGE83vjRSglgox5NevVmsJcrFidOwY1GnLuGjCTnSvgZiMAQKaUGuMfpRZpXloFi5RMwt26ZaIo1Sm2iHEt0rtZMcskcmalqu5TzqYyTZozVAtoKZNSzT8rqdqVcoRCiKtxEtRuWL31M2+9/uarP/z+Dy6uru7dfXDx/PLRBx88fPdimg73XzjJedK6u3X343svbAG89Mr9acw4u5rwjuaXLz9+Mp5/8GKR0+2TswfQ8slvfm33wktpGJjzy2d3T7th1a02OUmZLDOXGcYiycfcWioyey0F2QvA4FqGjQZlaajY5EzL1JEUqDA4nkLXiUJgmcxAxwCfs6GnZlUBOkNmpqwEgmBAZylQZiKLZd8xp5WWfMGf2yU4GRRkJQx1LnXN3ob9+WX9xp9z/4FuejmO40oFHMpwHMcfzPKtVb4Y0PcYxlI4FJ2yYKoqmI2JVjoJqisKcNS+y+P5ONwdfuPv/fpnvvAp9BgvyuXVEQPQWbdZ6aTB7IbmPhnNvPxUgBCV8JXwtawH2Ag7Gia63MiOwNE4khMwivN1VWZaURSzyVvhyMSEAqmTqPyCX+jFfHVTGW9Sq0P0UfBe55Rrp1HPzE62ctZnq7ElFCqt04njXcPqAGDg3qoswAwFOAHiRGhMQCZ7YBR0xXrrTrofP3ryvfc+vj3cPcduQ4Fl
ZSroRwyT9dvVTlAFTIIMI2qmqfGIvnJ9Tntid57i7s5kZ7d23F7i1oHbC7t1yVuHMmBH7A178EhO1FlRyEpUYFYU865cUUClVZdTO2zAyAXRYogwtmgysHf/04AxmiG27UYZiGBJm2VJgPPHtX2VXFhBN5opAk4IgqGxrdhCoDXuZDxSALD4RMakMWgYaDR3p114wKUkEVmCWvOpZiS35aXkupuNVvgmOuKYpIgUjRfmT3zRvdcDyFSXMYvKjaHkNUAfPLPYruce2l5NVDNEzxQXiDcuduu5o+IuqjSKO294bjJqjV/nBKYbWA8a8GuqJim5RSPCORIkwjRDFZ6GRfqc51JNUcMxkwY697hWFZGqlUDOeZ7nMpWUc+c+fKQFASuq2jbTjGdwyRJxu0AEIEx6XSV0ItqN7tBF2Gq0xSPFIWJVX/mwYNsIORYMbHuLJSVajRVPlCQabbYias+mkXBldxMB+3133IwQgzO5wz6MbGf4ujel+VZmimOdEUIC/FdZPCEovs2QRMqJDeGQUMqgFk0ipjbXOaWsMdWtIvSlf+0Me2dNgqqVbszUCORGNaiYJJG+77UqE/o+TdNoQDF2siEns1lkBRTwIFwZ55wSUQFUw+XVSOk+/dnPazlS7M2fel3ky1M5TJPSBjM7//j5eDy/+8JdAJvhTDs5uzV94iXkPHSC84uLv3z7X13tqmmBfPD5r/y0yePZPhR8/+GPPjuNZ5vbZXOyvf/CAwBX867vBhYljayqKswKyboIEzRJOzBoroStJAMWVg98aaAZolCyTCZhski6HSBEZ+xpQhng0q4V0UF7ZSZ7WjYIci+1VkuWh1ylsiczLQHZ/LB7nwQFFZPpuvazYvzBD6e/+obuPhxOZdrvq0g3pq7WJ1rftu33h02vY49R9kWFRQA9mephAxSIsDrTUgFIzrbJJuPFfnt3+7v/4Hfvvnzn4vFehoTeZEiq6Ieks7ILXDKnpKaS2PhhMOf6qPtCkEYcDQfYZJjBiTgAR5OJmIERelQUhUzALGLhWKNFbYYVolIUVioBc4LuYujkMENjDqGRL6O0ZoNL0R4xf4Tkxr9eh1dtPvkxa6NGuWm17dNVRYGYoDd4ECzAjLnHESDQEUckiB7F9sYDvve1d3Fhx+H2M02TrAq6gtXMYW/rIsMkW5pW0wRdSRWt2UwlHXRQ7Z8BZ/mFp7h7CVzp9kq2z3FrtO2lnRzqwB15ReypR9gRmKATpICFVLMqhgkoZlOtJVEVM+HJuQAV9Lt13c4uKPt1KPdYz/g2a5izBajgYc1KKeJTQ7LJBqBV/TRQFvDeDMjtoltMc9no6V0GXHukEKe/0pMR28+jDVbdvkNp0UH4UxmOleKbjZbCKgpmiyqglWfwIGhLEdIoYd6S9iLFLJ79YAw1vNcNwxJ9sYRWg/jq70ambWnUADedDhZcaH8ZsK5fE8KU6qSCYBg6pBsvFPolNTNz36h2WCXlZssVtwV63S35nNHRMpOAmiV8uH2m7fsNxRsyp+yKv4ghNGgubYVCTTNzKdVztvQrLJxsxOyiUcauE5uTVhBosty4DwAWQ8d2VVq9hIATW/Z2RpXfDdEbz7J/HWaaE73wWlDqnJMCJdTdZkstZWoqvsUISwZvVZ15cyMkpUKrVix6VcDVc3SOAhHiZ0G6LnpUDD7yNQNTglvZkQA1vCgIQKvWuQBY5VXuM9xat92/5i7nUhx3wKe0NZrwdSvNaJEhxynqTTi7OpnARERNm/XrPNciqsLOeCAElkGTpKYyFxXJFmN0ljIrpEzzVC6S9NCcM9T2RH7h5VPJd+dJARTMECOSGg/jOGJeDdvXXv2li9037p9+dipPL6Y/GtKt7ckm9SPqjzM2P3747EfvffDtv3nnzt2z2/fP5mm3GmR9ItNRNutTsAACUcmi6tGjdwKBN0ps7isLtQpIodFjoiRTANkMVgzZDZw7wPPnAGRPybIBeqAHEqwzrMzE0ipVqXmQk5Oh0kadUk7Iil6MaglMgBkKYdCqVrTP+XCYxz/9en3/3dV8qCd1QhFFKYcOeH/MfybD7nRzWpCP/b5WgRQtqodNlzQN43GkKK1POQFKuGV6OV7qsB3+8X/xj+5s7uye7GWTBLCZKNCs02jSER0ki1atVnOWWpUptlxAfYkvzVf3VMEU8CxmYCKP5ASdlDPsCNRiKNTJ2zVKJZSihqpaDEqtcJURq8tKYLASE8fkK+5uQF/+5Ku7OzdWpSHmfa369xqq+QPSrsmV8WC2MnthcdA/GwEjikjyjQi0wilDgKO6/kSypBWevvf823/+nbRK9hxXJydzNxTmytVkqw2GgvUkk4gJTecjdabWLBDIngNk9RTlldXLz3j3EtjLds/NzrZXtpnmFXZmO7M9cQXsIUdgokziS5Tp24/E8SuIKcWoCiitOns87hNVwuQk2oJGbLmOS0ugW6zXFo+rCBAtOLVo5qiyQ4sIzi8CLAx0abFDujHmCUQx+9IoT3uA473+a9x1wZvhoNV4sIwBfmx9MK/MAvKN9JQkBoSLdJ+tOXbUGQ38BOlMKLa6G23qrapZ2v5d/1Ai5v4OQQKGG3f4rNo/x2JdCdMayhgLa6lWsESPFjoLttALp+8rACs3Br2MQmSxA/jJW2Vm03EkhdLFXyy0QpKUzFhILNIrdJrDA8altFoKFDnnaZohBJQiXc7TNPV93/c9l/EblpLLb5VruizyJgAiSSJZq78FwbWdEOHzbDjtVW5e2JRpgeyCWHpJQDIW2Co+rVZIKdWvuaoWrSJONbTWkYOw5IpVU6cMO5HbS0F/U16BSRuNU5iZEFld2lyWBvNtPz6yVXFbPHEX1+hq4+Mz9PMiNBNKUSsoXpQ6NK1qqsWESSSnzgcrKUHIWqosbEQw5cTojJMG4aAC2kxRFGZWCY6OJ5WSvK1XLUmQfQOaoaIYhNAkUpWUSXIWqMNJZhliQJdzUjWoSQYli2xVp3kkZIJkAOSKLKjCdMzSW8qTHV75xBdfPH5hs7HL3cUf/P7F53761a99/Wv9mg8+8XEePvrkT51+phvG/TDuH2+233vy4/vf/s5l7uW119+Y9s+lq9OE3G36YRAgpbxZbUisVoOkIMh58b48OwAECZTmuQFYMgOYVU2YwOwJGMhAb6T1Jj05CHqzbByITGSbMW/P1kWmP//GfxpOt59763MjCnPWXE0gA53K7naDUsVm7bvu4/e+LY/ezsRxkGwFfUFRKendbvufT7OOHCbUalOuaaIrrzK6WlhrleScUyiKSfISa55H0/Tb/+Af3V7duTo/dtuVjWazsiNmyEpsUl15meFiOYEreVQrKjQm3lZg3gQb7GicaL5wcAKLyCxWoEeIKljIIij+M6oTpIrUeO4cTMPiR1Rpod8wA1FVuTzPaBiYWa3FKCJiMGmsWYSBrDlnXdRhTgOdlLpEkuUPAdCn0wIRmmoFZoAmDskKUFA7m5VHukWCpdTf4pMfPd1/tDt5cSiXEy2VIe23d4m+mkzoi+axHDrhrWEYpx10FjFAi8qonXK9t7LvXnhupxd
qE052aXuQk2nMPBB72Gg8mI0OIRCjcQIrUIhCs9FYKTUg5/DsgT/LkOIgcqAFBkN1ulRk0MWPQtrlsECNyZtYZuiTGtxrtkxNed3dEO3fDBkO6/pEZxk/OykYoRcyt/QFne/oWnitGgvisKDadNMbNvDamp9lMPHMzFBLMTiVq2mEWsatqm1XzdK5Qtr0n8JUvWQLwnPYk9zIFKoWr6lBXvIG1/0Xw2KcRteZmIFsXubRyCl9rCsNRNNS6uIztYwgU0raBgCoYYQuWRCmWG4I117TrFv1QFMrNcmLEVlS0WKxpccdVD2LqZpVldRl5kSgquWcS5nN6CbMzklPKZtqiTqDIsnzUJvqIrpVL+mSaNXqgyI3HGrHCW19UytDvJpwHIVaa4Oy2VBmA8EmMm6ic/+NxiTNGiaJWSklJ+Sco81dHn3Pk0Q1M9NafDdUBhoOwsbGdJdKEl6jqrlJtiL6eomljUClSSgHRMQCMmAUdk2aBkkwSxK/x4EjQSaYpJt10uhfnDNPE6YuO5rhspxGE4hq0KsKN5Dx0kdMmJxFlgyWXVhMZcwpVJjMipNmDbGFGk6KEIIC13ijb1EAAKxq1QkiIhnZXWr9shYFmBToIKZqCVnnY8q82mvfb377d363y3J2541/92//zaMfy2YzGKdhXV5+VYYNL86P69uPvvyLLw/5jdPTB+fnFzJ8//HDcn6+26yG8/OLV199dRZo1auri1vbU9+X5UI6eh1EUaNItgW+gmt8U5Zuhpaqfe4UvUFo2YQyMPW55mKDYi3oiAzNik7uPOje+e733nn3W/syvvft99/6lc9JydZBVqYGDKlgzppKrTpbX6Uf+vd/9Ijvf2t9lqfjsdTRKnorR5Xv1f5bfTcl3uo5XxmN2aB5ozqbGYmpjBTCOmKiFEg2TAQNHA+Hf/gP/+FrL9272E1MKGVmT+mJDFlBZpREGX0mHWdbgyoRZRgA88JMaRVqJpU2mRVIFRawQseCCtikLEIFimICC1ylCncXQeClokBjVsHlvwvDIlczWoO/BdWa1wRdJOLypMgKanB2qBnMLBJB0+77kcb1J7v+B3H3CSok0RRWaALL0CnlldqMku1oJM2oadbL1aPvfihXtOeQmmsB1ygmz05OD5IHHSdsRuxZ69OLUvNZFktqJ5v8CdPnxyJ2cnelw9lrH8vdK5Ej1lfzWg6oR+XOOAI72GjYw/bGPTGCs+hRWQQwZkGtbmLuYFXRiVbIqlp8gYuPdU3NNXqRfRw1MGhYdsTmPULMnYQAL8oXKDFQNTq7WsnYitHWKAd507vs3PrXGjhfy+emRtDCTFBa0LSYwpo2XDYgCBFpySPylBNjhFyUwGwJHeETW9vfeJr0+k4Dn2yvZEFp5XImAAslq4d9rzdaQ3o9p/6JQ+NfMlusTzwVm/pn18BrITEKcF4ngUb49uW1dP2SX3gCZrWZt5GmllMqtTIg+kB7JCU2fVitcynVTa+qxuAhpQxiLqWG1TMd8ctd9qGA86zbBuygTosQnl4MmUlVYailqu8NhsE3VkeBa21eYa3Ht7iUcdYkCVvd0HxEVF1wJe4hHINjqud77/urquuLpOGQNBcOhcSa4rRzMxe/8xpDdkY/mJKvxaxuFwO2waIXOa7h9ewmcO+QokUVyf0cAJj53oiFPdc8+NDGXP56akFuj5OqZjRrkjm1qqT4K9RqOSc1pJxqqcFRj87ezCC+ntssEhCYEi2U1RBHH/zkwkgNY632/76cWjXqXhEW1yVDGWIQLBIs5+H7VdCQ22ly9xV/ZqOU5cLIaH7ulJyK1nmcR+jp2cn/9n//LwgIU5lsmlUVKUmZ61yu8mB9vlP06u7dF1Rf3L4+Q8rucvfqJ/LJybZWlSz7/UgRybmUAlWRbMJgZqo7OHYeW1QnmAhzUV11a0VXZ2HqAbFO0jpZZ7Ur2IAbqRnIKutyeneNDv/+9//tk91H/+U//+f/6v/9r37zn/ym3Za57Lu8sVW2lawwopYj6yqfpGxS+O1vPXr/j//DV3otazU9rjJkzTqnr986/Y6kfsp92XFzKlOZVlNvuZjKlIxCZEoWVNCA7D11ggj18uKjX//13/jSlz+z210Ie5szq1ApCmTabD6NRvZ13nEfdCHuEVBf0wSooZpWEyZUUMlCzC7oVbPCpMAR5r4QFTabVUINhdTGdq7BOkMY0wGMbrhFzagbA/cK0mUAlHIjKlqwo7Obl1qt7cQGwYIGeGwLyMoDqIhb1bk3cPUjB1SzCaawrHoEaRWYhDCtTCnvnxw/eOchD+QVzfXMk2qFHW2/Hg6rYUrTHhuhQlSg0DkDJ8VewOFeHo6ytdNX5N4bH+vdK8BmcFfKSDtSroCDYFSMwB48uH8n9KiYYe78LNVQiNkw01CtljL7FIFcTKGclsWWEWOPy7UJ3zK/ix7vOk8z6K5OG44XiTREJAnPgFIKzAQpSfJoln0uFb5+bV7pSc0pDiFgEs+oERYdhW5OvQtr5v/fn2WUF+8bpoRoeDqaWo1A6E/wdeqNHGGQZuMAI4N5TAHEi7AYG7ct03FegjSOxsuMtOMLgxpoIITS1BLhxjAh9TKjiP9elAouR7BR3eAxOqKh+FDbYKaTVlXrcxdTZYJEYltt3bi1btIAApLMV0zFDfSBDbqcq2os72sgQXX2sm9Bpqhq12WaUaRWVdWckz8otZSiNackSa7vTAO9/chIo2QEZ5JREcet9BVFAb8HXLPMgW+sn2jlsSfOlm/qDRW2iJjbVgThcBl6xK9SGKtnyOD1sRUJcWTc09ureDOYgmEVGyYzhiWp+XeYRm1SFQbLKUYVNAlDNNPQtgZwRoO5ooaNnCVUrSoi4YqlBkP1gVnzMtUoBKMSrT85fVD42hJrD0mFgUgRLQPJ921uqmrZOV9hI2iI6K055zatoVsJBHgmSCTiCVd1BnOkZCdBmmpNuXPKGUxLyaVWtVm1uKtg3/dVp5T7tNqSMpVKYVGaVko1lbt3XlCtqgaRUrEeNiRLnZOIqWbJ++OUcscyZ2RfpZCkhyXpEpCKwpCOR4oM1TIU6A0raFasNfUovcigwymG0+4Ifuv9b/7523/0hS9/+Ve/+qv/p3/5f/7sW29+5e9/6dlh7pL0+nwto6kNOHaa6jA8uTr/xjfef/e9b7/34cVXN2ueDHxSbJ7kpFwU+bM6vK/c1IJsKsNudyFdNsi+HAQZgEx91UJkIixryDlnoujF+Ye/+iu/8nNf/eJut5N+baWAxbS3KakRxefXyJZRA28k3C6u9Y5uWmWGaqY0Nalut09RsdmoBlRFJd3ZqhIVrE7cp5U2Zy2ONiP0Pf4fi142YMj27AHWVNrR6YKgmwpLQFuAtYiElceDlL0YXV6nDR6V1iJgKAdbqR1rN+ARGgqoiIpYOLpqZmGFZk2PHj5+/N7jbtPbwawiQVpLT0yGAfvNMA6DT7tFlQKp+sl5f95vxiqjbPKtV3TeDhc5F9RJcQCvILPoTnGM3RUYYQfjERghky9wnIHZtPg+QliF1qKTOmVMFSiSsjAJk3sVLPQUUy
vQ1Ahufk1dkwMg3egt/1d/4kH1e9AClWVJ7p7L+BtkkG1JpP/c0jW05iRa8PZ6IXlVb3JFPJyZnzTeeAsEkej51sNPdDWijD1YArlmZPvwDQCa7X6QdUOjOQsk5xTWNNd6lp/YmeNHyYJfjiU93HAw9bbPjcSDcW1mUNDX9nkFCAYkmBIQRGu2S9oMnxVhxyVN7uqopDVxgDRX1fbvjBor56xu++GCJoSoW8NtL/w1p0nJMBuJubsZwJQSgC73bNYiXvT61mEP6RnJOzlrF5QQZduzEZ7axvD5iIhBNopTu76etooWoaPydbnSairwO+If8poN3X42MIskKV7Ili/4916Lk6/LRpEguakx0bkCKfvzT21Uhmgw1ECVVrc2y//rW++/sFZ1GrgIm1jOk7EtPQIJSob8xCDfV4XBLKUkOavWuL9WrxktAHi960u8qDYL53dTLGO5UDnUZgBSIY3pFswwfx6jJAo0wjCXWURyK6XN3xXhQjWmwIea1RhgVChMBCxqKAXiRaqYFFMVySJZi5qhFE1dNhZYrzqnnAwb04lJtHZJMM3FzCA0dcBNrYZrG0BVXXW9SK7FjlMhbXf4rsiU5TZ02w+nq+GM3A6nG9UOOCmUkrQ/TbKxvAV78hawsse7x9//3kePnn+wvb/9L/7Z73y8u/i//qv/yy/85s/8rd/4uxdPn9/J6DglGXtUsUMuZXMrP3nv3fN//fv7p4/3ku+e3b/bz5v5+GyLRExnd//sGR4yD1MRRRk1o8+r08Ozsav6wqv3L5+M0/kovdjYkWYovkCq6/I8HQ6Hy1/79V/9xZ//6tX4XNHXCQkZzBQQPVWsJiuGHJ7qzF5PgXOU+3H0NewHfD7gqVNVqTQthiJCYGrj12qYoVVRgFngMIte591YFaCmTnO2ZRKPIG6TUeW2ZoohjnBoJAZsERspQkfsHF91U3u6m0cTyaJRbv0JF4FD2WRjZtEklir5kzSTKSaXc0chJjx699G8L+thqKMCgkPkJSpcvGWKWsIZWFNWpox5fXKrfOWr4+WhT0Penkq5pZfAOKeabMoYDaPiKHKEjcIZOAJFMUNn2KSwAt8IgcrklBGBFlFNgvBN8jJYzGpAC83vz/taiX7UFGQ8RJGeowNWNLjOo1d8tbGbbnxBhNCI7T7+zAE2LLHPotdR1YaiCtgyYTSxkJTaX9CaBMe99v1U+G0LKs0SmZbY7FErOktj7Gy+pgvRSzYsKS8MDm8uYIlCzSdu0SvHbFstrBaXbHDdsCkhEHeRUefL+AY1NjcvH/MqgOzTAA+y3jwDEHj77poXIyChqaFXgeItjOdTlbZ1wDlLuHHvHBm9IW61dsiZU6KIqroJYq21lZ6RLP2JqDUW7sGr3Gs+o48/G+weehz1XjLuAK+f3AX9WLYGoz0gfmgT8oIlYMEVDE2zbgASZTnT8CPkhiFWfeCvgeHc1L3FrbEw1ncgF6QkwiSGi2AYPgelyq01vAhyXNrH+26hItdwCOAZXkgvh83fFKJmjM9CiloNrzSKqjlyUKpmZleZuwyMvgvSdyZpUNaEDd13WVIYtrsViLB5RTUMvFVlwE8Itb1zWdayOpRDIxIAqqnqXCdSck6d5GqmqinlohWlAMgpBbhgcba0FCOytEF3u50p51qKV88iQkotimQCVXMbUWNSLZKyk8bNvNiSGFGkZr+jhAOSVlXVIFmhJ5vP/OEf/scvvHV7e6KPP/wx+CjnNE7W95vcn/an9wv73ZOrZ4ePJk46yBUudnqYUp1kvveJ25Lzf/jT31/f2/zX/4d//uqL2/2z919cSa9jj5Jx2JQqeb/ZlKfvPd3/9/9udfHRq5tbms4uythLOelrLnicbv/13dOPBUN9fvfszmGL6cPzepikiAxAlr7fSD/VpAQl91SoqVJykvHqgqn+9u/81he/8NndbmcmkhB50b2spMB6c6rfnDnRzUUQbgFw1YLHPZ/5qCkqCCShKmjVrJQyApqzAMWc1CmzwEjn3zeBBcrN7Cs0M/ElgO2xb4+lF9V0gMhxPmuTCQBLKjUwTrLTqGIKEqALm7wyVm1Edg4AU5ZA2AKZAFUdE7JCilkBCppg3KZc9/bwBw9zyTrCZ9yqhiK+spAF6GETeDQmKnI35ILu1jzn8UKmkxVX08W+rx0lqUA02dH0UDFBCrE3nYiD2gw5CkY3w1KBgbO60MiKqSpmYEpupCMwgwhy7nPnPdt1W4bABT34WfxRC8f6hY6BBtBHEFyQ+3h6PVC2GXD8hOSOQDVVVYegYQ2jbSNA8zWFDfWLv1zuikiD/RCdmQ/WFgTT4kzEG1rms0sY8DCs7jhlof8VekHdDgkjqQDIObd23g2b3EqHaO6FCizGwg2/vG6LW8W3nBzGIh7f8e4diIU9GNwHDwShWq/7tUCgCerSdlsDmuKbEptiBQBpgljcSHNlpSS/yGpKQJiaeIspZVJVq6pO0TyFdSXbdfYLrsGbUJ9cklKruuja54Wq6h8gyhifgbbJTrPciF5QrzXiETJaX8gYgsPC9YVodMB4Nn1DVZDIzMwHsWiWZh6Nwozn2qST0W3HrzRAsiBGVBIkNW8e/UU0Hgk3OLtGzMHkLsGIUUDjEHgd6dOKGFR71RPIcbT8S61LuvupQZwk4Y5pAcC41ZOPn6lVYUoRieFZI6+1Z9KLhSaQJRCmM4HnL4fMn3G5foA1NllFPXsTzKb4IINQdxRHbHMCMpOPVBolMSzVbsAA7oXeWmNJCKGq9qtcSoH3NJoUJaderYAQ6ySbk9MBuud2Y0+2zY+gWk2SBQS6BM2rQasOq80X3vqly93+9PT1O/e2l7vje+/96Onjj5+fP0q3Hv7Df/bbp/dO7/UP9FH9/qPvbDbb11775HB3NdzfdKf9Fa62D7a/+ODnX9gKy96efXg/z2u9TDoPUrKWPk+383x4/nz/b/6bl66eDKcvvV3mgY9ynqba3Tf7oW6+rvvvP3mY0qneSqPpPI7DWZ9Pht2TC+317PTuxe5wNV1tTjflakYx0y5nrbVcXO5effHeb/2DX7v74PT55XmSTCToRJBMtGpIphMwE4mSKAnsWMXK4m3RLrobQPpTbAZUeCuDCipYKIVQT7cano2AFA+0RAVqW3O0lLzmsy2izbccP2M7VNH4NttEA0V9M4AInZGBKHJjmiySDM0XwXsAbzSk+QF7iWxqSAITZkRrKGRbLqniNYOZmk0w8T1JMO1Wm/Mnu49+9KTPve1hQzRmVFoBC2wCVuARWPlimNSNCZpe1rTVcfdv/lhLhXTyVs/VXZ0AFauWZtERmCATdYLtgRJzXx58zerMkHk5hq9ENRqsGmtKCJ8hV3pqpQgQumonh1obogX66Ah1NaWy2QqgJTtr/V7zNrjBI47FdCEaUtVoEhJzjR12bLnbE240GbQFN1U3CqmNohPZ14NDgyvip68Dig/z/O4328klphAu0QRQPV7EEW7x0xwLiR69kZ3dZwQI+DEil0e+SLI3RBHmq+yXKOfbP3zpjZerpgCqqSBYTV7BqDkE7fu2brxtRz7bl
vCY//FmHLXWDJHCWss0z7nLdC5j+FHkaBZVpSmSAG/Vkg8dI2ILBEL6rhhzQxKnvLmvllBqVVxvhYI0JMA/99IO+kzfe0rVWun4umMa/s4JYhmBREEV08hmnSJLJ+eImnMw2CShBJmEWpWSPCFK8nmF0tN4Sy2tRiS9GIMJwSA4NT0ivF73EZt4DeTfK54ahfAFmwii+/X5a8HIwQl/GTrtzYFhVJFkBrEK0CKDQZgcgmHyRWyibXtMrSVLJqSUorVIyikJBI1vGJVulGfmtLXr08jgwqmk5JWW1Vj9F09aUArNl8u1H7SF2iYSOjrHgV3NnRgupMEkJAlWK76vOWprCRqsWVFFSp0CZky5V/d40SrST/OURFLOpmpWJZuqmGqiSPK/9F/hhZgmyaaGlEqtYLJae1mXo73x+uu7/XG/Pz599v533/nRxcX44ouffOMLr2xfOLn14K6cZBvk869/+cu3v/zo2WM51VfffHnsq66EtznogePIi6fbPPa5bDBusc/p0GOSMq/yri/lw3/9/3qwf7574d5Zvz/boVyNohW5e3cavlnT7U++8uYlPto93p3evTqcay/bfjuw319BND/fX6R96bf9tJ/yioJUduXi+eXJSfqNX//Vn/2ZL6qN4/5AZlcbmlVDBiuDkZpj6mliVXz7jG/puEaGYDCfF9TokiIjskmDNeWZ0e8W0+rsDkDNqpn6YsBIlzRfLOKwVCBebE8sAQtaBRktuAYo1b6JtFrD7YFEePYxoENxg6A4Ql7fV6vBpI4D7KczCkQfBtMIJjLDlzoji+ttmw4e0JT6Dx6+ezg/DmeragqjFSQTq5RsmMDOdCB68OipnCP1FqezeiyJ9vSjnHsdTjl2cim2N9RqajYDM+AboSZYEZuASTGBpuAMFueak2ZWCFd9qQgSSBG1DCN01koQWud4RGnR/IuIJKs1rpT7QsRYjVbrIhYCfMgOf07DpscCzfXHttSaWm9tIQxinqY555RTxHfzIxKDTJOGR0RSElJya0QM7mvv8g9bIM3WF3oUEripamOMRevrb129bwM0bIbgW9pb+vCCgCS1mnqX2jZx+FmvcMJt4M2mSEKImwYTzWBsyY2+TYIRGUVNg+LrA63qEZLqyqsk0IZq+ibCuH7RzfkGGK84g2erBiLlFNAvRUSG1SrgbSEUtWrViRTJ0j6jtFtlJmYGcRO2JsKJDifm6KhahBJzWTh121k3Ys6Pdoy9+Wt6Y+cDW2vDAUTP1CDPVrPHZWugSbu4S1K31u2br+zwVq6B8DTVUi01Sruqtk0elrOPXQPeafWTX4QgTANu1mlB/5OWodVb24DcoOa1oDReSFwmbwWccgJR789hQiavA6zlfvEhfM2OMTR0bhlz+OxAfG13iryeJfuHzTkpkvl0mQBQqt9iWRh55psheKMQ8JGO+ughZUm+46uqOnEmvsGxQkX06sT1PYNds819VK7wNRwUQkVNrSJs6Hxa5ysrzTvsYE6q1oZyirfFWURrybmq5lImERFZlTIlUpiicAClbbMGIcxaNUlXTSX3WkHlaDXnvBsnyvrBC/fG/Y8Pl1ef+/Rb1m/OXrh1+8FZMYhAVvNB7XBMm7t3P9o/eefhR/ffONOkwzhRd4Odb+y4Loc7Q+nrZV/HDQ49iuSrs47jsyf50dvPP/Hm+a2Tj7/9N8rule39q/nygPzDUvpbm1PuteBKSe7PV2dY6dV4ud/ttDdWZIVarlq7Taf7snv2PBf5ys986Rd/8Wfu3FlfHS61VpGOaqSqjUydqkIlur2GwZLSRCKCMP9yzxjf0VgYo0AvfloVJgaraq7119hNK+6oo77BmRG2GqsO5komNuzGqRge4G8SgdoxjsLazOjPPtzsOCrxCsSGqyj9A2L2mr5Bm1zCnXvKWTgfiZt8qfrmoGAimcT+ZKPXi2Gbpjo+/vDHakdMvZuC+yDbZmMHdMCKUoEimEBAWMYZr8p4T441i6S+TH03nHbpBXmeS6009jPLlUJpZnZUKcIKzrRJaAUyUYthMnNuNmDFUNQqrWromw1WRCCSAS3KAJvdYz8ePFN/qFsHFgNZU10mWAiQD2FnHDLa6MgCuq5WkVL4ZhBgkHA0q1opCtRr0ygzSRKmQoC1yireFUAIRE1jFmEwJTLFVL1ltgUVAapqygkWgmYLFRoFUNWcMyhWNQWpVRkR9fpEOSPJU4sBWgoJpuT5TjSkOE6qT5JMYGoxS4xj07ACOP9anIHqVaNbtjnmgobAx/mjQDRJuD4tl1sAgehCADOfHzdkFFCtCDPiIHd5ZYlG2s0pm6kVl91o6OvRsGYhtOGfFEjUsFGhAPAarsGXvL6nCMPkgDrRbL8ILoLN2HXlH8ahJza01Ck2cXWcVI9lciVxZYJ3bLEKWkLQ5aW5AARqVFT0stpxyzIXZw9ISO3inNDX+Tr06uY5ZlqKEFWh6vaqJGlUTzsNcrCiVWJ+4fhwjYxnen0AAKuQRpOSkB4hUcJMxuNdXFAS4vHJivsNiThVW8Q9Q7z3F5GcUgSs5vXqtpUgkvi0zAsLEWMWlrZzxksPhT8ycSqqamp2MWZwh944SIw9l3odCiQqIy7jKmsYvttZpxihJyw/o7EMhiCyc6Td5Ua1RmpfUWAQVVMdhU4XAxgFYGAziALLEf6ErK5MTDkjWUkiK2jaXc0vvfL6f/nPfupks316dTx2x+60k9OMDWToZAtsoav64it3j3l6On58X3Cbc18v+nKB+bzHfl3KOpdtnjayy2XqRNc27p//yf5Otnl6AR/YCeaL8zPIKp3M7E8Mu+nJ4eMp2+kG2ayugauTrZEoEE2osAm5kzSk3eMLOern3/rML3zx5144vX+Ydue7PUFATGeKGgjpzSYxiHTw4Q6VzBpQYhYmmBhQS3PpcbsVqIj7WbgjXZN4qJIqVLXirZavns/ZUX1F7Pw2wiSpakEUzTHM8YLMavBp2xoXoIGYwOIYsSCSpkHl8ZGM1uqkZRGB1YokBfTyzgUesKxholVL1SCgSAayk2DF6Ls0zJJqgiZJedYEE0mdKoQZlHmSRx89ZVJFoSbs0W3yfDWlXsqsKWfUWgX9IZe+mJCKl+7f/dTxonu6r5sexVI6ppdfybapO80qKNAC0WTF6gTMqqVKpVQmVvCoOgNFWJBqncs8l5xBp1yGiKtaVdANdYyQ7Pbbar6cBwbnQBNgvqasee0DSA43wpjGqhVTODpFh2sXSJh0U3wDnFclFDhNRJH7LiNmuh5bnHyEKNajf1oA5xj44Vqx5ElliXfL3/hKL81JDFZNwwNTfdLMampqEAO04XJRZEd89ykGDRYoSG0OVqq1luJGgLnLDmrGYFUVMT30Ika4UEnjKlpzyGiVwjWJus0s26eA2eKWvAT0JVcRjEExSZqa7zOXKIiocNwUBmUSUSBFU+UvJBazhOVNxPBaq2lVETanYUgTqwTEKjFF9+832DK3bDVEXAdtuHFT7cQfv+MwqtPRYAifNoms33Y6GBQVTevj8GgQvTxVe1hhStE+kiknqlvkIOfsqYVgztmX
awWRIWxJFAamm3X8kuXJsBNxhY1FEvVC3KXP/tSYUiAivnGhyWT9xgbqXF0FfuO3I4JV6+aXmwpYrTV+S3MrO3BHAABdzUlEQVRCszgwrpdW1VqLj4n9sy/ECBKllFprztmqIZ6tWEICczrNMkIC2nkSi50ofqF9NhKFUUMfrH0kVZdRJdU4Q/6u0PoYbV6+IkSYjrmwUBhzkuvxXnFuR3Y2UWuVzdQUJeA4L5iaGM2WHsD8Cb32t5KUh6oZ0gukzDDg+HyXbw1pu+EGWIsNZmvjCW1tshFsMWz7OzrK/snx8sNVOoiNL6zsNF+dyXFVxjPZbYQ5HXKRx4/++u3/+fc+/EF/enuby/bB2XYt5enu2Z2zdL7fMetcUE1ytQ3WFf2MPCPN6zUnYFL2kgcZd+O033/q9U/97Z/9pVfuvFwudHc+EuhkBZhqAUyLgtXvkhDOew8XeU6t8a1qVSRRciJckmtqxevIa6Kft8AtIJjBjaGTBg6lxVBJEkrx7rZW82VmBNWNt3W54iBzhlnIwxgVW6jsImZ4FPfHydcGagBCIiISTFGXMxQPmASdLobFzhiWBQalabZokRKYYAJ0LoWWlMVyrQITSb1g5WGoW/UfPX12vh+701uQBGFKSakJuah28yBA2WGVMQ5TP9FEu+3mzdMHdz4cy0FRSs1DL/16fTcfs85e09KKWTEUk0rW5IRntaIoZBEppCoVVpg1hYVfAR3eN99V6x2RitK0UX/CCSdCEA1wlnurfmGOwhIQyQu/qa2sdbTVXFjqj6SrZJIv3HMjPq/iDCSyi1ha2rHITeo4yaLrjOlrY+s46SZW2ysgKTTbHsyC5uNLhKpKEgYj10jXHiFYq+JJZQlBfmHMxa90kwcXYhIEqxZnwkCjJIvmxGs3xnzCE1OtanBDREGcUAd1/bxq5BMAqNd8hgh07aQDHsIcTUQUGdeVJhqGLHHxWzy9NuiKTBZOV6qqKjkmBim3OxdBLa66ZDGNylQgWcRa62M3ytpWPfjqJEd2gsqEBF4vuXI+EtuGvlZ7xNiK3repIS2E9yje3JsP6jzHhYKFwMQFNxhbaFG7tmF8u5aNoMQsqW2+ckI2vYpp5daSfuIKhzGXLmJ5eiFkWIBwf1BCUK6mge0r1LSNDxLBAEkQeaXJ6xk0+MDm6fWIow1+mqRVXmYxkZf25RjXLqYoFEkwcyDBaq0S3C0wZsMBLNrCwg7cOY5/SnlhRDZn9bgg7e2isQ+vfen80tRWsgK63KnmTadxIyhu5AMsljVRuQaXkmEonsTZD/ChvJYCYZKO0q5LnHABEtD2HUFUc4BERpG+Vkl5rakW0dxl7RVZ0QEdrDMMsLXkW9Lb7laaTrbs+34o+7NeTu1i2x1PcHWvPw5lfPc73/jmX77z+c+8hPyjj78v5SIfDo/G5yf33vzkW2989mvf/proxeffePNHH43j5b6gHzQr04xxRn+UuWKNjNRLuSrnu93t9elv/PJvvvX6F2Xi/tmxHGtCFiG0QCiSVJtwvI1r1EAUcwliW/ykVolU3bzMOwkJhlLTbQPu+ePZkIBp4zmbaW2wDxDohDp5C3S1fdw/f8pCKmK+v6569R/DuxhF3KCeLlECMcNqt2x5XxSXsyw31J9+x3jCINfFLQDFTICsKpBOzIkpK0hSTUAPdjQYMiRZIlc0Krc8//D5lI/r7Yk5+URUskAMRYlSYJgIlbRX7btM1Oe1/slfpulQthmWOZZ6epaGu7ZXnRrNsJLVUCgqZZ4ROxWK88lrnQBlVkIlCQRVq8HoHHJV+CJ2CSDYm0BHEbzxitrbo2V7TEKBJKaBXLbdcGRrCYjk20tNgtEWrYM6scud5ynB4oDlqMccN6EL9xnHLTwRXMDtDA5PXR42CcDcMs+VsdqSj5kPIigyT1MnfaJU74x9oKuKtv3BKWOmbX2giKk6FuiYeXDpLI6UNjmzzzX91yxHrI11QYZnRYMjFxVvS6OyfJKbZ3T5R8+FZqRoy/QRDU2u5yPXP0MCJkt3yPYtbuzgZYCIlFLiZ5b07f9znf8bOUl87omihXCbqibN9hq6sdb9fgUdvd34NrxVNh47GxHZvy8awUja8MbWm0dZlg15pLaA8duvCDhBAma4LkR+4vKBCHvIYByoqghE40hft4CeulqQYyupoqBUEEaGssJM254PP0TJK1UXU8RPa6ymjMNTK5uZFMmYMSPAA204B264uTpg6H9iuBAGacs7WnKiy4LFIlezDebc8rNVIOLDGtJ04ZdGEG4lBxp4QUIW+7C4pF6CBgRNB4P8f9zrDghwxYd2XD6i5wYR0ASmi6XoUtEQgsxs6sZlgTN5uZySL1OGSFKlutEwk8+b/UHyjb/RESKLZCCL9KYZzMakHdFndKV2lT0xAANwYqUrMqR0SzpenXJ/JrtBr06HaTUfT3i4n6aVXdzuD/nq+J2//JvvfOMv3v3OD9+482adxqtHWAMydkMS/XB/Lqt+3ozHPU8fnab+HDqrbEWKYrBUKDN7yevDdr2/2NVafvYXv/SLn/v5U56Oz+Yy+qoOIMf98LAqIkAfzlNW/SL4SUhJ1fzJUjGhwLEQClTV2bNu/3wDAA5ULJ5zA8XXeWt7dtThQNOq4Q9jzvcHGNS8YIRFHcYbqdYPbwBjUcXGSVlgFoGEuCAYD02nrxUGSPLiK+ZZJqbJT7K76ICJyGSS2OIM1QzJwk6kB3rTTgayM0tWxaRjTcAtfFye4pS85eCSKEx61FJTznMtNKFKUaUKFCPkwXF8YTwUVb1AFhTU/OC+cLBLteK8XNCMCitaUYgCFLKACsxmLvxVLYVSRARuq27mEZwpvCuSX2ItABiLCxg0NzQ/d7O2fUcDrGusGaOkG8b/LT2y1c3emP3ELQq5bIvyZOjKb3BumdgIP9IwVu8cBEIPMMsLWrgNG4z6E78qAFwgxKyx3pnR9Ui8y9bvLGZACBQ3ch8ZdGw3qVBJQq/yKGZaSmGjmXnStcggEUkXZUh0fu1qRbdlN+Joa0T0+gNEcFSIaJBgG3U9PnX8qOfEn8yr0fVyMUMP1phHVefOWPRhYHiiAy0tWdWF/OSUqChvmk/Gcm+bOUf8ZjGCsebYiWvtB9Qrd4PCfLXzAmtaaG8jXjtM7zdnAez92vpnC4zLE4pqG4Wb1qoUSMrJq2ZPcQaoVi+DmIDom/WGDrbtPFvuF7Sh7DcOMEhSRWSRKnmzoY4BmBqcJGyFKu4ZCaOi1ZkOA4Qg2/xJ9ofHz4VqaaVlRKp4O4EFeRlGi3koYkwQOFVoOej3N5YWBbpFSddKhdZ+Rk3gI5SwIFWTdvTQNkoB2a0OYU3sFadOxFeJSLxNNZJZEHtoYQ5lXscXp7bFV/3YejxpKbndYjOY04nM37yPuOKB9Ztn6i40jZ3E7Ao+x7MNAsupT5bBDFmL9YYBXNPWhoGyoWwgHNd6ucHzDXabenGW9qfdVV8uhvr8Xj+uC//d//AH05PpFKuvvvnaS+vP/PmffXd+XDeSC/VqOuzPLz/6+Mkn3nhtwvzonQ+607zBiUln0k+qyl4
5aKpaLz8ax5dffflXfudvv3bnE8ePysWTw8Ki6LquXBWjT8OBZc4U7AqnUvjwvIJCnzdBwAqDUEHfLFdgvpgXKefrI0rloo93o2bT69/CSA9m6mxnMzW3/6HTIglW+DMaTkgxtQlnDs+nKu05BtpcqVX47ZvMII0gY/R1kT7isuj2DSYArSqZIBQmQMyyMRk7sQw3+bHOtFdmcCVdZ4OyJ3uaWMpi2SQTWzzX57hNuSVFFQLJrOL6mAwFjVI4JctVtGou5dNpl5POIzqF6iRl2996ReakkwHON3TGohmW3teACisuQRYBpLAUWtFqoInAy2ehtlt5HeiDWaG6RCDvRSIQtZ6NATyx/bg2SpMPaNrFd3SvqarNWhBb6Bs39Es52CyBbMRM31RlAcYQtgFBcg8m3P8i3wCmYQjlMY5UQlWZRbWht6ZaFTk5vXsZOYe4EBLyyIC5EWY/TQocULB6dxjO+1bDucBbNH/Nlrhg1aw16E4nCLq0t9vu0HWjj1w8QxpAERsZfvJPNLWAw7Wt30NonOJLdvPqtPEyAGFCTl4xNVMps9YBs5Uj4X5l9PIcrnVVieJBbsh+fGyxsJqAoGHacikoaOPagBNMbcFgydYuASo33q//DRl8tfiC/0LHmoO7Zu08tVmF21ff2BvoLxmd8JKzl6vjRdzyb2LwcpWIBUpeG0AQprvt15EM6a+IGK3Gbg6ahYZbRJI7wAW731RjpErCRbFwyxB4iaFLXMaNtxkfRX0yEiMMqARO5bh0G9UmMt6IIHAFr5IaGW/pgM0J9uF1Hb8HN36rBN1J26FyHAQ3suSCnVx38E4CjzgudFWDj+8sHAeWFn4pS31ijCYijaMukpw9K60tcBDUS3z1sZJPf02ATHYpZbMMW5mKdGQP7Q0ZHKhZpRdvgvNpn4fpBLut7W5xt7Xdlle38PwWLtbp6qwf5bn84Bvfevdrf/Ng+PR6yIOcfuN/evadP320LhutZlJQV91pzjvZP9qfPTi7v7n/dPekXz0daZ3YaT4DDpJPp3qFefqd3/pHP/3mz+McV49nKPOmR1FlgYnBUk5azJCFxSJamCzwv4vS4Ri+P8cAKoy+H1ekqPq+X8nup4dqVtDG9hD1YlGWoB9SCp/XqHNJASUtaCeGBtotAiIaRL2irLpMubwpWICU5W8Qbbi1w+NhGIBD6a3TUtDi0JskDRqeGAQqbZ9TzPjNMpmBTOmFvVlvvsV5ADpFT+nAjpYhg8ycL3DRv9BhCPhIsgAqWWYtnSZg1toJJRfdQz67L59AGbsy5A1BHasOp/L6/T5hopodmzRDDRAWUFULUJxZQRSjKQq0xs5MmzUgphlm8KFcwA/+yAAQxeJgRMCadaEPF5OqVq3e2zTQ35X1MZFt+cPakkL9ieYhoiDpCVIbgijMHgKiZ27OB0aIz4pseZ2YRzBokNdRouWbFhuWjjzuqFHEL5jjZTEtW9ovUKuvejZnB3jZ6EbP3rl4gPbO2BpLKyUh6aSe9g7o0cCNhBw6cOqN+ab21miaaTNkaGlxUZ23582d1vwCMFoXZ6FRfeH4Nda+BGwQQfpvdkveC2ubtSvoVacAdJkTlusV/xPsGYfhWyA0R7CvcdG461zuQvNIM1Wt1UC65bc/hM7Qi5wAU1NGVx90p0DJ/I3cGDI7VN/QCrVmu73UEzdxbCJFWlkw+AbbmtccLniV61dQa0ZR105q6lHCmQTBvXK8AtEveIUU1OcmbnTOXfulakoYSq2wwqCDtXqJAUa12YQXsdZy/wLqLmVp5PtEoBVnuMYQFKBp7PmwOKsoGso/99akOFAQRpv+LmsIjZbyKG5oGJQyYChN2R9Ca1Wqw2haNOe0uC4EpUBj9zAIppR8b3FVp+y4jsiNyRDUbvPsL+0IsE0+Ldxy/F35XYtJlrr9bAvs1xV5jbWDgmRdI2Zlk16wIgagBwagVwwyYFzpeCL7wfZdvdik3Vov1rK/tzp+9O7TP/xv/uz2+tEwHi8+3N1/7ZXL3fe/+efv6ZECm8bd2b2Xu6E/TBfTs2lXno77XXfan65Pz87ywKy7qZTd0G0uDh+Ox81v/84/f+1n3/rgyVhG7ft1zeY5px9W82Gu7scMSSJQWZoWRBpUU2u+hOoAshsSICiVRU1US1wOgiJqc0uINa6hBAXX4Q4ApiVmXfT1bQZoXHgq6ZJ0CdaAOYzhd5lOS4QfBy8Cneda9Lp58paKsc08emeYUNxPN0pDE68l3VEztEbI5nkJ2rzyE0w8+5plgxg6ZmJlMsAyrAc3plRZAT3SSX6+uzyvz2WbdKWJ4kouETGxDEuaRWVUribM0PsFr9uhiMokx1xM+6S5f+3F0xfOpomsSeYVMZlNsAKU0GIHB3gGKlyCGglQHdAkixfedJKD02OpQdpxnMNXNALwa0E0CJbQAvgW1zYiikfEVSGeCdrht3b1iNA1Ne5QdYPrm8HTkHnDsYGeoZ2enJITVHltZIVWjaE5Bt94KbZ/bDnAKdoUmkIDxUnicJmzthuLqpYSE1qyamm0KW9HjBALH0c/ZFKL0he8qxJw5UY1Fe/9xMdRi6CTTlv1q50S2yt7nI6pmFxvqYl8S4PREgNNVvOyxGEjaNFwsXTyiQNJLpm//hPsOVVv7qnqvLOAjZaWosX66z4wymMPeN5ZqxMi2II0m6kiDEZj1eK5SHwyoVZRfJ1zYIwt2bqOzZpltfiAdfldP/kRLPIfjFT1TQE1qsNQBzYfFJ8gUJIktosc/LTI39mtla8/W9AJfEzpBahFRckYcTTozz9yy7emjGo2Dp0ZW56md6QWF1+rVcG1XFyE6jxFVUVAb0tJ2AoapzWAy/ebQq2oiSQ6WzwIpdeNskuwATo3NeVsWilcAhwlmZgWFaZQ1kVG9fLFZ9vLhxLQ0WRrhmbe/TZFOGmqtVY2HqPWGDq0jntZtw4Ik4mab/Kgu0rHpSMRY+OQQXoJ62cyyrqoKZPjlAZpGy0JpDYpTESiZFUkZKO0xtjYQTtjBw7iWKUNEI4Dx42MKxzXPN5Kx5Xutnk8lf13v/7d7/7pN4l3Pv/6p/Th+M53Pqh3Xu/K2RffGL7z9uM6HpOmlQxX54cDRm5Uet0/GzNWd09fzH0+mr5wd3t8rpfHy89+9o3f+spvnb7yuR+dj2anKctci4BiIpKqqqRUWSSJlArzdXVp8egDlomV+2vXJeb67mmDVK1RiIcIwidMSkrKXvJVx5d98kTficoFJWv2Fu4e0NJdPDXamoJr2wexNhRTLYZWNzWdSIP07CfgQMTh9391cmtOYm1orVDVKkhejrolDQk6z1k6WFZLap3DGoaMjtope9iKWFN7TQORWVdqHfIp9v+/tt6uSbLruhJbe5+TN7OyqqsbjcYHQRD8FAlRJEVSpKihpNF4wpIdMyNPeMIO+8m/zQ9+dEz4acLhcEgamdJQQ4mi+AFCBEiCJNBoNIHuQnVVVubNc/byw977ZLZmMoJEd3VW5r33nLM/1l577f3N/nS/WC1QQTW6Ey8guCilSumyWN2wtZmqn75sZ9NV34reTG
UpMvdZF9NnXtmvUKzZSqEq80pcbFOEaMa9de+f7nHWnY0p3mVkRPdbd0JMtz2crjsInoH8CIxIIdigUhigrsODtKIYfdceVDFsZMKrB0KGxMy8tKSqyYDzXFXdAXuq6m7A+ytViiqaKTSHXBIAzDF07fFXC3CM7gP9biS/rEIDpnMz7QyD4fWcKe3mr5baW2vAtKiAIDpYAgT3CDGSB5DEorruD0lRb0MSWYgfgy5aDgC1J5yubujpqXnA4w+cXkp3j9dbL6UGBgDxVgxXGQTM5/15LVchqCUeGIzQxogV4tYip0nXlMlSeOzs5nI4MlM3DLUHp7c+JXztLCnJrITeJJpe9bjQAAFQSmSiTmkl+yjSEx6TBCnYLDpknSWkIscSnvnVri8cyDyObkq8DExXzIi8cN/2pRYpScxzuWDx3qbw3cjw3S+yEPCRmKAgLzbEOTSAm/AG6gAH2X0hmeSITinODvbYUf2iCEKLtNYAKUWdKqFFSB4UYjBo5mIe9o3E3SCKGPynGRll9OzMpQIRkVLKSARV0XtT8U5Q06JG2t4iBpiqd/OPiDgRrUjme28oQporiFUMbV4/pgIIjXWqI5r0+q7hSHomFYs8NjKBFEWjQDygJJKyTkgRGhdaPdLyMtIYemKQTnqNQsNjUxBYnIkIirAIlVYFk1FRiApRwSRYiCzoEYVMaIuuqzLJdslNxeYENyfYr2R/rv2szt/+y7958eT8G7/9pesP7149wO3T3RIXP/3J688/e/7xj3xi/dVX/ubP/moh9vjX79myyWrCytoOsjRrfbO52mvbTKvHVk9Pn//GH//JvVe/9HC++/7OpCy0aXeOJ2Hd2LzIJIJi3QHlBjFFd+kMBN0vGQPiQ3+6WQHC0Hsa4KeYINmS1FoEzXd1KdEPTFJrlH1z41lAS4qoOfq0clC1GLtCtUiPBncxs+BXRM3SYSwPW33NrRmLeqUPyNGlku3+uTNAWjNEwgKouo5VqBVlUauKFsFEVJGFypKowAJYlFpkDVuAJ+SSOKVMglOgUibIJLgD2UPviKyAalSjJ0yqUtGrKCjN5KQ3TC+27QunV/bhQhakTxu9VH3h3tmnXqham852AxAwaFtC2K1BKSoVMOyflkg2RtzpPVYkXGhMqpaE99kbA/rykFMjc6GPGhKlmPVs9YWbEpercNnbCGs01DkSywxGs0dhMW8AItabGYuWUg+VjRo5bjjwSLS7RWnLXbX4W5SuswO/OBfWoRhMqUHUOVwnASmltKxzelTtdtRgRVQQpBPgEK+5PyKcJwJNu51WX5jZKQYPUKlB4kkfYyFFbInshhGSrG+NcnM6MElc9FD8JM2663w5fKGacVQcnLhCRoDlMUL2tg6aCw5fJANMgOsoSGs9wCUFes6BAiBSVY4+AsiAIAlvkq7hqYT18Ac5oBvhlUhji+RGg7kTa0CLKXsQx/aP3T8QrHpPqoZJRirI5h3GPxaR0cmGSGvhJYZ4HMNN5YOLqSriwI+TswQezUQKIupjm2HWHOuBQGABJBBiMFeu8ZtNkKVMSZtHurGMcyWpWemwnfJnOH4dZDiPHvLgHXh1biAWh+QDgMDMCSASjYH5stZ17Dn/veTtFZX9vGcUoli0Wnem5tGO9ShaIyCLmpJ3OfvGcFQ/uW0ONMRxSZIjsyQPAIrWOoxakxrky0AVV4EGalGaeqe9HiaHFkUxavAnKN3pIgqpzpwFlFSiqq4oK2HVelKx2K5luwZv6f7U5mW/ONPNrbr/27/4i7ursy997jc39+fNL6a/+r//X7t65zc+9cwv32z3f/r+3cX0sZe/Jr9r3/3299quT6cn+9oJM5ia7neX+0dXVc6vbq5Onnv2T/+Xf7dZ3rv/pF2hNdUGgaFArZvrMaOTnega4jcO77hlTzDFI3hGZjwO9oHr4YT7iNTcPIZOcFQJEgoyES2x22OAjWe0xDAqNDYE5cbLlL6vE6NSOFtYNA09Rig8VN6gYnFMFO6H3aoP+EyizSHaMkkADVZFxVVvBRVaDKostBrdEygiKrIAJhbh0jBBlpAT5VqwNiyBU+ikXKGr2RltZ7xtPHVVRU7sql5MbgW6qytb6v50urXf/qZty6axi9XOJRZbNcjqMy8v7q36pnvXKxnMFrRJQNoEziaLTCxT792lxNQAA118z7d8b0EucTpJrJpPslONDkMDyRyeVkpvzR8aGZJgR73Cbp2O8OAAD9zLoGqNGK13i8SS0l1Lzhu5FNaNRAKFRkpxXcbweBilKSoDZsdRjJ3WhiOXPwziCP/kpdZAJCXy8sEMEkitrmylgcVRgicM723D4AI6IyjAs0iL45O16DBPXq4J4dxMZUCoDvY+hzF17r346FwROCsDSroqcXjl5uUT1eQ9xUnLKUp+p8mPYtpj3xzB/x/HLl6jGur54GC/qIijLMxrN6OqC24cP4SDbx9jHD2n9M+1/LQAGL3aZ5YdAeGmRePMt96AoqVk/IU8exIrfNRomDo/MASLCDgugRzCDmR/znFEoQnyPRXsIa2I777hDkaKb9CsgNAGT8G1L8OjxcoFc9xEtJTIFIMaGh2OVNE2+jVzGSRDnzSBR1vl4GZ9XQmiO6XlOHbL5najmbEUL7ZEWCPC7qMeiAYCVAeogkkjpRZkXy9oMXpINIWm4VuliDCURY5ekutp42FzTOYhfTz4AV/Jo+qA2eHngVUAtJ6wP0RQioJqRgTzU8VlGVAgFShiJYIvAYVDn8OUUmFquqpN22nhCvPCbhbsNm9vneG8tu/91X967s7tr7/6m+//cv/v//f/4+Gbb+tGcaXXp/jtz3/iO9/6xYNfvlPsXtGz5TTVRW2Yu+2tGdoMai0LXZTN7v4nf+urv/PH/6aX6clmZ+UOuTAWg1o3hTqtCBKpkITYIkTgA6HM9iLR89N7h5pqtFr4GUsasdOZ7XC0kQ0Co3Z+CLrVl9S5wP7gLJvJBycmN3DuIROK9GTVFpRQ/Us0kRgd5cNQwGuNni51Mbd7tVYLNRam91VRiIkLVpKa/crhfFQUUo1FWMACFDMVKVJElq4iCazAFWWFslZbwVYmp5AT0ap6Jv26l9VupXPFXsgKq9CFNm/pqpiqTB2Lj2P7kXJ9vdJdFb1SzgYUTidnr37KTtWsKWJKMkxJ0ARWBRRUhkRSiUASGmJLaTCSveyUNgaCGD4tzlL8J7tGPMNUAYmqVSKOMu+B9OQiR94OA5EfJwAzIo8eaqjDY462ksxGweqVWl+THkNd6DNPvWLGnPMaAYLqoF1K7LWnsjSHaINSxYMDGlThpHUGedUzNe9RE0mlNZFo+wTh7ITMwvMWgQN/5GB6NIFfv/jAoUNyy4G2mhlcUvABFyfx8+Y3l91W3uWRlhlRz87JdkGf8XqbaQRFLqw/4qu8MuR3McqmeQ6e+venjlJupEAtAbA7oiAD4HY8n9ka6zSkYy/nRjbjXhUSWoCgcJgQPDLC8E7isLnB+ToOuA7X5zcog6iPJFv6ph6oDGMUk0N6MVxZDrGSb+e46APKnrHF4IIOCyWKqEHkgrsZS
oT2EPkkZVdIa8fhSl6h19uCbTdEAzPfHX0DiXsHKD6OGsbHmTFM37ig/DdPRiXeSHpp0CGFMHdeL9fiEsI++EGFzUFLeIImCqEeQjRECHK0bXzvBhohGhod+Suhz+ocv7itcXQgoh52xuWLdzx5xylHbd1zPIqIQyYH/RknymopUsnKKXLf2IQqdYIpZFKrTVdc2G6BeYmbBXf3bhs/3P31f/6zL33mzm+++PL24fzad7/38FcPT3RdFFtrP/3RxbMn79856/OG791/Y7V8eX26tJ08fv/h8l6ty2pm1prqat/a7/7RP//mv/5X79zcebw30TVRRRdmaihKtWYqah3qT8D7N1vIj6iK6qK1PX00vQTd2czMOmFZS3MbVOD2bnQkJE/Hw9n/4tiYCppZF8ujGgTIoSYa7M/EIFzsIRrLnIsLQTZeRn50KC0KgFKFFoY0qaZmpO1bVnB859A/UbWK6BHh2t2Pd3/WbDArKguzShaqyoJlEqzABbgEV4YVsBI9VT1VrGErw4TpdP/Mxc0dPD4TqM0CqsgKVmGK3lHv2GKDetf61+ymWRNdL6bTzd1FuUEzLj7y0cUrz7Yr07UaTC3keKLSMi/UFPReeYsRZCP+dKn3iG4F41lAwGzuY/D+qYkikSbUmOgd3GTnr0qMuEmTnlFV2Ck7Eg6ItkCNeIikhYrtMfnUM+MaWJyid2NSd4zBuwGR4reAT2jKWmzEefAHggzWYehO6oldIVm+hrOpkr+QKGQkbRmYAL77kpMShsyLuCam5oK3EgmKm2wGOU1GvpL5+3gOgWz37ocn8Nn4M2LpJB9ZwNuReHHQzkzTe0M0WqZRigQgr671hqDrZv7kWyIB2eHTwjThAHmmH+NBJsFDB4mB12HQwnKOLtqjgGv8Jo/dQCRB8cvBh4vPoyDmG0axKlhnMMlw3tFgUX8WxIGT6fhtNnrJ8U1k22uSqIyiQd1M5zig6NgqYcxjLOfh54ghModc1NGlyFqQR8Xbn6M9KSo67rsyjmO6qNyx4KIuAgCgCwkJYaIaM3wjwo1pE3FJwRz1zNxrdxa1c8ld7fFvtp8xW4lyz0v8USO87q2ZipYioq23gurcuxIjpNQzKpUYli75RT0xxKOSxVFAdHC0HnFm2u60d+ZlCKRodtMzOfFEwJUFB+K9R9KqIUYYuBGzddHH40jNDGkyq0Blh2kpuhBOUics+3Yp24Xu7k3zT/7+H17/y3+/nn+F917+1i/+w5P7131TVotV2Znd6Eldm/TXvnv/q1/97NtvPbLNQkp59u7HLx49eOH8hftXv6i60OXCdra/mrttYW2edbftKNoa9mhWARSDsnsfkPbebaYTMgQSrFF0jdEICinGPUKST8kWjEoZsbefBhsW4wiG9LXg8dnOc2vViz5eai2K7CpTiwQg2OnOtnLDI+LAjADsJqo9lc4hg/eZGV+KBAKppyQqJbSj6TqbEWqo+IzLeBUJbqCKeFkBDE64ZBvSpCcqk2AlmIgVuaScCE7BtfUTcg0907KyhW3Ppel0/Wy5OkErsld0hS3MJvQi1iCzLc+afAV8TvZX0AnbSzNdYHN6stfl+Vc+J+eqDVQ6mVcITqDRpwFyowApJpzg3CQa6QOJA3VDGAdLk2zeuk9jty4EqijQO0SjjOKWLyzlQTQwj1wWfBIEkwMeHYkDh95CUYhIFHRDyAWpWwUoqhRnuKCoSEQNGKEZkAWGDLMjX0HWn+LNkVCEgc6oPCoNfpKjD0gydTiOIw7JcfXyLA/wa3LBbYAuh2BNRqoKpz4diwqlfQsvbpkokV4DDg8Q2YBflvyTzEB69ssm8hTr4tJd+UvqgvhBWwsk+egfRiacSe/glqfaYALU43ZjqcJJOOh0lO4dckS/2hCYOsjfxddJizf5gvmWSVXSmKA3ENfot4Z6j0Mwc2Hs2eLgH2UKsSN7c/zHEWCkKzhcDtMmgbBuhwp1/GLmlwxiV3qU8JhpB4hBE4FrFKWjCUxBoh5PiyxOxVt7HbaXyJJddHLfm+uq1ujVj2qQGzZxvmoE0+KN44N6HmfahtuOAxIXDRKlaoysABAAY9pmibErcLnHwC3N+826NQKHijsiMDlsTg0KgCBDn0Bv4tdcZStOR8y8FCJEbNw0++q7Uz9KozO9HhmzZ9LZ1gT1bMRjFAEK6e3FIqwi4tqUqEABFiqVWECqyJJcGhaysN1KdpPMZ8X+5i/++rVv/58vnX5wZ2Vv/vD79bqu7Zb0ZlvotmCnfW+ffPnlX7zx4HvffvMLX7336P7lOz/ffPkr33jy4SOd+OqrX169OPF8c6Vv32CeV9jsH2y3TReLPfus1juaGaurAUEg1q1IMTYXuYrSFgh0Q2OfS6hCOUnSWwUS1RugPVwex1ylZQSVB1vmdsh5kkmoNr8IdTqXx2TeuqBwUFM8nxkpVlxwb60WVdUWUW7Eq4N8FTY6hFOzByHz3d6sBMdFUr4pzbaISIV3//lXh+8vpLqqKE1NVKXqpKiQpWCCnAhOUNbgqciZ4FRsTZ5ZmeYz3KxwfW77qpf39KL2bcV+UgJWbL9AX6m1Ou33i09j/xtoH3Y917oCFmqXAKzz059YfeolzF1Pq1fFsQIJNLAJK9Qok7BNsE5RxSIZgTBrPicP4iNQO3Ao21rEw4iD6Q4hRqd4npb21ftXRZKLxzRUkrY3aFxhWL0EifTTRob2GYQCbyIdYLCIiFamoKMF40PzyMXxA9RLj4mXN3FxgQR1PYgOhC5lhAJ89M3o1a7opIOMYQQcySvytzMJih9HNpnFXt8fVFhgcZFThN05uIF42hEBUSLX85N0BGf6ycncnSyafbdMg6vO0+lhlFWdDQ5xRTRk0peKrCH1F/mhf1OMf4xQhgF+HDzVsM+HG5dMcIZCmHM7DKGoJR7apk8CA5cYLbHxJGKMuiEapsJQU4Iv5m+M8mRGUQejTTDkIoLhTM87Bxc/dKjiWWqG4/p0dhvkzoRoHbx/KlQYrhvpbul4Q6I2HElj/M8L2wMB944k76Kx1PpxOzmwKxFxBGrU3H2asirQnKPO3ntdLpsFoSafRWaz3eUUMOoXETYRyTrL2xGC6M3KUA7J2IJZvaD/Mc6iAGitoaPW2nvzDdl6L7Xowb7HEvkdwJtx8zoHMcd75ty+xE/SqHBEfd73D3UkI1hHUUkZFocU0Bqg1ZP13lRLqcuoJgJAhyxgHSDRRIs1g0GayMI1kgUQKejWbbbSvYW0rJbT66+/9a3v/vlnXz45aev5qq5ut1qmqZ/e3FyUUq0q1vvthXGx/IM/+eZ//H/+/GdvXbz6hTuvvfnLn//qx7dfuvfG2z/4whe/cO8zp9vV2y/cu4fztq/TR1/9w3mxejKXnS6aTru22PS61QqD7gUh4k+fIc9ONkoXRBFfUSo5sxui4A1gz+46JtH/hjTVHinKIQg97OFMEvK5+0FzRasB4nWH9ogQW+NhNkuCNJp6N5J6f+KTHywZ70+dI6QP9zqykVBn3lo6DQqc6u+0opAdFUQ27FFD8YS4SDFU6II2EVWWYtW8qK8L
YAWsISvihDiDrnW92q94vbYnJ7w+Q1svru7qJdrVstgCTUHFvrJP4LatiujLnG+jLWVxg8XOtOl0jkWr9exTz8vdCZeNc4/4skEaZBKG5pWgUkwEC6Kb7eESBaoqC/dyManND4TGyDzLMPWAErgsRHefNA60C8V2ElKyu9XNJsyOFOmPn79TBUQE0OhJiCjJWx6HZQ4/XUWinykrwTZw4DSD7omC9iKqTHaL+DBzsSwk29gfaf9dNkad6G2wTmoO+Dg2ClG7ZQaK+fKcxnt30qLGIFhLXMGEAcz6SGkAPjLb4BGQ59MQgDrCwtCUKHGh1g2pFom8mFypeOWjgascZBQpFgoMamY+flE1Wl09wjCjwHxrQGTIPBwybRz91W1jBKkjjx8QpyML7pF9bHFE4ozEDBnVAJ73q3sxae54fHe4ohMpfv2IUAui0e/jOa+AOeXAs96nLvnIRSGshiR9VIK0Y9l6yPCZZC5JVGGfclopFQKXpsL4rNBaFU1rxyQMOZbh5C91aRMXm4p3OoDSR5AkSbKIL7KuWnpvXkXyL6paMip9KmhTLQhBGxgjTh2X+bT7Rci7c3TEYW9WtQJsPrYos2EmRqLeDWYmhqJi3dUYosE/DodGhBe3w6MhOYdBEaFg5n514EeOiIu3YxFgc1kN89z46U3JCBKCGIQAcMSMKi1SNZbw+FHPN9L9cd4UXdgPfd8EVRX7ecbCHl3fXF3dx9Xm06++0h/+/MMnp7VgWevetr23siywvTWd6iTbqyft8mJ359WvfvEH//Cf7Gf1a7//uw8fXHzuy5/+1fbNN995482rhpNZb08vff7Fb/53/7LdfvnhvJqx2tndua/netplvbXab0y2gi3VFLOhi3QRI1rAigJzuoA/CLJ3M5Xme8isOTw1YJzYURYia8N2iWRuGwIB45ESiNF0wSoVTZzYigdMGdSMlAPD5rgugkZf22GvqceVhDN2GMBHKRFaZe3AFzCJsn48PJuCmPOFJTjrhqJQsyJStCyApWHBmvMzlpQTsRWwgpwAZ8AZeI6V3pzZk1t6dUuuF/3JHduW8vh5fUz5cKUQ21b2hfSFWLXWuZjL6mfAFfkpw5nUra4nrZeiujpZPr+6qZcXp2tBh1atxXpjA2dKE0wCAw3WujSFqGiBBYc87ksK0JkHBuMIOAIMJmVagcGOPGx+j9aVSljKARkYrKMiwmBlH638weda2Gkf3kKSLN4KEalp6NJV693jUw/qY5e4azFD5KzOBVIVaZ1ClhJAhxR1Vp95A2r4JIpEN4KjcOxGWvV2qKhKp3V00zyIYa7WLQJx5UWBKMHiVw0rWpyhrZkcW29UlaKdXUTEXJQIyPmvkHSH3ocLRHyII/Re/PyZanF5GM3pC75mbkWdAG3mFRpvMgi1iAQmvYWyeD24d2MQVb2izZEuS+RDGJglYhRuqHB4gObeHaVokX1vkrlltETFrwepw4ds09N5MlqwM+ZaCJzaDcIrkqVo5wgUSArp41lghJPyCNeqo1JG9iaAqMAU1ouWiN8DN4iMByJBcYI4e5Cejcal0VEUTTwHYEDzLp3IBM0PSbcEAshGUTMrtXj0oUFYdkZVR/Qcm29FO0zdGFTkAINMoLU6SyLCpcgBKPQFi8QfART7PYYPH5FUhKcZsdEr5yI9xdxNAJpKSa/EEpPS6R3nggjMHAjUyQVInXzpgg8YDeUJGsUFl8hDAQxG57AqOIIJhqehNzg6iODmQ6uaBaXUeX2DhOGREhG96jT0fcNiUagC0jpLdMgq1GCEiRXrrM1JR0DXqpMK5+1OV/z2d777XHn8J19/ZWOPLu4///DR2x9ZnzR9Mp2cYf3ArlRWZrPhRCfUF9cf3V9tX/vZ9+89e/7q17/+w+/+HRYP777y4g/femO6p/V8urTrLaxO+PhvfX66+/yjDbayusFqW+rGlq2c7fpqZwu7MZ2BWbE1aYoG7WqB4YDoIKvAHA0GJFQ11FyYXLyNxLL5iwA6qT7AKsrCASTlO5xd50llCGT4Xg8dlMFGAXp3zYdISKKMCBEtBMxa4oVu3EMo3O1IJk2OJntZGjFMXhx0dEarEMY483oUbLkwlmR5T1WLd6AJK3ulCFDrUrgyrgrX1BOxdcMZ9FbhGXELa92e4uo2rm7bh7fwZMUnz8iT6+v7d+b7K7kpfb+QPQC7MW1FTLjarfSJLeSy65vrW3d6W+8268l0v9u9+OIz5/aoXUppFydnbW4TpnqrtL1hD5iiUxpkAgnsISy9R6ENBqAnqTWZk847kiyuBIYWmJlv+iRbQKA4CGMguEvdeTJI/ZNBrdSs2iPKP8ho/VgnCALasPvNxBOKqlIwQrSE1bwIqUU5QjJXs8tCUURUGqyO+G0LYQrVotkHF5oYjOtRD9bo8251VB+HCQMgx1MjvHKVqIByiNk6P983nUKSOuQ1wKNckonnHL0iKxIZTRfhUM0OBna81V85U4oHa+uhE3q4HnWv6QRd8658N9QOaEgm9MIInuBn56iSPV6ee5Xi9tFab6paS03+HrIBN2yxBwGWE/7Cx6d05RGsTfG5PhYEgZKrGeg1YQZ1bggN0dMZ4yQtM0tnaR2Su6iUhG9z4xAj2QQxbtMdRxbIfWsgi5pM5nGW2eEijkfCb3Ioe/sKDGEZcz6uaoaAHsBJtPwRRinSelfrpZTohM6ML7I3CEj1+T+03lCKqupgTh02hIcqGhk7vdbkVRtJcp9PLx7DoOIIhvwLjVVL1CCCzDXYOgIRhHJk9JO4cTUYsl7p7OloIckLPOoAhOhwv8kfG0fN3GJ7q1603pPRSzggNQ3mvwKdKBEnifn3lCpGqJLO/meDVHW+KCphsFJVrTXpysYC3FzPf/2fv333s9P6pdOPvfTSy3c+8qt3fnXz4NfvX1yhymYP2U1a2+nqmX3bEay23j7aXLXt1778tZ//6B/PzhcPfv3B2enm5PnV2x+8sV082uzmtqirlbSzcvcj9/6bf/svX/rcJ967mXb1fIezHc52OGn1dMtV0yXmIo2cITNsBuamLaFy84fvSEEzdKALjsfLWJ53D7mTuOG8GQsqF6Oaf4iRzKl/BAiTOLJa9QCV5JoDqNUdeVgJLyiVop0uJBjuOyMoBMMEGRR6tQhWtQAo3ozjam80cwzbFzacj59GiCpcU98Jz8FlFEUVcdnnQlQUckGrkBWwEqxpK8MKulacyTTtbsnVGS5v4/KcF7d5dSqX9/Tqx7/43nT59qqeYE/uyb2ha597aQVL49K4kFuTSr98tOgPdLHty6ub7adeunV3tZ13F5VtL/bk1pnpzL3qmaIBnWIKo7UuRWRBm1XUu4GtQwJlHnM6FQh5JRf/PzoO+RABZLN1lJFlpJKHRWLvzWJwgRRRY6h2FMns4GAuBiwVPtMAEfNpeVWj0lMl4axx+sNYVBdZMEdWk60zeMxuYwQ6ZtNh6AGkaLMfc4qnRxaKvh7gOUhoBrJLdCYF0piAD48u/9Bt5BiK98fFF3slEC4x74ckwFX/KDmAOV69ZTbxSeogRJ8lJFmmo6brZjY
GQ/gDR60l4lC/XwmzSNBlwkJNItT/4yEAQRNiGrqnDmBmT46eoGTBL42jgSiBS6UAFizJwkVrpLb5uR7lOOR47OAd2T7IsI3VE6GgDANsAWB48t7ZYfThEPFIclO6CXeNgUhmBUxdyUg0jSE3M0oxiIT5sIHi8syLagyeV8aGKhKUKI5SthdcEj6JZxFtHCMYNZeojPZ5C5kbRDyp6vs4HlnWYsDuNisBGn8kHP8/nqhm8zGOwrXw+wdSQ6TF4t11yuDbw9NaOfC3It71QxJuNpaNvvlzDoLBZPxKXMshiD1kyZknj02WtxOoDqMlxs8S44GC3goBMLvEKVH0yR3QLSYNOyvOIM2sEE3rhOyUJ6lQaybFfuerX/zR/X94gvnFM7zxwTvl8VsXv/wRtg+er9PtZ/hbr+gHb87X71wupMqimW7KGgssvv/m9z/1yisn6zt//eDPbPdEl3U13b730ZOP/9b5L97bbnn7+c9+5Bv/8qur588eXuus61nXmzbtMN3IYivTFotZl9phe2Ojj8/hHmjkntKA5gOhLTo2YsEJmoj/fEQ2lAxkecyXCHsXIczADZ1qEZLBET8am1uNgBclbdHhXc4uISAMHaODpk0ySXzdxX1pTnAS0mDWg+Tlq5x5VFhUJVCyjlOgoClQoQALBIT39kjy7DJXXsAqZCFcmFVggq6rnIFr4cpOsFlhs5arU16e6dWtfrHGk1tys3n3vl4LSc4mnbajNkgT21MrdQ0sOlZm13tUrs+5k/deevmLv/eVzz2eL86lVMqVVptOd6w2U3a0SooVLVoVk8IU1XQPoEA6TV0MVWSmw25pqN1zjaEm3j+XRiOaUaJeYM4NGmoqKQrtb4I51N3QvbkfkCy/HmQhhpGUDLRzkzxVs6+9dc/coz7s9QIPxJIRHaYgDOGhMuE2JgyDf2dIfGPA4h7H+cmNgrQx1Z4PNhemLoNPOG8tvy+3miErHWaeFzpBKCZ9ucXyOnlUZ/0+wgzkBWczWFrJg1uPtnQRiAY9NdNThq4QRM0aydaaQ3G+dOEjvdZbIgLo1gVSVMo4LfALkrSQsfLZP5QlOO/3L+q5URZtCBHrHU6Yghu+wfxy9ZLIdyUuKeYf/VdfljMBDy54vJx83wdlFx6BhdYFxAdj5m15speJNwDP8AGvBribTTlot1xBDBzeIjNF+CIOAlfgsl5aJ0cJ3T9GQohLguMj0o867dxjZEu2NOsiQljvMdYitSWQzsMPktMoYOxm3cx1wVIPLSssg842thLHnXtwx2xK0rStfvjMIzOFHQfG3kUYZWR37wGVH+3cqPv7mXA77c9y1IYdPvbH6gSUCELSZUQgmHYh7byKaAYiekAa4NFqdlKZUClOvYjn6hqHKWRLEqpmFFqzHaAo6+ItWta5rJOVBnm4OrHt1fL1139aL//xxZXdPblb5epywyb2pT84mx/M//h3j+YPT5dndcZ2v93T+Mv33lqvz5bPFI15tHjz7Qu7ffLiq+tfvX/zmS98Znl3/cF2Mdfzja2ftNVczzd2eoPTGy53WNgNsAP2kL1gDxjEtM2GjtpS+QICSIsSDLMjzh8nkQLCw6RE6zjoDIujczbQHn/y47znY823WkJPiLeox6dhuAmAZj116pk+PsgAROxwYOQvokprHcaOFoIEaa6j6h9IJX0ervPORBFd21YZ5PUKrUDpHVJES8UkrJSlyIliTaxMz8TWxAlP8eSWXN22i9t6ecsuzuXimXJ1u2zmXz968OOHq+sVNuhbk1m0C/cmTW3utoSeKJbKarLUusZJWdjyyWefufnI4hFps+mknE0u2kKmZ2/WalvKCTCj7zoqdVmtQRdqs8IqYEQTrQJTQWdzxEFgIvGvgw8aO5xeYhoPPI1heN/wjQNqhEIjcHXoGhbsEzdp/jm++scf58GShDkf5ysIAQzNXonhOFCRUOyDDssoGk0TmeYEk0/CRrPUVNhh05L1s5BljkKpqqI6MZcR+nnWTxdOY3x12N+hsSkanR7BJztI+D31EvGAjpmyZyyfHnoIB4zDEhYv9PkiTnW4KcOAAI0zAsquaGQBX7JjyqeXw0E+I4TdXOA5cuWgJsUW8McZd2KQLCMz08gDbTqK4hj8qlF9LNGbNKTxogXfwdB8d+SaPJiUrAhEzUnFOwtD6Juh4ulvMK9ehKmIUDoQC/OLpXOSgAwqnTOm6TQj0404KnfnYSWish60lEwmBvUvNpl3WXs7rCNs6UNytRhtY2N9BxQTgEwGf3BBjFx6h5MjEvbvjH0riCqNguaohtBL//mMQ18oqWHOyDv+/vGhSF9dfDApCPh4xHTYMqImEqkuiKwrHMKWML2h4flUmJUhVX63l48jsCYEAycN/50Li9jYphAf0eghQXyvCCgF8Gb3rlItSLZQiLETXVCdN1C1QtGaVRSzPklR6tvvvH66Ku8/vvjua391+7npzp2X7t379brp9YMnCvzN39389ES/+ZVbf/Sn9Tt/dnH59lTPVnPfVcXcNpeXF3ICnSYoVnXSXfvJz96/vz374//p37zw2Rffb6udrj/s68u+vNLTa1vf4OSGy1nXN7bEzmQrMgv3RANmSBc1dTaUzYQ1oKtayegRgQ0Y2dwfu7KtdwQ4EhX7JPG23FdpfCOJHfG3H/7snwuVnTzuCJUDl5lI5iC7UaUXCyb92Fd56CLMHvlxRk4kLeCsYUATMtTDnGlmRF09l3YJDkFxEhNNSBUWFkoRTsQCMklZiazVVsQa07Kd8maNq1u4OsfVHbm6zQ9v8UqvWntvXlzW64c2tYVuDHtTU9samskOXJlOpqu6nwQrkQ0FZbpz+vjvf/T4Y39753O/d2OFsDPDTlcddbN+RjZSTsRm6dve92bSXeNlxKnqPPYIghTZjpdha1IQ/Uw9fXJEosYo2Vp7/IpjZlCJSAeSIhnZVOYrEE/56cV1+3OItfJMV1VX7U9LgAMNdbT8+rVHLJ/azhIeP7yGG1rHp13fJyB3w4gvUqhAipZu2c/m290vwdSUisBM2Ifvh6syuZ3j6HIbsb/foCU/YfQ5hfBAvOGgWo/BEwJCfc39aByeDAGQUkUjHioi6umCkT7jc+SvAzlXEdSgUTSDZMFmYKojDBur71tjJB++2gH3ZczgRF+EGfU1os+zM+Z834iqh9/J24xDOkJxupIU0kPTvNHY6bAjPIzqV9LmgEBL1JHfohJpuSqC0eC2wxyDsRRP1eCqBxAyzszAWJEzjDl0NG3kfInxDkPmNxNYfFg8HU9KomszoAvkZR/L1Xg+kayKdK4QwSCmAeg0aSiuIK+x67wLjkEaAwMgD11BxzkSIMjFxIguvOcNHg0E00vE2xtwCP6O3HY2JyDDCP+zJu2bKUkztpBHVPCt2Z2+LhmQIvt6CUR5goYcZxnxtfjoL1GDjXpoEFsUMB0gqjsqoGSY1FUnWtNe60Jt32pVNV4+enj94YOfvPv4qiymZ06/8rtff/yzN3d49KWv3Pv+n783zxNWtx9trv7jt65//xt3vvEnJ9/9y3ff/em8uDXtt5u+qV
hT68JqAxet7jc2v/DxF//1//o/3vv0c7/eL7Z19cTOr3C2recbO7my5TVObvT0ylZyo9yQW2IL2YFbYIY0ERM2WqN0knuROeT7Y2igl2Asl9FXpNNc887XxY1g1hQPHcVOBvUdlopiQgSDksI0KA5rID6MNNAyDkSI+Fs3sSQPjQNr3jIagdQQqSHggbADZi7JISJAa01VcopimiFIFAgBwNS/21OtjKARVWFncaHbXj2yLlInXchc0ZeYF7qvmCfZn8q+tr1s8OzJM5996VN//dr9aoqtqanNhpnckzvAOBOqrara3PbKq4urCWq3yolyWdppnzc4mVSKirQ9YVK9iYtlKrag3XQf1guJNsSMjTzujPgXIIccHI597n8tbgVGBOVZos+UH9UdpFsVjLyNAUErxtpFJuL22I9y/K7FUaWQrH6Boy2NDi+6KwPFh44BnnfK0RVIADKZFaukroyrqxBwJkhPNg3Fi0U0VY3TPjyihDCbf4iEUQ3WT5Hilx9yYAAGCEi30TJ2H2MJ4j4zZ2A3EKaoTz3x3LgMUPawBIdndshekOkTaOpTeKB+ah2DRvF2vbjcSAoRXXchVRBXdZTMjEsKIAlA1pyiYhn0gRESebkAUTcKz+ZM3sNWkRQTHHGzRGwR2S0FTG0J/16nDmVd08PsoBfBS7lFksrgiKqm7I4g4EpBzI0wl7BQjKF9ApFD0SBYKVFxiUtWSspLAFkhjnw4ygSq0vdNqc5TTUMWlf0QQY6C3vAcHtiy905aKcU1v4zDcPlj8L61Qy7ugaHZCOqYTcGe94pi1PPAaD7JDs5M3T2XPdhohXWzbrX4UJKwozCExLJ7wlFoybyJucsRmzy2ygDzR0jKEUO4c2f2JOCp1lEvkZkR6LWmUG1EgRJaDxnOkIiBkGH4RVQNXjb06zLAVCb4AOxutkddFtj+9ddee+OXP344f8A7i9/65udf/I2XHj26fP2X7yy3eOUT+OI3/9kb//n+g4vv3alLtVt/8Z3tN/jC537/9mX7/vxrfeal524+uKTKft/KtFLienf121//+j//4z/grfKrm8VWn9lhfYXzC55e28lGzy9560pu3cgZ9yvZmtwINsQW2JFbyA7YAlvjDMyANFXHZWPQbOj4y7E9dRkpNT00+iNiHY3dyYzaQ8gi1pG5VuGbc3mHbQlUx8xHSoeuOiGiCvH5zRFQe785SFrvcSKCWnvY6EF6sN4OXxGo4fAyHqz6+EgJ0Q4kCpgiQyIqZQKqKVBNqwY+rTQ0T0UmbleYl9oW3K24XWFbbYMt7MZ0V7GDboFm2EKh2BJNZC99Z9JNDdjTmmHChEkWVetqd6fy/KRgy1ZOp9tbcmX7jZzUYr2qFUJRVLUIJmjL+cl+ktWrh3l3UjXs5li0zEAOL3fXMt6ELCL4iVYpQeVJLk2ApsGfikDZj6rTbx1tjnwBwzoJYWLeBxRCpxVpxzVOPFzKI+BLWu+NpGi4wGB1Bt0pj3+ebbjwk+hAOj2dVdVSDhRZH58p1PEwgjyqEJQ0+lK0dPRQpmZ3BNkfSgkg3QIZdNhcM/F2n+q05dH96VK6rUez9LAtntkZkXzRTBR5uLfEMDN1MOIwwDWNkYDovWv4YK8/iD/hsc4eEYUrdmYQc9kjMhURsZhTMaIcd8weSgclGZmY+35y0W5VV4oYfPboJYmWhoNDEvHJPyWgLM2xjEDqqtK0lGHvLaMzZF1/JND+i3QgRNGNC9WG8b3u6Q6ch4hoXL/SWbiH1N9zNgqyE0wkFbnFH751xqpELWNAvuIn0buvEO4H7jQ9gkyZVpBorWstlk17jiWrim8ZZlDSu2vaDLmYQCk1p4plbEsHpfPshicWSBLyHGEJ1p51a2QpxSQo3NFChiztOoetE5Lc5rDcwwoI7ShspA26uN9yXJyn19FTEW8Fotco4vkjy6xHJA+LKGbcmIsGJzSetizQo1hA76UysE61XF1e/OjN7/3ozb9HnT7E5l/84R/ce/FTH3zw8Nvf/xZu76mr7/zD/N/+ET76+Xvv/eNnri4eoMhKNv/fd/d/+LvPfOJrn/z+X34wd1ndgVGrTsvVycXlB//9v/23v/07X/jgQ7vYnG7qrQ9tfWPrK6635fZlnzY83dbzS7vVt4rrLk8E15CdyCyYiUbbkVvTWaWh9wbutDQVE3SIGRvMRDsiGYi79EVMMECybOzPDIR0RrDos8NCkiAj6gAL04akfckH7/lm0lDiQSa5JJfbd68JQtzliHMrIwtwhypAqTVPKKFeufS0TTL59TNSVCWdrrqVZhgvVdTuyKBAq1g1FP8Du7YCFLYJ86L7bKvdCXaTdduoXVvbdp21X21hyq3tt01ZuYc2wRZtKewARH249mYvFZybXda60WXbTXW961uVzYT1VNcntr+uK53ECvdo6oqSSq2AaouuO4m6AozoquYq1+F+xZ9ECTwvirWOVBiR5cDjV2oLui0Zi0VB7zmunK4xEFbFCU3I2jAiJnYdPoWySNJggApkdj02h8BCEA+gF7koaZWRKb4ZS3U6SWwWGwY0RyiGxxUBYI7GOIk3sytEZA4GTweRKPh0aHYp6mm0GcPcRXrrfk4bfFCgN75R1e2g+gmxcKuRB0KQCXCYe58GikyCiqgDDjxiGSXAdPibu02BHDqFo5199Muj1oqsZxeVnsGVKywa4GC+oKoMsDgaykQ0GIsIFjUAV9DoAufLi1tYg3VTLwTH7D4lzK1oNxTQf1O0RG+MdwS6L9EcWyNjGQEpodwQvgskxBUklK01kkYrRReL2o54T/QmaIMADUyAMh+SDqNhAFSrDr+iqkFPz3DU23gj8RzPIFyZ77FAZ0VIhu2DoZZBKhYBxbFR8Zotkk/nktRlUVzy22hu9Zx/5U4pnPGhOuCl8GgzipgOUWg1JeTgDslgVGgIwImYQw7eh82qaip+4gpozSxmMGAAmh7mivdeewwSPFsg0A16YOG7pZSqkn1FmuEWoT7Dg/7Usk7p9L/ObIx2ddBI3uD63hmkxuaM2AvASBccnvGKcYEYaLSmolSh9ffvv/XDn/ztm+8+6DJZbb/1O1945vbLNtu7b769v/hwNU22Or//6Pr7r+nXvrBeXS7f/2CFWvdcsV3++d8+/uY3X3j+i+X171yvFs+K1lLKzeX113/vf3jhC3/w88fzLMsrPbls62s53/LkiZxt7NYVplnPt3rHNuQG2FA2ghvwxngluhXZieyIPawZ9qaczfZmW60doIfxZl3QgQ50iietSZ8dfhHC0Wmm4n0poyqLdHK0sO6ajU3erh2Iguc9Kori/vNQ5XUjlfp/Dn/BvLGeouqxfkInhwghtl0KBSVXZyxjEIGS4ehBQAHUJHllRghLkCAMRaX4PCszhS7UKmShOmnFbsI8ybzEPNluIbuVXOuWumNvWnbl4p2HvNgCk87GLcCGnc1t6tUWG+k7TqaYZ9sXM9Za8P7jZ+9+9N50atidSNtiXtm+apv6rqjJQqRAK1hcMkRMPDOjQBZ14TIHYU4OztEjfCO7d97gcIo8lPZHkRVPp
iUWkCi9Myibia4BRdRFIWjmAKPnyNG4SCdUax6SwBIBIVxtK4rwNY6Q/1PaODmkpglvAtHXmb2tYW0BuJ6AdxmNCDu61US0kGEiRCOjHEp4kEE6ZW658fLIwQJSyZ9JKNkaSS1aSvVRi9FtBxUJhx19nMlBdfGiYHYdJcr/5CU4LswkpHgEGY23ZX5xlAsE4DCkPceTZ1waMr3RNKEw+nA3YFh3b5IdGU24GkPvFs8L3n8W9Xoji/Ogw1LS0CGSSgLMqz2kUEHBsH8i3kQ39xhFfXierWBoQjhV6qieKuOOSoZyg0o/IGAyQQaJcn5qwrvmgEXJISMhOEHzaADDeDQAvAN2XLevJjzEzfM11mQsnYe93k2ZOWmsY4x0tVRjN2aDAcx6kMBR/Nbs8L3/ZAvEfwYpj0GZjdOLsNtSRAmrtSCFQVTTcqpmtwDILDcmp10yVPZ7FQ+ULKm6KXnkd9d79u5lYSK0kkTooLJnZtEnB9Ccb5YCr44u05oPSctCQ37FAbRgh5R4FgKVSqOiP/7w7df+8VtvvvlEVqeoNx956aXnnnnxw4eX77/93k9/8ZPV7RPbNKk611s/+Wm7d+f0ld84vX/fHj2c1tM8ye35evv3r/GrX7uzfkkv7ms9sXmWr3/jjz7/jd/99c4a1ibToz5tZX3F9QZnG66vuL7mauYZroTXlJ3IjdiN4QbYQmfYhtzS9pQmOisxEw1ogJntVTtJwHx4O2lkDxYGvA4TuQQS7TfrTusjnHLCYRtdikccT3E3bA7qmxx08d1B0wEqI73zc2wkkRh3KD78kUPTDIdM+tCXPDAkEWEnlVkFQxYRjhQYPTviKGYNYEeULKqL3s3MdOF5FkWlVO3aIdAiWnVSTugLtCXmSXdLzAvb2XXFNWSL3QU2j27unK/axbx5YrqXSdFRt9g3E6KvZthVlzNoVTYr2zbt+uc+/tk6nWw2N2WxqzKr9gkN1oh9F4vpxoXuFCFUF810zJwgCsO0BDCWFe50OnC2kB/qA3yfxUUAQWFJ3ygjKAn1T/MS3iFUSnUtc5pWSjnBCwThfI76h3xorQkrMghLGT46mjo6g49NjEBIF+45cIlTItFDCybY7Uud9v3IEIdNKJ4ijx8eRdphNEKCIdnBHCGjuRSneq9UUxGzPtJuf5SGzrHJdHSDeGZ3AE4Nh5IbwkAy4cu0/ZKrSeCIwTuKcFEzIRxSdv0uKMzMNdBrrQoTxBQUEcCio8ZcONF/GJGBROVRpBSJ3eBWNas9MQeX5p1XIhnZWDSn+HkaVSjk6eJTD/kAqPj9uGV1CRF/9BKTSZNQAtRaI/Ukes8ZjsARlSMstEdEDNVTcXBGcDA9mcV62vBUlE6DiIeRgyTlhFsdZU45gC4ROMhxz1lCsUxtCpe9iiMVnCmGm4Mi/VYoyB5efmAl+4VdsEAiQEx3fuz0DThSPYvlyj4XBX3uO53NxNS2IIQ0RbIXJcpSUUpAVJoZY62yBhkxn47D6FcQpB2axyRZlUBWRfwJRXTmF986CyE4lkOnmZVExWgmqKIGuD6Dx8ea/sntlJlsaj3fXr3/4N0fvv2LK9hJ4Xyzubl7fu+Dhxe36/nPf/7GcqkTFpdXc1koT/Wx2bf+7vEfrV745NfsB3/29m773Hp6slgu33gwP/v++vanz9+8/7jOp//uf/7Tl1/5+IPLWUtB0ZmLzeLkw7660bOrfnKj6yuc7WzS6yo3xmtoEz6hbIQbYgvbkBvyhrIV7mE2A021u8qMBBDVzBrQxIF0d5FO9oi2tugZgYNxGoziDLrgVl0JhVIikfJNp3KYCZ07xUGo2PTedesMCK+wSCQ/EhXHqIiBoM8SEDIcMzj4EqLp9iEaea0P+HKRCWY/gqX5h7DEpoBCq6H2bpQqUKi5wowqTA0LQSWVLFaxr2hL7JdoK2xXusX1pPvW551dlauHP/vCb3b76EsX714+eHP/9ju7yyfWVGa0Bm1GUz3Xs/3Vdip1O2+/9KUvf+1ffZHPnswbq6u6qm253wJbse2qrifspQqqsQp90mUVrYXFySVFpDvVkgZFNcySQav7TfyTg8p4Wv4mV/sOK3RoQvLGX3ipaxQeBejdCjCKtgBCX9HTxCjKdJ9kmL+ldvCsUEgCshyHOXNXIOu4npAneGUejBc3rKIK60YnZ0s7Jid7inWYQ4BhNgD0Zlmj8icUOqhHZcVMgPxy3C2Gjry41pao9IwIHdY266qlmdVaI7YEYVGT9bBlJH1hrEj6sIGBOTPzYFfwxjFNChJofoAGHFXNTAXjpEAGx8zv2glowPgi3/cZmRAwijpYHsTs/MZY4BRZIkQOup4wxBy9+DYHcGNR8/89LZN0wpJ5/bge916JuwIExdm84s7C80BVjW5WpNsetN0MpDy6Gus43L6K9N69Li0yEBpfl3Qtge+RTM5oakRmt3vAdk+fpEMY6J7NP0pTFSEBCUiEkcdLCXcwGTcMnMnVrNTJqHFAJJ3XIaIUZC9APJAM0TJXBWIqsMQFmvfKUQCXeReoglHASgIlTUY7qko2STizi2ounCciQx6PQabzTZHGJ1jKliPzjk1Q3Geiy6SJBdcxtkCSBgfPE4Sx5RYqErIqnZ5SU7TW7fb6/Uc/vtk8vHwk09o2T24+8rGPnd96Fqfy5k9f2z65Onlm2l1sF2fVbqDSdL3+YHf9ne/v/+D3X7zzmZu3vn9tevdsNfXt/Prbuy9++W470y989V+cvfKV+5veq8d7tetiw+VVWW3kfFtWV1j3G9Ub6BbYgBtgD26oN8prckdswC2xFzRhb0UbbTZr5E5kr9occ4Y2wCShvqiYZNirQmeuRTp5aKwedQMas51y2HxJkRTHCxnyNSo8KBqUMrQLvQM8Em7zSZodQy8lNzFT+U5VJcSenEjomzto7SIU0zTFGvl2RL7dk6HQH6JFM6s7YlRQuxmsF6m+U0XBShTX/NhDu4op+lSKdhOx3naVJx88/tlbP/ubi3/cLjb1ufM7n/pNe+Zjm3fe3b71q/nyCZqpCppidX6mG9h+X1f1xZdemm7dvuZcdg0mxv2iYjJvLg9Q6rB7fUFMo+DrDZG0YV5VK2C0xmiAhNs+d2UFUSQlvbwjyTgBgugf3aQHAJbZCZLsVxsfHNZU0rhIvoEiNNMIVeHCBRFUAUx+3UGt3b8eGsPS3I1HVqAqbjgcMOH4UsZRd0OgKjCadZAael1xzA8NiKNWcsiK/8tXsl4QA5U4XOPY0ICKtt5FrJRqZq03CNq++T8Vp9EnrAowbWgiu9FEcaT+EYhcpAr/RdjEtLKZ4hxSCkT9NYr6Umv1WCnJ24jGNSTCrM6eYuR2I471VNuO2e+IQN2TqShAR/Oq+SinsPCRHIkFHSCj+4Rw5UDRQ7jnIGUcyHCOUzH7QQOBZJ5bwtdbk+yWeRpyrx1WMZPwp0Ch4IMzKgzpOUbxwkOiZj0iU3j1IfJ+5Nvig/NesjUGBxZWBkABYoPqVYzsrk4I4ZDz+eqIA6+IG/UWkdgNI0SJZc+XHm41ww/RDOBIiDOLhUIljd1Ca1MJ
PH0O+PRfAsI4yAUIBg9oFEzgPsOfDrshEur8RUdNaSJDgS/gKjNTqJkZmrK4hMxBt8a9gVA0mgU8KIvACM2LoYBBWKTsthfz/t2i+2mi9f7c3Y/+s6//4Xay//Bn/5dN29XdhV2bCG3TYaU1VJuxWL53hZ+8defVz37z7fs/+PWF9bXWaXF/e3Fvu/i9P/3fXv7oq7/aoIfqaDEpe8oNzjZlfWUrs6lf0WctcAPZiW5gM7EVuzbeEFvILNJEdsJOyAzsRWaykY3cq7oER8819CJqHxgHIYDD+lnF8nMe210lHybSRjq56djAefEdaWyZfyadP+DDbQBwcFs1SloUwHIxJfs6A9sJZ+/xZsktP/YpRMB+0I/xDe/G6CnmWBD+zOEi33QKIcOmiXeVm5Cgqkkx0QaZiXnfrS6kzmU6LSd2/71f/+AfPnzy1iyXuFXr3Vvl/LmTVz525+zu5vXXP3zwcLuq60Xbf/jwIc+0TJBVPbt3SvjkSlgFdLXrtaHuUBtk7mpmMKipD/ZBA5r5RDYVkn3w/d3iHQ298T4qI7t7j3QKhAOxQa9jmn13yALxKsOwbNQYtEOxkAqAow2eT2csFsFYLoEZYR0qwXWNlUPNqt1hi0i0GME11KIYdfTvAnXYxPE7jXQ7ew3TdgRDWNzFHNt6ePkWDpnnGAMnSfnUGrfNURnNjne3zk7bJNniUan5MB9vWBS4yw+PMzo1IkVzUJN5J3mFQdCKxlM3lCMHHU8xHclgoBydq9HvnNeZ4GA4pqhoMijRDvKT7v3T+Hp+Yk5Xjd6uEQSMRytHE04ki+Ijq47cTpW9WzcUCFIhS8ath1k4ypO9Wyhhl/FRh7wTkh3vWdUAmKcU414PFa1jJ5wHwYnN8UAZqaAeriz2UD5p/2rfTJo0YoteKSC9ZjwXv2956ge+7rl1wQOTEaCTVbK24PMOWFEQ2s45Y5PRBCnQSI6FdA24DJBEXOQ1WhDj1xLmO7LB3moXhT1v/GDI8B7ui5mTJt9P2ZNalfoN8bC8uf/Inh9WWiH/5GfDQhwd7OD0QKOd1yfgSjdU1VJLDd6kmAV6GaiAHxPAlDXW2k9X399s37348P3VdLpazk+u7JOvfJKml+8/XtU6UzCTitLqknW3a6UoN5BJt5w/+PD5Ldbnr7zz/uXmiRVaO7Fn9fzVs5e+/t62s1QDVEujmBnLdIPTy77EXm1juhVsITfCG2IGtsANsRO5AbawOTJgmSE6C2azPdBULVZZRUIN3kSMMLMGmGoBrIdGAiBCM2/tc9TKzMQFDxSOhBaIEVKjQePoOMk4QchmMC+JSWgfSZBWEniynF4U4b4dYmB4FwMTaREYKCFCn/YHQ4/IBd3dcLqF9MAa2YrudsmPgmt6G9gpVUthIUhrxN60KzphEGopxagdCilNptYu534z1ZOr6/nnbz3oLF/66pfbRXv0y4ePHm021/Xsaj57Vk6nUpup2CRSJrRqpdZdn3/8kx/c+cI/r0VxoqzN9rIvtRGoK9hkFBikA83QvOnHDW4wVCWG9kWFxgmtyIKRecFA4CfS8Uivannc7JIYHoIbTEyzXSHCXC99qgZObRjedwB+gtG65lRnJw6IaHH1ZcFT/a6oxsy5XZln9GRmcOUw3SBKw0uEkcaS3UIM4Vj6h5AMnlNZ5BiT9N0WriW1H5DBXG7XdEuMQaWZq4yIIeyHaQkFLt9tZlaqwoTslsllhKeHPNS/buDx4X0Z4iMMlDmB2vH9GFkVDtVgHJyyJ4mZb9FTTimlOE3RT+RAOFWld0vzGc4wGDseQZiZc7NE3RV6jJzdTcNR+fu9wJygKcqoN8TpTVXaofh06A0N/4XW+qHO6oQpj4z9wcS3alYoU7ZMDtZeHMtJHUWJTejWWhKphiTtytLBH4U0kpizj1TzYC52H5DSmCIIITNmiJD3k8HT8OYSEyHS0gAHzGOw0AXsESP6E/T7AdgdhyxBXnTqN48jQjk8xfje/EzfrG7vknFDSUV+ORo0mZ8QUM/hcQR0Rb/fCBv9rgbmnDshGXyQbLVP9k3sM/e1/tCgXiEW8b5ma6TDJpU0ovqAEYGL4QR2huAuMCbCjHGIZkC/2uxutr80a0A9O//Q7G6dVleXm3ff/tXVo6vVnYIZRmMBDGUFY0Hv7E2s/uK11xfL9b2Pns7rxZNy+tJLd1/97KfOP/qxD/piL3vrSwgI7RSUxR4aGpMb6Ky6hV0Te5GtYOesK+UNuUFIcNxQ9gbMsB2wVzVDh3ZBA42YE1L06q+XdcPmAjTrTmA6wHcGAI48ESlsklRTNwj+vKIZiEAiMYOsMCAuscRJ6L0s0U+aGIRE9Ba5RHJgj5rNou54oGn52ZGgvQxIPNDy0B1xNk4mKxJdZB6QEqTQ3a2qFJgaXI7OoMZOMRSiNJ2ManoyrS5rsx+89sMHDx/cefZ0unX2sx/+sM7QqbS9vfuz6/5W3+1xWkvFLMBee5kWps1k+8sHr/92++3bZ89dYVdr2duiscxSe8dMNV1oV+tmjWJBJhagw9gs9CGtQU1BM9OiZBE0EnAsUYuqdrSoMiLpKBDvJnCIAZH+ZCYreWg9Z2qRbWopjODXae5QLW56W2vmhNmiWYOHFscJs5YBAFLNuewBPaY5c2K9n97oqQzuMUJpwZeQZiyQbqyLGowWSTs5qIDuyEUHQMfRKWSpiZY4bqll6EuAB63jyEiQpRct4aHgkanPyERUP82B3mjGHYRkFXTSZ/alx8FA8wdVbdgyCRCeEa6GnMTB0A9amz9hcTLE+NSgKQsynshY1dnx4WpExAWgDF2d6xDfE0hV7w0ZjhUJfpwGBckDLIiqGYsqRDodAoBqzNcNye58kehmDl0Ed0rjhLtg7HBiA2Pp1quUeCKS3jD+6DfL4VyZwQfCu4T5d1h5ACqayk0BaTNcT1FSC6KVw0cvW0a4nhO7I0S0c3vUGLvOqQPI70SsVMz6C5q3BIjnS5fjN0QoVFHPUh2rMTPNgADMOkiayqPG0JH6cgQQyL4yr9IaaNYzU4pA/QAbhL9P+xnfkX/T7M4d25PZCj7WNVjrVCqJQQ2RZLaHRYkkPlsIaQy5eTWa2Sz+SFAcolIsBRYhdQTT6mFkSAgQSVkyALv5AtiI2qPH188/r5O+dH5r/d4H77/z87dWiyXmPasRwo2ArAprDUbO0I62sYtf3nz07gsvrtavfOYzr776+bquT2ZSYK0LpRTCCgzWOhp0FtmJbUz2gh1wbdJUmti1YQ/dq+1MtsSs2BMGXezJRnZBozRwZowabH79Ht47KVJVe+9OI1b1miIwSuSMPiOkURIIYa0heNFmouqdAwqqFtUU64Wzunw95KD0QiL24WHzSM6VVI01cBaV01m8ug86NBoxWoI/ZtnaGkGDJ0L0qN7HWHmiX32jkA2oYFNUshEFMJHq6YxXxjNnMi/JUErHwjqa1NkWOk3v/vr+z371Buv29OzFq+3l1eb9qanOtc3z+a0z5a33Hz1aT4ulclpNtmy3Xrz3wide/HD+gGePf/rGjz959+T2R842KLu6gC2
BqVsVrQZlBxvYw/u6FVJoj76YubNVodvQwTdKqMlIa6352DoIfBKgwXvnJP2a45CxxtapRV0D1/9hxEtpJEWSbkxjs1ZyNlopBYdOS1c7cVPE0Wr//wNfoigaFBtyugAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "if local_runtime:\n", + " from IPython.display import Image, display\n", + " import tempfile\n", + " import os.path as osp\n", + " import cv2\n", + " with tempfile.TemporaryDirectory() as tmpdir:\n", + " file_name = osp.join(tmpdir, 'pose_results.png')\n", + " cv2.imwrite(file_name, vis_result[:,:,::-1])\n", + " display(Image(file_name))\n", + "else:\n", + " cv2_imshow(vis_result[:,:,::-1]) #RGB2BGR to fit cv2" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "42HG6DSNI0Ke" + }, + "source": [ + "### Add a new dataset\n", + "\n", + "There are two methods to support a customized dataset in MMPose. The first one is to convert the data to a supported format (e.g. COCO) and use the corresponding dataset class (e.g. BaseCocoStyleDataset), as described in the [document](https://mmpose.readthedocs.io/en/1.x/user_guides/prepare_datasets.html). The second one is to add a new dataset class. In this tutorial, we give an example of the second method.\n", + "\n", + "We first download the demo dataset, which contains 100 samples (75 for training and 25 for validation) selected from COCO train2017 dataset. The annotations are stored in a different format from the original COCO format.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "qGzSb0Rm-p3V", + "outputId": "2e7ec2ba-88e1-490f-cd5a-66ef06ec3e52" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content/mmpose/data\n", + "--2022-09-14 10:39:37-- https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", + "Resolving download.openmmlab.com (download.openmmlab.com)... 47.89.140.71\n", + "Connecting to download.openmmlab.com (download.openmmlab.com)|47.89.140.71|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16558080 (16M) [application/x-tar]\n", + "Saving to: ‘coco_tiny.tar’\n", + "\n", + "coco_tiny.tar 100%[===================>] 15.79M 9.14MB/s in 1.7s \n", + "\n", + "2022-09-14 10:39:40 (9.14 MB/s) - ‘coco_tiny.tar’ saved [16558080/16558080]\n", + "\n", + "/content/mmpose\n" + ] + } + ], + "source": [ + "# download dataset\n", + "%mkdir data\n", + "%cd data\n", + "!wget https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", + "!tar -xf coco_tiny.tar\n", + "%cd .." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "fL6S62JWJls0", + "outputId": "fe4cf7c9-5a8c-4542-f0b1-fe01908ca3e4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Reading package lists...\n", + "Building dependency tree...\n", + "Reading state information...\n", + "The following package was automatically installed and is no longer required:\n", + " libnvidia-common-460\n", + "Use 'apt autoremove' to remove it.\n", + "The following NEW packages will be installed:\n", + " tree\n", + "0 upgraded, 1 newly installed, 0 to remove and 32 not upgraded.\n", + "Need to get 40.7 kB of archives.\n", + "After this operation, 105 kB of additional disk space will be used.\n", + "Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 tree amd64 1.7.0-5 [40.7 kB]\n", + "Fetched 40.7 kB in 0s (161 kB/s)\n", + "Selecting previously unselected package tree.\n", + "(Reading database ... 155685 files and directories currently installed.)\n", + "Preparing to unpack .../tree_1.7.0-5_amd64.deb ...\n", + "Unpacking tree (1.7.0-5) ...\n", + "Setting up tree (1.7.0-5) ...\n", + "Processing triggers for man-db (2.8.3-2ubuntu0.1) ...\n", + "data/coco_tiny\n", + "├── images\n", + "│   ├── 000000012754.jpg\n", + "│   ├── 000000017741.jpg\n", + "│   ├── 000000019157.jpg\n", + "│   ├── 000000019523.jpg\n", + "│   ├── 000000019608.jpg\n", + "│   ├── 000000022816.jpg\n", + "│   ├── 000000031092.jpg\n", + "│   ├── 000000032124.jpg\n", + "│   ├── 000000037209.jpg\n", + "│   ├── 000000050713.jpg\n", + "│   ├── 000000057703.jpg\n", + "│   ├── 000000064909.jpg\n", + "│   ├── 000000076942.jpg\n", + "│   ├── 000000079754.jpg\n", + "│   ├── 000000083935.jpg\n", + "│   ├── 000000085316.jpg\n", + "│   ├── 000000101013.jpg\n", + "│   ├── 000000101172.jpg\n", + "│   ├── 000000103134.jpg\n", + "│   ├── 000000103163.jpg\n", + "│   ├── 000000105647.jpg\n", + "│   ├── 000000107960.jpg\n", + "│   ├── 000000117891.jpg\n", + "│   ├── 000000118181.jpg\n", + "│   ├── 000000120021.jpg\n", + "│   ├── 000000128119.jpg\n", + "│   ├── 000000143908.jpg\n", + "│   ├── 000000145025.jpg\n", + "│   ├── 000000147386.jpg\n", + "│   ├── 000000147979.jpg\n", + "│   ├── 000000154222.jpg\n", + "│   ├── 000000160190.jpg\n", + "│   ├── 000000161112.jpg\n", + "│   ├── 000000175737.jpg\n", + "│   ├── 000000177069.jpg\n", + "│   ├── 000000184659.jpg\n", + "│   ├── 000000209468.jpg\n", + "│   ├── 000000210060.jpg\n", + "│   ├── 000000215867.jpg\n", + "│   ├── 000000216861.jpg\n", + "│   ├── 000000227224.jpg\n", + "│   ├── 000000246265.jpg\n", + "│   ├── 000000254919.jpg\n", + "│   ├── 000000263687.jpg\n", + "│   ├── 000000264628.jpg\n", + "│   ├── 000000268927.jpg\n", + "│   ├── 000000271177.jpg\n", + "│   ├── 000000275219.jpg\n", + "│   ├── 000000277542.jpg\n", + "│   ├── 000000279140.jpg\n", + "│   ├── 000000286813.jpg\n", + "│   ├── 000000297980.jpg\n", + "│   ├── 000000301641.jpg\n", + "│   ├── 000000312341.jpg\n", + "│   ├── 000000325768.jpg\n", + "│   ├── 000000332221.jpg\n", + "│   ├── 000000345071.jpg\n", + "│   ├── 000000346965.jpg\n", + "│   ├── 000000347836.jpg\n", + "│   ├── 000000349437.jpg\n", + "│   ├── 000000360735.jpg\n", + "│   ├── 000000362343.jpg\n", + "│   ├── 000000364079.jpg\n", + "│   ├── 000000364113.jpg\n", + "│   ├── 000000386279.jpg\n", + "│   ├── 000000386968.jpg\n", + "│   ├── 000000388619.jpg\n", + "│   ├── 000000390137.jpg\n", + "│   ├── 000000390241.jpg\n", + "│   ├── 000000390298.jpg\n", + "│  
 ├── 000000390348.jpg\n", + "│   ├── 000000398606.jpg\n", + "│   ├── 000000400456.jpg\n", + "│   ├── 000000402514.jpg\n", + "│   ├── 000000403255.jpg\n", + "│   ├── 000000403432.jpg\n", + "│   ├── 000000410350.jpg\n", + "│   ├── 000000453065.jpg\n", + "│   ├── 000000457254.jpg\n", + "│   ├── 000000464153.jpg\n", + "│   ├── 000000464515.jpg\n", + "│   ├── 000000465418.jpg\n", + "│   ├── 000000480591.jpg\n", + "│   ├── 000000484279.jpg\n", + "│   ├── 000000494014.jpg\n", + "│   ├── 000000515289.jpg\n", + "│   ├── 000000516805.jpg\n", + "│   ├── 000000521994.jpg\n", + "│   ├── 000000528962.jpg\n", + "│   ├── 000000534736.jpg\n", + "│   ├── 000000535588.jpg\n", + "│   ├── 000000537548.jpg\n", + "│   ├── 000000553698.jpg\n", + "│   ├── 000000555622.jpg\n", + "│   ├── 000000566456.jpg\n", + "│   ├── 000000567171.jpg\n", + "│   └── 000000568961.jpg\n", + "├── train.json\n", + "└── val.json\n", + "\n", + "1 directory, 99 files\n" + ] + } + ], + "source": [ + "# check the directory structure\n", + "!apt-get -q install tree\n", + "!tree data/coco_tiny" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Hl09rtA4Jn5b", + "outputId": "e94e84ea-7192-4d2f-9747-716931953d6d" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 75\n", + "{'bbox': [267.03, 104.32, 229.19, 320],\n", + " 'image_file': '000000537548.jpg',\n", + " 'image_size': [640, 480],\n", + " 'keypoints': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, 160, 2, 398,\n", + " 177, 2, 0, 0, 0, 437, 238, 2, 0, 0, 0, 477, 270, 2, 287, 255, 1,\n", + " 339, 267, 2, 0, 0, 0, 423, 314, 2, 0, 0, 0, 355, 367, 2]}\n" + ] + } + ], + "source": [ + "# check the annotation format\n", + "import json\n", + "import pprint\n", + "\n", + "anns = json.load(open('data/coco_tiny/train.json'))\n", + "\n", + "print(type(anns), len(anns))\n", + "pprint.pprint(anns[0], compact=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "H-dMbjgnJzbH" + }, + "source": [ + "After downloading the data, we implement a new dataset class to load data samples for model training and validation. Assume that we are going to train a top-down pose estimation model, the new dataset class inherits `BaseCocoStyleDataset`.\n", + "\n", + "We have already implemented a `CocoDataset` so that we can take it as an example." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "jCu4npV2rl_Q" + }, + "source": [ + "#### Note\n", + "If you meet the following error:\n", + "```shell\n", + "AssertionError: class `PoseLocalVisualizer` in mmpose/visualization/local_visualizer.py: instance named of visualizer has been created, the method `get_instance` should not access any other arguments\n", + "```\n", + "Please reboot your jupyter kernel and start running from here." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3I66Pi5Er94J" + }, + "outputs": [], + "source": [ + "%cd mmpose" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rRNq50dytJki" + }, + "outputs": [], + "source": [ + "# Copyright (c) OpenMMLab. 
All rights reserved.\n", + "import json\n", + "import os.path as osp\n", + "from typing import Callable, List, Optional, Sequence, Union\n", + "\n", + "import numpy as np\n", + "from mmengine.utils import check_file_exist\n", + "\n", + "from mmpose.registry import DATASETS\n", + "from mmpose.datasets.datasets.base import BaseCocoStyleDataset\n", + "\n", + "\n", + "@DATASETS.register_module()\n", + "class TinyCocoDataset(BaseCocoStyleDataset):\n", + " METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py')\n", + "\n", + " def _load_annotations(self) -> tuple:\n", + " \"\"\"Load data from the custom annotation format of the COCO-Tiny dataset.\"\"\"\n", + "\n", + " check_file_exist(self.ann_file)\n", + " with open(self.ann_file) as anno_file:\n", + " anns = json.load(anno_file)\n", + "\n", + " data_list = []\n", + " ann_id = 0\n", + "\n", + " for idx, ann in enumerate(anns):\n", + " img_h, img_w = ann['image_size']\n", + "\n", + " # convert the xywh bbox to an xyxy array of shape [1, 4]\n", + " x, y, w, h = ann['bbox']\n", + " x1 = np.clip(x, 0, img_w - 1)\n", + " y1 = np.clip(y, 0, img_h - 1)\n", + " x2 = np.clip(x + w, 0, img_w - 1)\n", + " y2 = np.clip(y + h, 0, img_h - 1)\n", + "\n", + " bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)\n", + "\n", + " # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K]\n", + " joints_3d = np.array(ann['keypoints']).reshape(1, -1, 3)\n", + " num_joints = joints_3d.shape[1]\n", + " keypoints = np.zeros((1, num_joints, 2), dtype=np.float32)\n", + " keypoints[:, :, :2] = joints_3d[:, :, :2]\n", + " keypoints_visible = np.minimum(1, joints_3d[:, :, 2:3])\n", + " keypoints_visible = keypoints_visible.reshape(1, -1)\n", + "\n", + " data_info = {\n", + " 'id': ann_id,\n", + " 'img_id': int(ann['image_file'].split('.')[0]),\n", + " 'img_path': osp.join(self.data_prefix['img'], ann['image_file']),\n", + " 'bbox': bbox,\n", + " 'bbox_score': np.ones(1, dtype=np.float32),\n", + " 'keypoints': keypoints,\n", + " 'keypoints_visible': keypoints_visible,\n", + " }\n", + "\n", + " data_list.append(data_info)\n", + " ann_id = ann_id + 1\n", + "\n", + " return data_list, None\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "UmGitQZkUnom" + }, + "source": [ + "### Create a config file\n", + "\n", + "In the next step, we create a config file that configures the model, dataset and runtime settings. More information can be found at [Configs](https://mmpose.readthedocs.io/en/1.x/user_guides/configs.html). A common practice to create a config file is to derive it from an existing one. In this tutorial, we load a config file that trains an HRNet on the COCO dataset and modify it to adapt to the COCO-Tiny dataset."
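As a rough sketch of what this adaptation involves, the snippet below loads an existing config and overrides a representative subset of the fields that appear in the printed config further down. The base config path is a placeholder (any MMPose top-down HRNet-W32 COCO config would do), so treat this as an illustration rather than the exact cell used in this notebook.

```python
# Sketch only: derive a COCO-Tiny config from an existing HRNet-on-COCO config.
from mmengine import Config

base_cfg = 'configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_w32_coco_256x192.py'  # placeholder path
cfg = Config.fromfile(base_cfg)

# point the dataloaders at the new dataset class and the coco_tiny layout
cfg.dataset_type = 'TinyCocoDataset'
cfg.data_mode = 'topdown'
cfg.data_root = 'data/coco_tiny'
for name, ann_file in [('train_dataloader', 'train.json'),
                       ('val_dataloader', 'val.json'),
                       ('test_dataloader', 'val.json')]:
    loader = getattr(cfg, name)
    loader.batch_size = 16
    loader.dataset.type = cfg.dataset_type
    loader.dataset.data_mode = cfg.data_mode
    loader.dataset.data_root = cfg.data_root
    loader.dataset.ann_file = ann_file
    loader.dataset.data_prefix = dict(img='images/')
cfg.val_dataloader.dataset.bbox_file = None   # use GT boxes, not detections
cfg.test_dataloader.dataset.bbox_file = None

# shorter schedule, keypoint PCK metric, dedicated work dir, fixed seed
cfg.train_cfg.max_epochs = 40
cfg.val_evaluator = dict(type='PCKAccuracy')
cfg.test_evaluator = cfg.val_evaluator
cfg.work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'
cfg.randomness = dict(seed=0)

print(cfg.pretty_text)
```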
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "sMbVVHPXK87s", + "outputId": "a23a1ed9-a2ee-4a6a-93da-3c1968c8a2ec" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "default_scope = 'mmpose'\n", + "default_hooks = dict(\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=50),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook',\n", + " interval=1,\n", + " save_best='pck/PCK@0.05',\n", + " rule='greater',\n", + " max_keep_ckpts=1),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " visualization=dict(type='PoseVisualizationHook', enable=False))\n", + "custom_hooks = [dict(type='SyncBuffersHook')]\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='PoseLocalVisualizer',\n", + " vis_backends=[dict(type='LocalVisBackend')],\n", + " name='visualizer')\n", + "log_processor = dict(\n", + " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume = False\n", + "file_client_args = dict(backend='disk')\n", + "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", + "val_cfg = dict()\n", + "test_cfg = dict()\n", + "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", + "param_scheduler = [\n", + " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=40,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "codec = dict(\n", + " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "model = dict(\n", + " type='TopdownPoseEstimator',\n", + " data_preprocessor=dict(\n", + " type='PoseDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", + " )),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " test_cfg=dict(flip_test=True, flip_mode='heatmap', 
shift_heatmap=True))\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "train_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "test_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_evaluator = dict(type='PCKAccuracy')\n", + "test_evaluator = dict(type='PCKAccuracy')\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n" + ] + } + ], + "source": [ + "from mmengine import Config\n", + "\n", + "cfg = 
Config.fromfile(\n", + "    './configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", + ")\n", + "\n", + "# set basic configs\n", + "cfg.data_root = 'data/coco_tiny'\n", + "cfg.work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "cfg.randomness = dict(seed=0)\n", + "\n", + "# set validation interval\n", + "cfg.train_cfg.val_interval = 1\n", + "\n", + "# set the number of epochs\n", + "cfg.train_cfg.max_epochs = 40\n", + "\n", + "# set optimizer\n", + "cfg.optim_wrapper = dict(optimizer=dict(\n", + "    type='Adam',\n", + "    lr=5e-4,\n", + "))\n", + "\n", + "# set learning rate policy\n", + "cfg.param_scheduler = [\n", + "    dict(\n", + "        type='LinearLR', begin=0, end=10, start_factor=0.001,\n", + "        by_epoch=False), # warm-up\n", + "    dict(\n", + "        type='MultiStepLR',\n", + "        begin=0,\n", + "        end=cfg.train_cfg.max_epochs,\n", + "        milestones=[17, 35],\n", + "        gamma=0.1,\n", + "        by_epoch=True)\n", + "]\n", + "\n", + "\n", + "# set batch size\n", + "cfg.train_dataloader.batch_size = 16\n", + "cfg.val_dataloader.batch_size = 16\n", + "cfg.test_dataloader.batch_size = 16\n", + "\n", + "# set dataset configs\n", + "cfg.dataset_type = 'TinyCocoDataset'\n", + "cfg.train_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.train_dataloader.dataset.ann_file = 'train.json'\n", + "cfg.train_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.train_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "\n", + "cfg.val_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.val_dataloader.dataset.bbox_file = None\n", + "cfg.val_dataloader.dataset.ann_file = 'val.json'\n", + "cfg.val_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.val_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "cfg.test_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.test_dataloader.dataset.bbox_file = None\n", + "cfg.test_dataloader.dataset.ann_file = 'val.json'\n", + "cfg.test_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.test_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "# set evaluator\n", + "cfg.val_evaluator = dict(type='PCKAccuracy')\n", + "cfg.test_evaluator = cfg.val_evaluator\n", + "\n", + "cfg.default_hooks.checkpoint.save_best = 'PCK'\n", + "cfg.default_hooks.checkpoint.max_keep_ckpts = 1\n", + "\n", + "print(cfg.pretty_text)\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "UlD8iDZehE2S" + }, + "source": [ + "Alternatively, you can create a config file as follows:\n", + "```Python3\n", + "_base_ = ['../../../_base_/default_runtime.py']\n", + "\n", + "# runtime\n", + "train_cfg = dict(max_epochs=40, val_interval=1)\n", + "\n", + "# optimizer\n", + "optim_wrapper = dict(optimizer=dict(\n", + "    type='Adam',\n", + "    lr=5e-4,\n", + "))\n", + "\n", + "# learning policy\n", + "param_scheduler = [\n", + "    dict(\n", + "        type='LinearLR', begin=0, end=500, start_factor=0.001,\n", + "        by_epoch=False), # warm-up\n", + "    dict(\n", + "        type='MultiStepLR',\n", + "        begin=0,\n", + "        end=train_cfg['max_epochs'],\n", + "        milestones=[17, 35],\n", + "        gamma=0.1,\n", + "        by_epoch=True)\n", + "]\n", + "\n", + "# automatically scaling LR based on the actual training batch size\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "\n", + "# codec settings\n", + "codec = dict(\n", + "    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "\n", + "# model settings\n", + "model = dict(\n", + "    type='TopdownPoseEstimator',\n", + "    data_preprocessor=dict(\n", + "        type='PoseDataPreprocessor',\n", + "
mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint='https://download.openmmlab.com/mmpose/'\n", + " 'pretrain_models/hrnet_w32-36af842e.pth'),\n", + " ),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=codec),\n", + " test_cfg=dict(\n", + " flip_test=True,\n", + " flip_mode='heatmap',\n", + " shift_heatmap=True,\n", + " ))\n", + "\n", + "# base dataset settings\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n", + "# pipelines\n", + "train_pipeline = [\n", + " dict(type='LoadImage'),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=codec['input_size']),\n", + " dict(type='GenerateTarget', target_type='heatmap', encoder=codec),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage'),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=codec['input_size']),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "\n", + "# data loaders\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type=dataset_type,\n", + " data_root=data_root,\n", + " data_mode=data_mode,\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=train_pipeline,\n", + " ))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type=dataset_type,\n", + " data_root=data_root,\n", + " data_mode=data_mode,\n", + " ann_file='val.json',\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=test_pipeline,\n", + " ))\n", + "test_dataloader = val_dataloader\n", + "\n", + "# evaluators\n", + "val_evaluator = dict(\n", + " type='PCKAccuracy')\n", + "test_evaluator = val_evaluator\n", + "\n", + "# hooks\n", + "default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater'))\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "ChVqB1oYncmo" + }, + "source": [ + "### Train and Evaluation\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000, + "referenced_widgets": [ + "2a079d9c0b9845318e6c612ca9601b86", + "3554753622334094961a47daf9362c59", + "08e0412b8dd54d28a26c232e75ea6088", + "558a9420b0b34be2a2ca8a8b8af9cbfc", + "a9bd3e477f07449788f0e95e3cd13ddc", + "5b2ee1f3e78d4cd993009d04baf76b24", + "a3e5aa31c3f644b5a677ec49fe2e0832", + "d2ee56f920a245d9875de8e37596a5c8", + "b5f8c86d48a04afa997fc137e1acd716", + "1c1b09d91dec4e3dadefe953daf50745", + "6af448aebdb744b98a2807f66b1d6e5d" + ] + }, + "id": "Ab3xsUdPlXuJ", + "outputId": "c07394b8-21f4-4766-af2b-87d2caa6e74c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "09/15 12:42:06 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"log_processor\" registry tree. As a workaround, the current \"log_processor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - \n", + "------------------------------------------------------------\n", + "System environment:\n", + " sys.platform: linux\n", + " Python: 3.7.13 (default, Mar 29 2022, 02:18:16) [GCC 7.5.0]\n", + " CUDA available: True\n", + " numpy_random_seed: 0\n", + " GPU 0: NVIDIA GeForce GTX 1660 Ti\n", + " CUDA_HOME: /usr/local/cuda\n", + " NVCC: Cuda compilation tools, release 11.3, V11.3.109\n", + " GCC: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.12) 5.4.0 20160609\n", + " PyTorch: 1.12.0+cu113\n", + " PyTorch compiling details: PyTorch built with:\n", + " - GCC 9.3\n", + " - C++ Version: 201402\n", + " - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\n", + " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", + " - LAPACK is enabled (usually provided by MKL)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 11.3\n", + " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n", + " - CuDNN 8.3.2 (built against CUDA 11.5)\n", + " - Magma 2.5.2\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n", + "\n", + " TorchVision: 0.13.0+cu113\n", + " OpenCV: 4.6.0\n", + " MMEngine: 0.1.0\n", + "\n", + "Runtime environment:\n", + " cudnn_benchmark: False\n", + " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n", + " dist_cfg: {'backend': 'nccl'}\n", + " seed: 0\n", + " Distributed launcher: none\n", + " Distributed training: False\n", + " GPU number: 1\n", + "------------------------------------------------------------\n", + "\n", + "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Config:\n", + "default_scope = 'mmpose'\n", + "default_hooks = dict(\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=50),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook',\n", + " interval=1,\n", + " save_best='pck/PCK@0.05',\n", + " rule='greater',\n", + " max_keep_ckpts=1),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " visualization=dict(type='PoseVisualizationHook', enable=False))\n", + "custom_hooks = [dict(type='SyncBuffersHook')]\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='PoseLocalVisualizer',\n", + " vis_backends=[dict(type='LocalVisBackend')],\n", + " name='visualizer')\n", + "log_processor = dict(\n", + " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume = False\n", + "file_client_args = dict(backend='disk')\n", + "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", + "val_cfg = dict()\n", + "test_cfg = 
dict()\n", + "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", + "param_scheduler = [\n", + " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=40,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "codec = dict(\n", + " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "model = dict(\n", + " type='TopdownPoseEstimator',\n", + " data_preprocessor=dict(\n", + " type='PoseDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", + " )),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "train_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " 
dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "test_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_evaluator = dict(type='PCKAccuracy')\n", + "test_evaluator = dict(type='PCKAccuracy')\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n", + "Result has been saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/modules_statistic_results.json\n", + "09/15 12:42:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer wrapper constructor\" registry tree. As a workaround, the current \"optimizer wrapper constructor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer\" registry tree. As a workaround, the current \"optimizer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optim_wrapper\" registry tree. As a workaround, the current \"optim_wrapper\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - load model from: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", + "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - http loads checkpoint from path: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - The model and loaded state dict do not match exactly\n", + "\n", + "unexpected key in source state_dict: head.0.0.0.conv1.weight, head.0.0.0.bn1.weight, head.0.0.0.bn1.bias, head.0.0.0.bn1.running_mean, head.0.0.0.bn1.running_var, head.0.0.0.bn1.num_batches_tracked, head.0.0.0.conv2.weight, head.0.0.0.bn2.weight, head.0.0.0.bn2.bias, head.0.0.0.bn2.running_mean, head.0.0.0.bn2.running_var, head.0.0.0.bn2.num_batches_tracked, head.0.0.0.conv3.weight, head.0.0.0.bn3.weight, head.0.0.0.bn3.bias, head.0.0.0.bn3.running_mean, head.0.0.0.bn3.running_var, head.0.0.0.bn3.num_batches_tracked, head.0.0.0.downsample.0.weight, head.0.0.0.downsample.1.weight, head.0.0.0.downsample.1.bias, head.0.0.0.downsample.1.running_mean, head.0.0.0.downsample.1.running_var, head.0.0.0.downsample.1.num_batches_tracked, head.0.1.0.conv1.weight, head.0.1.0.bn1.weight, head.0.1.0.bn1.bias, head.0.1.0.bn1.running_mean, head.0.1.0.bn1.running_var, head.0.1.0.bn1.num_batches_tracked, head.0.1.0.conv2.weight, head.0.1.0.bn2.weight, head.0.1.0.bn2.bias, head.0.1.0.bn2.running_mean, head.0.1.0.bn2.running_var, head.0.1.0.bn2.num_batches_tracked, head.0.1.0.conv3.weight, head.0.1.0.bn3.weight, head.0.1.0.bn3.bias, head.0.1.0.bn3.running_mean, head.0.1.0.bn3.running_var, head.0.1.0.bn3.num_batches_tracked, head.0.1.0.downsample.0.weight, head.0.1.0.downsample.1.weight, head.0.1.0.downsample.1.bias, head.0.1.0.downsample.1.running_mean, head.0.1.0.downsample.1.running_var, head.0.1.0.downsample.1.num_batches_tracked, head.0.2.0.conv1.weight, head.0.2.0.bn1.weight, head.0.2.0.bn1.bias, head.0.2.0.bn1.running_mean, head.0.2.0.bn1.running_var, head.0.2.0.bn1.num_batches_tracked, head.0.2.0.conv2.weight, head.0.2.0.bn2.weight, head.0.2.0.bn2.bias, head.0.2.0.bn2.running_mean, head.0.2.0.bn2.running_var, head.0.2.0.bn2.num_batches_tracked, head.0.2.0.conv3.weight, head.0.2.0.bn3.weight, head.0.2.0.bn3.bias, head.0.2.0.bn3.running_mean, head.0.2.0.bn3.running_var, head.0.2.0.bn3.num_batches_tracked, head.0.2.0.downsample.0.weight, head.0.2.0.downsample.1.weight, head.0.2.0.downsample.1.bias, head.0.2.0.downsample.1.running_mean, head.0.2.0.downsample.1.running_var, head.0.2.0.downsample.1.num_batches_tracked, head.1.0.0.conv1.weight, head.1.0.0.bn1.weight, head.1.0.0.bn1.bias, head.1.0.0.bn1.running_mean, head.1.0.0.bn1.running_var, head.1.0.0.bn1.num_batches_tracked, head.1.0.0.conv2.weight, head.1.0.0.bn2.weight, head.1.0.0.bn2.bias, head.1.0.0.bn2.running_mean, head.1.0.0.bn2.running_var, head.1.0.0.bn2.num_batches_tracked, head.1.0.0.conv3.weight, head.1.0.0.bn3.weight, head.1.0.0.bn3.bias, head.1.0.0.bn3.running_mean, head.1.0.0.bn3.running_var, head.1.0.0.bn3.num_batches_tracked, head.1.0.0.downsample.0.weight, head.1.0.0.downsample.1.weight, head.1.0.0.downsample.1.bias, head.1.0.0.downsample.1.running_mean, head.1.0.0.downsample.1.running_var, head.1.0.0.downsample.1.num_batches_tracked, head.1.1.0.conv1.weight, head.1.1.0.bn1.weight, head.1.1.0.bn1.bias, head.1.1.0.bn1.running_mean, head.1.1.0.bn1.running_var, 
head.1.1.0.bn1.num_batches_tracked, head.1.1.0.conv2.weight, head.1.1.0.bn2.weight, head.1.1.0.bn2.bias, head.1.1.0.bn2.running_mean, head.1.1.0.bn2.running_var, head.1.1.0.bn2.num_batches_tracked, head.1.1.0.conv3.weight, head.1.1.0.bn3.weight, head.1.1.0.bn3.bias, head.1.1.0.bn3.running_mean, head.1.1.0.bn3.running_var, head.1.1.0.bn3.num_batches_tracked, head.1.1.0.downsample.0.weight, head.1.1.0.downsample.1.weight, head.1.1.0.downsample.1.bias, head.1.1.0.downsample.1.running_mean, head.1.1.0.downsample.1.running_var, head.1.1.0.downsample.1.num_batches_tracked, head.2.0.0.conv1.weight, head.2.0.0.bn1.weight, head.2.0.0.bn1.bias, head.2.0.0.bn1.running_mean, head.2.0.0.bn1.running_var, head.2.0.0.bn1.num_batches_tracked, head.2.0.0.conv2.weight, head.2.0.0.bn2.weight, head.2.0.0.bn2.bias, head.2.0.0.bn2.running_mean, head.2.0.0.bn2.running_var, head.2.0.0.bn2.num_batches_tracked, head.2.0.0.conv3.weight, head.2.0.0.bn3.weight, head.2.0.0.bn3.bias, head.2.0.0.bn3.running_mean, head.2.0.0.bn3.running_var, head.2.0.0.bn3.num_batches_tracked, head.2.0.0.downsample.0.weight, head.2.0.0.downsample.1.weight, head.2.0.0.downsample.1.bias, head.2.0.0.downsample.1.running_mean, head.2.0.0.downsample.1.running_var, head.2.0.0.downsample.1.num_batches_tracked, head.3.0.0.conv1.weight, head.3.0.0.bn1.weight, head.3.0.0.bn1.bias, head.3.0.0.bn1.running_mean, head.3.0.0.bn1.running_var, head.3.0.0.bn1.num_batches_tracked, head.3.0.0.conv2.weight, head.3.0.0.bn2.weight, head.3.0.0.bn2.bias, head.3.0.0.bn2.running_mean, head.3.0.0.bn2.running_var, head.3.0.0.bn2.num_batches_tracked, head.3.0.0.conv3.weight, head.3.0.0.bn3.weight, head.3.0.0.bn3.bias, head.3.0.0.bn3.running_mean, head.3.0.0.bn3.running_var, head.3.0.0.bn3.num_batches_tracked, head.3.0.0.downsample.0.weight, head.3.0.0.downsample.1.weight, head.3.0.0.downsample.1.bias, head.3.0.0.downsample.1.running_mean, head.3.0.0.downsample.1.running_var, head.3.0.0.downsample.1.num_batches_tracked, fc.weight, fc.bias, stage4.2.fuse_layers.1.0.0.0.weight, stage4.2.fuse_layers.1.0.0.1.weight, stage4.2.fuse_layers.1.0.0.1.bias, stage4.2.fuse_layers.1.0.0.1.running_mean, stage4.2.fuse_layers.1.0.0.1.running_var, stage4.2.fuse_layers.1.0.0.1.num_batches_tracked, stage4.2.fuse_layers.1.2.0.weight, stage4.2.fuse_layers.1.2.1.weight, stage4.2.fuse_layers.1.2.1.bias, stage4.2.fuse_layers.1.2.1.running_mean, stage4.2.fuse_layers.1.2.1.running_var, stage4.2.fuse_layers.1.2.1.num_batches_tracked, stage4.2.fuse_layers.1.3.0.weight, stage4.2.fuse_layers.1.3.1.weight, stage4.2.fuse_layers.1.3.1.bias, stage4.2.fuse_layers.1.3.1.running_mean, stage4.2.fuse_layers.1.3.1.running_var, stage4.2.fuse_layers.1.3.1.num_batches_tracked, stage4.2.fuse_layers.2.0.0.0.weight, stage4.2.fuse_layers.2.0.0.1.weight, stage4.2.fuse_layers.2.0.0.1.bias, stage4.2.fuse_layers.2.0.0.1.running_mean, stage4.2.fuse_layers.2.0.0.1.running_var, stage4.2.fuse_layers.2.0.0.1.num_batches_tracked, stage4.2.fuse_layers.2.0.1.0.weight, stage4.2.fuse_layers.2.0.1.1.weight, stage4.2.fuse_layers.2.0.1.1.bias, stage4.2.fuse_layers.2.0.1.1.running_mean, stage4.2.fuse_layers.2.0.1.1.running_var, stage4.2.fuse_layers.2.0.1.1.num_batches_tracked, stage4.2.fuse_layers.2.1.0.0.weight, stage4.2.fuse_layers.2.1.0.1.weight, stage4.2.fuse_layers.2.1.0.1.bias, stage4.2.fuse_layers.2.1.0.1.running_mean, stage4.2.fuse_layers.2.1.0.1.running_var, stage4.2.fuse_layers.2.1.0.1.num_batches_tracked, stage4.2.fuse_layers.2.3.0.weight, stage4.2.fuse_layers.2.3.1.weight, stage4.2.fuse_layers.2.3.1.bias, 
stage4.2.fuse_layers.2.3.1.running_mean, stage4.2.fuse_layers.2.3.1.running_var, stage4.2.fuse_layers.2.3.1.num_batches_tracked, stage4.2.fuse_layers.3.0.0.0.weight, stage4.2.fuse_layers.3.0.0.1.weight, stage4.2.fuse_layers.3.0.0.1.bias, stage4.2.fuse_layers.3.0.0.1.running_mean, stage4.2.fuse_layers.3.0.0.1.running_var, stage4.2.fuse_layers.3.0.0.1.num_batches_tracked, stage4.2.fuse_layers.3.0.1.0.weight, stage4.2.fuse_layers.3.0.1.1.weight, stage4.2.fuse_layers.3.0.1.1.bias, stage4.2.fuse_layers.3.0.1.1.running_mean, stage4.2.fuse_layers.3.0.1.1.running_var, stage4.2.fuse_layers.3.0.1.1.num_batches_tracked, stage4.2.fuse_layers.3.0.2.0.weight, stage4.2.fuse_layers.3.0.2.1.weight, stage4.2.fuse_layers.3.0.2.1.bias, stage4.2.fuse_layers.3.0.2.1.running_mean, stage4.2.fuse_layers.3.0.2.1.running_var, stage4.2.fuse_layers.3.0.2.1.num_batches_tracked, stage4.2.fuse_layers.3.1.0.0.weight, stage4.2.fuse_layers.3.1.0.1.weight, stage4.2.fuse_layers.3.1.0.1.bias, stage4.2.fuse_layers.3.1.0.1.running_mean, stage4.2.fuse_layers.3.1.0.1.running_var, stage4.2.fuse_layers.3.1.0.1.num_batches_tracked, stage4.2.fuse_layers.3.1.1.0.weight, stage4.2.fuse_layers.3.1.1.1.weight, stage4.2.fuse_layers.3.1.1.1.bias, stage4.2.fuse_layers.3.1.1.1.running_mean, stage4.2.fuse_layers.3.1.1.1.running_var, stage4.2.fuse_layers.3.1.1.1.num_batches_tracked, stage4.2.fuse_layers.3.2.0.0.weight, stage4.2.fuse_layers.3.2.0.1.weight, stage4.2.fuse_layers.3.2.0.1.bias, stage4.2.fuse_layers.3.2.0.1.running_mean, stage4.2.fuse_layers.3.2.0.1.running_var, stage4.2.fuse_layers.3.2.0.1.num_batches_tracked\n", + "\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Checkpoints will be saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192 by HardDiskBackend.\n", + "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 1 epochs\n", + "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [1][2/2] pck/PCK@0.05: 0.009035\n", + "09/15 12:42:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.0090 pck/PCK@0.05 at 1 epoch is saved to best_pck/PCK@0.05_epoch_1.pth.\n", + "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 2 epochs\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [2][2/2] pck/PCK@0.05: 0.163666\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_1.pth is removed\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.1637 pck/PCK@0.05 at 2 epoch is saved to best_pck/PCK@0.05_epoch_2.pth.\n", + "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 3 epochs\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [3][2/2] pck/PCK@0.05: 0.201942\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_2.pth is removed\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2019 pck/PCK@0.05 at 3 epoch is saved to best_pck/PCK@0.05_epoch_3.pth.\n", + "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 4 epochs\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [4][2/2] pck/PCK@0.05: 0.247750\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_3.pth is removed\n", + "09/15 12:42:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2477 pck/PCK@0.05 at 4 epoch is saved to 
best_pck/PCK@0.05_epoch_4.pth.\n", + "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 5 epochs\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [5][2/2] pck/PCK@0.05: 0.296205\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_4.pth is removed\n", + "09/15 12:42:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2962 pck/PCK@0.05 at 5 epoch is saved to best_pck/PCK@0.05_epoch_5.pth.\n", + "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 6 epochs\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [6][2/2] pck/PCK@0.05: 0.316309\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_5.pth is removed\n", + "09/15 12:42:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3163 pck/PCK@0.05 at 6 epoch is saved to best_pck/PCK@0.05_epoch_6.pth.\n", + "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 7 epochs\n", + "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [7][2/2] pck/PCK@0.05: 0.290834\n", + "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 8 epochs\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [8][2/2] pck/PCK@0.05: 0.335645\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_6.pth is removed\n", + "09/15 12:42:40 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3356 pck/PCK@0.05 at 8 epoch is saved to best_pck/PCK@0.05_epoch_8.pth.\n", + "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 9 epochs\n", + "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:43 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [9][2/2] pck/PCK@0.05: 0.348761\n", + "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_8.pth is removed\n", + "09/15 12:42:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3488 pck/PCK@0.05 at 9 epoch is saved to best_pck/PCK@0.05_epoch_9.pth.\n", + "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 10 epochs\n", + "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [10][2/2] pck/PCK@0.05: 0.310204\n", + "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 11 epochs\n", + "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [11][2/2] pck/PCK@0.05: 0.338200\n", + "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 12 epochs\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [12][2/2] pck/PCK@0.05: 0.356559\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_9.pth is removed\n", + "09/15 12:42:54 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3566 pck/PCK@0.05 at 12 epoch is saved to best_pck/PCK@0.05_epoch_12.pth.\n", + "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 13 epochs\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [13][2/2] pck/PCK@0.05: 0.384718\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_12.pth is removed\n", + "09/15 12:42:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3847 pck/PCK@0.05 at 13 epoch is saved to best_pck/PCK@0.05_epoch_13.pth.\n", + "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 14 epochs\n", + "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by 
``\"bbox_size\"``)...\n", + "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [14][2/2] pck/PCK@0.05: 0.372036\n", + "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 15 epochs\n", + "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [15][2/2] pck/PCK@0.05: 0.331702\n", + "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 16 epochs\n", + "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [16][2/2] pck/PCK@0.05: 0.350346\n", + "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 17 epochs\n", + "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [17][2/2] pck/PCK@0.05: 0.358399\n", + "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 18 epochs\n", + "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [18][2/2] pck/PCK@0.05: 0.377378\n", + "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 19 epochs\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [19][2/2] pck/PCK@0.05: 0.392675\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_13.pth is removed\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3927 pck/PCK@0.05 at 19 epoch is saved to best_pck/PCK@0.05_epoch_19.pth.\n", + "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 20 epochs\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [20][2/2] pck/PCK@0.05: 0.413536\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint 
/home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_19.pth is removed\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4135 pck/PCK@0.05 at 20 epoch is saved to best_pck/PCK@0.05_epoch_20.pth.\n", + "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 21 epochs\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [21][2/2] pck/PCK@0.05: 0.422105\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_20.pth is removed\n", + "09/15 12:43:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4221 pck/PCK@0.05 at 21 epoch is saved to best_pck/PCK@0.05_epoch_21.pth.\n", + "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 22 epochs\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [22][2/2] pck/PCK@0.05: 0.430300\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_21.pth is removed\n", + "09/15 12:43:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4303 pck/PCK@0.05 at 22 epoch is saved to best_pck/PCK@0.05_epoch_22.pth.\n", + "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 23 epochs\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [23][2/2] pck/PCK@0.05: 0.440251\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_22.pth is removed\n", + "09/15 12:43:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4403 pck/PCK@0.05 at 23 epoch is saved to best_pck/PCK@0.05_epoch_23.pth.\n", + "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 24 epochs\n", + "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [24][2/2] pck/PCK@0.05: 0.433262\n", + "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 25 epochs\n", + "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [25][2/2] pck/PCK@0.05: 0.429440\n", + "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 26 epochs\n", + "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [26][2/2] pck/PCK@0.05: 0.423034\n", + "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 27 epochs\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [27][2/2] pck/PCK@0.05: 0.440554\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_23.pth is removed\n", + "09/15 12:43:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4406 pck/PCK@0.05 at 27 epoch is saved to best_pck/PCK@0.05_epoch_27.pth.\n", + "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 28 epochs\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [28][2/2] pck/PCK@0.05: 0.454103\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_27.pth is removed\n", + "09/15 12:43:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4541 pck/PCK@0.05 at 28 epoch is saved to best_pck/PCK@0.05_epoch_28.pth.\n", + "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 29 epochs\n", + "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [29][2/2] pck/PCK@0.05: 0.434462\n", + "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 30 epochs\n", + "09/15 12:43:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:56 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [30][2/2] pck/PCK@0.05: 0.434963\n", + "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 31 epochs\n", + "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [31][2/2] pck/PCK@0.05: 0.445667\n", + "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 32 epochs\n", + "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [32][2/2] pck/PCK@0.05: 0.445784\n", + "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 33 epochs\n", + "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [33][2/2] pck/PCK@0.05: 0.434502\n", + "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 34 epochs\n", + "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [34][2/2] pck/PCK@0.05: 0.435661\n", + "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 35 epochs\n", + "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [35][2/2] pck/PCK@0.05: 0.425407\n", + "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 36 epochs\n", + "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [36][2/2] pck/PCK@0.05: 0.428712\n", + "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 37 epochs\n", + "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [37][2/2] pck/PCK@0.05: 0.423183\n", + "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 38 epochs\n", + "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [38][2/2] pck/PCK@0.05: 0.432350\n", + "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 39 epochs\n", + "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [39][2/2] pck/PCK@0.05: 0.423967\n", + "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 40 epochs\n", + "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [40][2/2] pck/PCK@0.05: 0.429198\n" + ] + }, + { + "data": { + "text/plain": [ + "TopdownPoseEstimator(\n", + " (data_preprocessor): PoseDataPreprocessor()\n", + " (backbone): HRNet(\n", + " (conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (layer1): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage2): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): 
ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition2): ModuleList(\n", + " (0): None\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage3): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): 
BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): 
HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, 
kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " 
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), 
bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): 
BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition3): ModuleList(\n", + " (0): None\n", + " (1): None\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage4): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): 
ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " 
(0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (3): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " 
)\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (3): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " 
)\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " init_cfg={'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'}\n", + " (head): HeatmapHead(\n", + " (loss_module): KeypointMSELoss(\n", + " (criterion): MSELoss()\n", + " )\n", + " (deconv_layers): Identity()\n", + " (conv_layers): Identity()\n", + " (final_layer): Conv2d(32, 17, kernel_size=(1, 1), stride=(1, 1))\n", + " )\n", + " init_cfg=[{'type': 'Normal', 'layer': ['Conv2d', 'ConvTranspose2d'], 'std': 0.001}, {'type': 'Constant', 'layer': 'BatchNorm2d', 'val': 1}]\n", + ")" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from mmengine.config import Config, DictAction\n", + "from mmengine.runner import Runner\n", + "\n", + "# set preprocess configs to model\n", + "cfg.model.setdefault('data_preprocessor', cfg.get('preprocess_cfg', {}))\n", + "\n", + "# build the runner from config\n", + "runner = Runner.from_cfg(cfg)\n", + "\n", + "# start training\n", + "runner.train()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "sdLwcaojhE2T" + }, + "source": [ + "#### Note\n", + "The recommended best practice is to convert your customized data into COCO format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zJyteZNGqwNk" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "dev2.0", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + }, + "vscode": { + "interpreter": { + "hash": "383ba00087b5a9caebf3648b758a31e474cc01be975489b58f119fa4bc17e1f8" + } + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "08e0412b8dd54d28a26c232e75ea6088": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d2ee56f920a245d9875de8e37596a5c8", + "max": 132594821, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_b5f8c86d48a04afa997fc137e1acd716", + "value": 132594821 + } + }, + "1c1b09d91dec4e3dadefe953daf50745": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2a079d9c0b9845318e6c612ca9601b86": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_3554753622334094961a47daf9362c59", + "IPY_MODEL_08e0412b8dd54d28a26c232e75ea6088", + "IPY_MODEL_558a9420b0b34be2a2ca8a8b8af9cbfc" + ], + "layout": "IPY_MODEL_a9bd3e477f07449788f0e95e3cd13ddc" + } + }, + "3554753622334094961a47daf9362c59": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5b2ee1f3e78d4cd993009d04baf76b24", + "placeholder": "​", + "style": "IPY_MODEL_a3e5aa31c3f644b5a677ec49fe2e0832", + "value": "100%" + } + }, + "558a9420b0b34be2a2ca8a8b8af9cbfc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1c1b09d91dec4e3dadefe953daf50745", + "placeholder": "​", + "style": "IPY_MODEL_6af448aebdb744b98a2807f66b1d6e5d", + "value": " 126M/126M [00:14<00:00, 9.32MB/s]" + } + }, + "5b2ee1f3e78d4cd993009d04baf76b24": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6af448aebdb744b98a2807f66b1d6e5d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a3e5aa31c3f644b5a677ec49fe2e0832": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a9bd3e477f07449788f0e95e3cd13ddc": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": 
"LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b5f8c86d48a04afa997fc137e1acd716": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d2ee56f920a245d9875de8e37596a5c8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/mmpose/demo/body3d_pose_lifter_demo.py b/mmpose/demo/body3d_pose_lifter_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb51a4b9d38320dc981dd978ccb894b2029044e --- /dev/null +++ b/mmpose/demo/body3d_pose_lifter_demo.py @@ -0,0 +1,553 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +import mimetypes +import os +import time +from argparse import ArgumentParser +from functools import partial + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np +from mmengine.logging import print_log + +from mmpose.apis import (_track_by_iou, _track_by_oks, + convert_keypoint_definition, extract_pose_sequence, + inference_pose_lifter_model, inference_topdown, + init_model) +from mmpose.models.pose_estimators import PoseLifter +from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator +from mmpose.registry import VISUALIZERS +from mmpose.structures import (PoseDataSample, merge_data_samples, + split_instances) +from mmpose.utils import adapt_mmdet_pipeline + +try: + from mmdet.apis import inference_detector, init_detector + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('det_config', help='Config file for detection') + parser.add_argument('det_checkpoint', help='Checkpoint file for detection') + parser.add_argument( + 'pose_estimator_config', + type=str, + default=None, + help='Config file for the 1st stage 2D pose estimator') + parser.add_argument( + 'pose_estimator_checkpoint', + type=str, + default=None, + help='Checkpoint file for the 1st stage 2D pose estimator') + parser.add_argument( + 'pose_lifter_config', + help='Config file for the 2nd stage pose lifter model') + parser.add_argument( + 'pose_lifter_checkpoint', + help='Checkpoint file for the 2nd stage pose lifter model') + parser.add_argument('--input', type=str, default='', help='Video path') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='Whether to show visualizations') + parser.add_argument( + '--disable-rebase-keypoint', + action='store_true', + default=False, + help='Whether to disable rebasing the predicted 3D pose so its ' + 'lowest keypoint has a height of 0 (landing on the ground). Rebase ' + 'is useful for visualization when the model do not predict the ' + 'global position of the 3D pose.') + parser.add_argument( + '--disable-norm-pose-2d', + action='store_true', + default=False, + help='Whether to scale the bbox (along with the 2D pose) to the ' + 'average bbox scale of the dataset, and move the bbox (along with the ' + '2D pose) to the average bbox center of the dataset. This is useful ' + 'when bbox is small, especially in multi-person scenarios.') + parser.add_argument( + '--num-instances', + type=int, + default=1, + help='The number of 3D poses to be visualized in every frame. If ' + 'less than 0, it will be set to the number of pose results in the ' + 'first frame.') + parser.add_argument( + '--output-root', + type=str, + default='', + help='Root of the output video file. 
' + 'Default not saving the visualization video.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='Whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='Category id for bounding box detection model') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.3, + help='Bounding box score threshold') + parser.add_argument('--kpt-thr', type=float, default=0.3) + parser.add_argument( + '--use-oks-tracking', action='store_true', help='Using OKS tracking') + parser.add_argument( + '--tracking-thr', type=float, default=0.3, help='Tracking threshold') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--online', + action='store_true', + default=False, + help='Inference mode. If set to True, can not use future frame' + 'information when using multi frames for inference in the 2D pose' + 'detection stage. Default: False.') + + args = parser.parse_args() + return args + + +def process_one_image(args, detector, frame, frame_idx, pose_estimator, + pose_est_results_last, pose_est_results_list, next_id, + pose_lifter, visualize_frame, visualizer): + """Visualize detected and predicted keypoints of one image. + + Pipeline of this function: + + frame + | + V + +-----------------+ + | detector | + +-----------------+ + | det_result + V + +-----------------+ + | pose_estimator | + +-----------------+ + | pose_est_results + V + +--------------------------------------------+ + | convert 2d kpts into pose-lifting format | + +--------------------------------------------+ + | pose_est_results_list + V + +-----------------------+ + | extract_pose_sequence | + +-----------------------+ + | pose_seq_2d + V + +-------------+ + | pose_lifter | + +-------------+ + | pose_lift_results + V + +-----------------+ + | post-processing | + +-----------------+ + | pred_3d_data_samples + V + +------------+ + | visualizer | + +------------+ + + Args: + args (Argument): Custom command-line arguments. + detector (mmdet.BaseDetector): The mmdet detector. + frame (np.ndarray): The image frame read from input image or video. + frame_idx (int): The index of current frame. + pose_estimator (TopdownPoseEstimator): The pose estimator for 2d pose. + pose_est_results_last (list(PoseDataSample)): The results of pose + estimation from the last frame for tracking instances. + pose_est_results_list (list(list(PoseDataSample))): The list of all + pose estimation results converted by + ``convert_keypoint_definition`` from previous frames. In + pose-lifting stage it is used to obtain the 2d estimation sequence. + next_id (int): The next track id to be used. + pose_lifter (PoseLifter): The pose-lifter for estimating 3d pose. + visualize_frame (np.ndarray): The image for drawing the results on. + visualizer (Visualizer): The visualizer for visualizing the 2d and 3d + pose estimation results. + + Returns: + pose_est_results (list(PoseDataSample)): The pose estimation result of + the current frame. + pose_est_results_list (list(list(PoseDataSample))): The list of all + converted pose estimation results until the current frame. 
+ pred_3d_instances (InstanceData): The result of pose-lifting. + Specifically, the predicted keypoints and scores are saved at + ``pred_3d_instances.keypoints`` and + ``pred_3d_instances.keypoint_scores``. + next_id (int): The next track id to be used. + """ + pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset + pose_lift_dataset_name = pose_lifter.dataset_meta['dataset_name'] + + # First stage: conduct 2D pose detection in a Topdown manner + # use detector to obtain person bounding boxes + det_result = inference_detector(detector, frame) + pred_instance = det_result.pred_instances.cpu().numpy() + + # filter out the person instances with category and bbox threshold + # e.g. 0 for person in COCO + bboxes = pred_instance.bboxes + bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, + pred_instance.scores > args.bbox_thr)] + + # estimate pose results for current image + pose_est_results = inference_topdown(pose_estimator, frame, bboxes) + + if args.use_oks_tracking: + _track = partial(_track_by_oks) + else: + _track = _track_by_iou + + pose_det_dataset_name = pose_estimator.dataset_meta['dataset_name'] + pose_est_results_converted = [] + + # convert 2d pose estimation results into the format for pose-lifting + # such as changing the keypoint order, flipping the keypoint, etc. + for i, data_sample in enumerate(pose_est_results): + pred_instances = data_sample.pred_instances.cpu().numpy() + keypoints = pred_instances.keypoints + # calculate area and bbox + if 'bboxes' in pred_instances: + areas = np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + for bbox in pred_instances.bboxes]) + pose_est_results[i].pred_instances.set_field(areas, 'areas') + else: + areas, bboxes = [], [] + for keypoint in keypoints: + xmin = np.min(keypoint[:, 0][keypoint[:, 0] > 0], initial=1e10) + xmax = np.max(keypoint[:, 0]) + ymin = np.min(keypoint[:, 1][keypoint[:, 1] > 0], initial=1e10) + ymax = np.max(keypoint[:, 1]) + areas.append((xmax - xmin) * (ymax - ymin)) + bboxes.append([xmin, ymin, xmax, ymax]) + pose_est_results[i].pred_instances.areas = np.array(areas) + pose_est_results[i].pred_instances.bboxes = np.array(bboxes) + + # track id + track_id, pose_est_results_last, _ = _track(data_sample, + pose_est_results_last, + args.tracking_thr) + if track_id == -1: + if np.count_nonzero(keypoints[:, :, 1]) >= 3: + track_id = next_id + next_id += 1 + else: + # If the number of keypoints detected is small, + # delete that person instance. 
+ keypoints[:, :, 1] = -10 + pose_est_results[i].pred_instances.set_field( + keypoints, 'keypoints') + pose_est_results[i].pred_instances.set_field( + pred_instances.bboxes * 0, 'bboxes') + pose_est_results[i].set_field(pred_instances, 'pred_instances') + track_id = -1 + pose_est_results[i].set_field(track_id, 'track_id') + + # convert keypoints for pose-lifting + pose_est_result_converted = PoseDataSample() + pose_est_result_converted.set_field( + pose_est_results[i].pred_instances.clone(), 'pred_instances') + pose_est_result_converted.set_field( + pose_est_results[i].gt_instances.clone(), 'gt_instances') + keypoints = convert_keypoint_definition(keypoints, + pose_det_dataset_name, + pose_lift_dataset_name) + pose_est_result_converted.pred_instances.set_field( + keypoints, 'keypoints') + pose_est_result_converted.set_field(pose_est_results[i].track_id, + 'track_id') + pose_est_results_converted.append(pose_est_result_converted) + + pose_est_results_list.append(pose_est_results_converted.copy()) + + # Second stage: Pose lifting + # extract and pad input pose2d sequence + pose_seq_2d = extract_pose_sequence( + pose_est_results_list, + frame_idx=frame_idx, + causal=pose_lift_dataset.get('causal', False), + seq_len=pose_lift_dataset.get('seq_len', 1), + step=pose_lift_dataset.get('seq_step', 1)) + + # conduct 2D-to-3D pose lifting + norm_pose_2d = not args.disable_norm_pose_2d + pose_lift_results = inference_pose_lifter_model( + pose_lifter, + pose_seq_2d, + image_size=visualize_frame.shape[:2], + norm_pose_2d=norm_pose_2d) + + # post-processing + for idx, pose_lift_result in enumerate(pose_lift_results): + pose_lift_result.track_id = pose_est_results[idx].get('track_id', 1e4) + + pred_instances = pose_lift_result.pred_instances + keypoints = pred_instances.keypoints + keypoint_scores = pred_instances.keypoint_scores + if keypoint_scores.ndim == 3: + keypoint_scores = np.squeeze(keypoint_scores, axis=1) + pose_lift_results[ + idx].pred_instances.keypoint_scores = keypoint_scores + if keypoints.ndim == 4: + keypoints = np.squeeze(keypoints, axis=1) + + keypoints = keypoints[..., [0, 2, 1]] + keypoints[..., 0] = -keypoints[..., 0] + keypoints[..., 2] = -keypoints[..., 2] + + # rebase height (z-axis) + if not args.disable_rebase_keypoint: + keypoints[..., 2] -= np.min( + keypoints[..., 2], axis=-1, keepdims=True) + + pose_lift_results[idx].pred_instances.keypoints = keypoints + + pose_lift_results = sorted( + pose_lift_results, key=lambda x: x.get('track_id', 1e4)) + + pred_3d_data_samples = merge_data_samples(pose_lift_results) + det_data_sample = merge_data_samples(pose_est_results) + pred_3d_instances = pred_3d_data_samples.get('pred_instances', None) + + if args.num_instances < 0: + args.num_instances = len(pose_lift_results) + + # Visualization + if visualizer is not None: + visualizer.add_datasample( + 'result', + visualize_frame, + data_sample=pred_3d_data_samples, + det_data_sample=det_data_sample, + draw_gt=False, + dataset_2d=pose_det_dataset_name, + dataset_3d=pose_lift_dataset_name, + show=args.show, + draw_bbox=True, + kpt_thr=args.kpt_thr, + num_instances=args.num_instances, + wait_time=args.show_interval) + + return pose_est_results, pose_est_results_list, pred_3d_instances, next_id + + +def main(): + assert has_mmdet, 'Please install mmdet to run the demo.' 
+ + args = parse_args() + + assert args.show or (args.output_root != '') + assert args.input != '' + assert args.det_config is not None + assert args.det_checkpoint is not None + + detector = init_detector( + args.det_config, args.det_checkpoint, device=args.device.lower()) + detector.cfg = adapt_mmdet_pipeline(detector.cfg) + + pose_estimator = init_model( + args.pose_estimator_config, + args.pose_estimator_checkpoint, + device=args.device.lower()) + + assert isinstance(pose_estimator, TopdownPoseEstimator), 'Only "TopDown"' \ + 'model is supported for the 1st stage (2D pose detection)' + + det_kpt_color = pose_estimator.dataset_meta.get('keypoint_colors', None) + det_dataset_skeleton = pose_estimator.dataset_meta.get( + 'skeleton_links', None) + det_dataset_link_color = pose_estimator.dataset_meta.get( + 'skeleton_link_colors', None) + + pose_lifter = init_model( + args.pose_lifter_config, + args.pose_lifter_checkpoint, + device=args.device.lower()) + + assert isinstance(pose_lifter, PoseLifter), \ + 'Only "PoseLifter" model is supported for the 2nd stage ' \ + '(2D-to-3D lifting)' + + pose_lifter.cfg.visualizer.radius = args.radius + pose_lifter.cfg.visualizer.line_width = args.thickness + pose_lifter.cfg.visualizer.det_kpt_color = det_kpt_color + pose_lifter.cfg.visualizer.det_dataset_skeleton = det_dataset_skeleton + pose_lifter.cfg.visualizer.det_dataset_link_color = det_dataset_link_color + visualizer = VISUALIZERS.build(pose_lifter.cfg.visualizer) + + # the dataset_meta is loaded from the checkpoint + visualizer.set_dataset_meta(pose_lifter.dataset_meta) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if args.output_root == '': + save_output = False + else: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + save_output = True + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + if save_output: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + + pose_est_results_list = [] + pred_instances_list = [] + if input_type == 'image': + frame = mmcv.imread(args.input, channel_order='rgb') + _, _, pred_3d_instances, _ = process_one_image( + args=args, + detector=detector, + frame=frame, + frame_idx=0, + pose_estimator=pose_estimator, + pose_est_results_last=[], + pose_est_results_list=pose_est_results_list, + next_id=0, + pose_lifter=pose_lifter, + visualize_frame=frame, + visualizer=visualizer) + + if args.save_predictions: + # save prediction results + pred_instances_list = split_instances(pred_3d_instances) + + if save_output: + frame_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(frame_vis), output_file) + + elif input_type in ['webcam', 'video']: + next_id = 0 + pose_est_results = [] + + if args.input == 'webcam': + video = cv2.VideoCapture(0) + else: + video = cv2.VideoCapture(args.input) + + (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') + if int(major_ver) < 3: + fps = video.get(cv2.cv.CV_CAP_PROP_FPS) + else: + fps = video.get(cv2.CAP_PROP_FPS) + + video_writer = None + frame_idx = 0 + + while video.isOpened(): + success, frame = video.read() + frame_idx += 1 + + if not success: + break + + pose_est_results_last = pose_est_results + + # First stage: 2D pose detection + # make person results for current image + 
(pose_est_results, pose_est_results_list, pred_3d_instances, + next_id) = process_one_image( + args=args, + detector=detector, + frame=frame, + frame_idx=frame_idx, + pose_estimator=pose_estimator, + pose_est_results_last=pose_est_results_last, + pose_est_results_list=pose_est_results_list, + next_id=next_id, + pose_lifter=pose_lifter, + visualize_frame=mmcv.bgr2rgb(frame), + visualizer=visualizer) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_3d_instances))) + + if save_output: + frame_vis = visualizer.get_image() + if video_writer is None: + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter(output_file, fourcc, fps, + (frame_vis.shape[1], + frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + if args.show: + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + time.sleep(args.show_interval) + + video.release() + + if video_writer: + video_writer.release() + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=pose_lifter.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + if save_output: + input_type = input_type.replace('webcam', 'video') + print_log( + f'the output {input_type} has been saved at {output_file}', + logger='current', + level=logging.INFO) + + +if __name__ == '__main__': + main() diff --git a/mmpose/demo/bottomup_demo.py b/mmpose/demo/bottomup_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..b493e4c4a1abd4c9c93ccf6bb6b03b63e40dcaea --- /dev/null +++ b/mmpose/demo/bottomup_demo.py @@ -0,0 +1,237 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
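+# Example invocation (placeholders are illustrative; see parse_args() below):
+#   python demo/bottomup_demo.py ${CONFIG} ${CHECKPOINT} \
+#       --input ${IMAGE_OR_VIDEO} --output-root vis_results [--show] [--draw-heatmap]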
+import logging +import mimetypes +import os +import time +from argparse import ArgumentParser + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np +from mmengine.logging import print_log + +from mmpose.apis import inference_bottomup, init_model +from mmpose.registry import VISUALIZERS +from mmpose.structures import split_instances + + +def process_one_image(args, + img, + pose_estimator, + visualizer=None, + show_interval=0): + """Visualize predicted keypoints (and heatmaps) of one image.""" + + # inference a single image + batch_results = inference_bottomup(pose_estimator, img) + results = batch_results[0] + + # show the results + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if visualizer is not None: + visualizer.add_datasample( + 'result', + img, + data_sample=results, + draw_gt=False, + draw_bbox=False, + draw_heatmap=args.draw_heatmap, + show_kpt_idx=args.show_kpt_idx, + show=args.show, + wait_time=show_interval, + kpt_thr=args.kpt_thr) + + return results.pred_instances + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--input', type=str, default='', help='Image/Video file') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + parser.add_argument( + '--output-root', + type=str, + default='', + help='root of the output img file. ' + 'Default not saving the visualization images.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--draw-heatmap', + action='store_true', + help='Visualize the predicted heatmap') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + assert args.show or (args.output_root != '') + assert args.input != '' + + output_file = None + if args.output_root: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + # build the model from a config file and a checkpoint file + if args.draw_heatmap: + cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) + else: + cfg_options = None + + model = init_model( + args.config, + args.checkpoint, + device=args.device, + cfg_options=cfg_options) + + # build visualizer + model.cfg.visualizer.radius = args.radius + model.cfg.visualizer.line_width = args.thickness + visualizer = VISUALIZERS.build(model.cfg.visualizer) + 
visualizer.set_dataset_meta(model.dataset_meta) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if input_type == 'image': + # inference + pred_instances = process_one_image( + args, args.input, model, visualizer, show_interval=0) + + if args.save_predictions: + pred_instances_list = split_instances(pred_instances) + + if output_file: + img_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) + + elif input_type in ['webcam', 'video']: + + if args.input == 'webcam': + cap = cv2.VideoCapture(0) + else: + cap = cv2.VideoCapture(args.input) + + video_writer = None + pred_instances_list = [] + frame_idx = 0 + + while cap.isOpened(): + success, frame = cap.read() + frame_idx += 1 + + if not success: + break + + pred_instances = process_one_image(args, frame, model, visualizer, + 0.001) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_instances))) + + # output videos + if output_file: + frame_vis = visualizer.get_image() + + if video_writer is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter( + output_file, + fourcc, + 25, # saved fps + (frame_vis.shape[1], frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + if args.show: + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + + time.sleep(args.show_interval) + + if video_writer: + video_writer.release() + + cap.release() + + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=model.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + if output_file: + input_type = input_type.replace('webcam', 'video') + print_log( + f'the output {input_type} has been saved at {output_file}', + logger='current', + level=logging.INFO) + + +if __name__ == '__main__': + main() diff --git a/mmpose/demo/docs/en/2d_animal_demo.md b/mmpose/demo/docs/en/2d_animal_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..0680e5a6edba9da35183bb89b1dda7f81e86e1f3 --- /dev/null +++ b/mmpose/demo/docs/en/2d_animal_demo.md @@ -0,0 +1,120 @@ +## 2D Animal Pose Demo + +We provide a demo script to test a single image or video with top-down pose estimators and animal detectors. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +### 2D Animal Pose Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ + [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ + [--device ${GPU_ID or CPU}] +``` + +The pre-trained animal pose estimation model can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/animal_2d_keypoint.html). 
+Take [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 +``` + +Visualization result: + +
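+The same example can also be driven from the Python APIs that the demo scripts in this repository import (`init_detector`/`inference_detector` from mmdet, `init_model`/`inference_topdown` from mmpose). The sketch below is a simplified, illustrative equivalent of the command above: it keeps only 'cat' boxes (`--det-cat-id=15`) above a 0.3 score threshold and prints the predicted keypoint array instead of visualizing it.
+
+```python
+import numpy as np
+from mmdet.apis import inference_detector, init_detector
+from mmpose.apis import inference_topdown, init_model
+from mmpose.structures import merge_data_samples
+from mmpose.utils import adapt_mmdet_pipeline
+
+det_config = 'demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py'
+det_ckpt = ('https://download.openmmlab.com/mmdetection/v3.0/rtmdet/'
+            'rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth')
+pose_config = ('configs/animal_2d_keypoint/topdown_heatmap/animalpose/'
+               'td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py')
+pose_ckpt = ('https://download.openmmlab.com/mmpose/animal/hrnet/'
+             'hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth')
+img = 'tests/data/animalpose/ca110.jpeg'
+
+# build the animal detector and the top-down pose estimator
+detector = init_detector(det_config, det_ckpt, device='cuda:0')
+detector.cfg = adapt_mmdet_pipeline(detector.cfg)
+pose_estimator = init_model(pose_config, pose_ckpt, device='cuda:0')
+
+# stage 1: detect and keep 'cat' boxes (COCO category id 15, score > 0.3)
+det_result = inference_detector(detector, img)
+pred = det_result.pred_instances.cpu().numpy()
+bboxes = pred.bboxes[np.logical_and(pred.labels == 15, pred.scores > 0.3)]
+
+# stage 2: estimate keypoints inside each detected box
+pose_results = inference_topdown(pose_estimator, img, bboxes)
+data_sample = merge_data_samples(pose_results)
+print(data_sample.pred_instances.keypoints.shape)  # (num_instances, num_keypoints, 2)
+```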
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +The augement `--det-cat-id=15` selected detected bounding boxes with label 'cat'. 15 is the index of category 'cat' in COCO dataset, on which the detection model is trained. + +**COCO-animals** +In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe'). + +For other animals, we have also provided some pre-trained animal detection models. Supported models can be found in [detection model zoo](/demo/docs/en/mmdet_modelzoo.md). + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --draw-heatmap --det-cat-id=15 +``` + +To save predicted results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 +``` + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_tiny_8xb32-300e_coco/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 --device cpu +``` + +### 2D Animal Pose Video Demo + +Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +For example, + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input demo/resources/ \ + --output-root vis_results --draw-heatmap --det-cat-id=16 +``` + +
+ +The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing). + +### 2D Animal Pose Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/ap10k \ + --pose2d animal --vis-out-dir vis_results/ap10k +``` + +This command infers all images located in `tests/data/ap10k` and saves the visualization results in the `vis_results/ap10k` directory. + +Image 1 Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +1. set `model.test_cfg.flip_test=False` in [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/mmpose/demo/docs/en/2d_face_demo.md b/mmpose/demo/docs/en/2d_face_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..4e8dd70684032d4634307b8ceb41c5bf61c87920 --- /dev/null +++ b/mmpose/demo/docs/en/2d_face_demo.md @@ -0,0 +1,101 @@ +## 2D Face Keypoint Demo + +We provide a demo script to test a single image or video with face detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +**Face Bounding Box Model Preparation:** The pre-trained face box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#face-bounding-box-detection-models). + +### 2D Face Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +The pre-trained face keypoint estimation models can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/face_2d_keypoint.html). +Take [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap +``` + +Visualization result: + +
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input tests/data/cofw/001766.jpg \ + --draw-heatmap --output-root vis_results +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap --device=cpu +``` + +### 2D Face Video Demo + +Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input demo/resources/ \ + --show --output-root vis_results --radius 1 +``` + +
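+The `--save-predictions` flag mentioned earlier works here as well; for completeness, a sketch of a full command that both saves the rendered image and dumps the predicted keypoints (the prediction file is expected to be written under `--output-root`):
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \
+    https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \
+    configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \
+    --input tests/data/cofw/001766.jpg \
+    --draw-heatmap --output-root vis_results --save-predictions
+```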
+ +The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing). + +### 2D Face Pose Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/wflw \ + --pose2d face --vis-out-dir vis_results/wflw --radius 1 +``` + +This command infers all images located in `tests/data/wflw` and saves the visualization results in the `vis_results/wflw` directory. + +Image 1 + +Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +For 2D face keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in line 90 of [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py). diff --git a/mmpose/demo/docs/en/2d_hand_demo.md b/mmpose/demo/docs/en/2d_hand_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..cea74e2be4b1fafca6c6cc1e88cf5c5da232edc3 --- /dev/null +++ b/mmpose/demo/docs/en/2d_hand_demo.md @@ -0,0 +1,100 @@ +## 2D Hand Keypoint Demo + +We provide a demo script to test a single image or video with hand detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +**Hand Box Model Preparation:** The pre-trained hand box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#hand-bounding-box-detection-models). + +### 2D Hand Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +The pre-trained hand pose estimation model can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/hand_2d_keypoint.html). +Take [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap +``` + +Visualization result: + +
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --output-root vis_results --show --draw-heatmap +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap --device cpu +``` + +### 2D Hand Keypoints Video Demo + +Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input data/tests_data_nvgesture_sk_color.avi \ + --output-root vis_results --kpt-thr 0.1 +``` + +
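+Because `${INPUT_PATH}` also accepts a URL, the demo video can be processed directly from the link given below without downloading it first; a sketch:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \
+    configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \
+    --input https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi \
+    --output-root vis_results --kpt-thr 0.1
+```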
+ +The original video can be downloaded from [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi). + +### 2D Hand Keypoints Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/onehand10k \ + --pose2d hand --vis-out-dir vis_results/onehand10k \ + --bbox-thr 0.5 --kpt-thr 0.05 +``` + +This command infers all images located in `tests/data/onehand10k` and saves the visualization results in the `vis_results/onehand10k` directory. + +Image 1 Image 2 Image 3 Image 4 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +For 2D hand keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90). diff --git a/mmpose/demo/docs/en/2d_human_pose_demo.md b/mmpose/demo/docs/en/2d_human_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..4e682cc8ffd916b84839e1de09e1f96a40b36112 --- /dev/null +++ b/mmpose/demo/docs/en/2d_human_pose_demo.md @@ -0,0 +1,151 @@ +## 2D Human Pose Demo + +We provide demo scripts to perform human pose estimation on images or videos. + +### 2D Human Pose Top-Down Image Demo + +#### Use full image as input + +We provide a demo script to test a single image, using the full image as input bounding box. + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +The pre-trained human pose estimation models can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/body_2d_keypoint.html). +Take [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) as an example: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap +``` + +To run this demo on CPU: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap \ + --device=cpu +``` + +Visualization result: + +
+ +#### Use mmdet for human bounding box detection + +We provide a demo script to run mmdet for human detection, and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +Example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ + --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ + --output-root vis_results/ +``` + +Visualization result: + +
+ +To save the predicted results on disk, please specify `--save-predictions`. + +### 2D Human Pose Top-Down Video Demo + +The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. The difference is, the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +Example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ + --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --output-root=vis_results/demo --show --draw-heatmap +``` + +### 2D Human Pose Bottom-up Image/Video Demo + +We also provide a demo script using bottom-up models to estimate the human pose in an image or a video, which does not rely on human detectors. + +```shell +python demo/bottomup_demo.py \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--device ${GPU_ID or CPU}] \ + [--kpt-thr ${KPT_SCORE_THR}] +``` + +Example: + +```shell +python demo/bottomup_demo.py \ + configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ + --input tests/data/coco/000000197388.jpg --output-root=vis_results \ + --show --save-predictions +``` + +Visualization result: + +
+ +### 2D Human Pose Estimation with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py \ + tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --pose2d human --vis-out-dir vis_results/posetrack18 +``` + +This command infers the video and saves the visualization results in the `vis_results/posetrack18` directory. + +Image 1 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +For top-down models, try to edit the config file. For example, + +1. set `model.test_cfg.flip_test=False` in [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/mmpose/demo/docs/en/2d_wholebody_pose_demo.md b/mmpose/demo/docs/en/2d_wholebody_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..a4f9ace061aaf02675902fd3d7b437376ec8dfb7 --- /dev/null +++ b/mmpose/demo/docs/en/2d_wholebody_pose_demo.md @@ -0,0 +1,113 @@ +## 2D Human Whole-Body Pose Demo + +### 2D Human Whole-Body Pose Top-Down Image Demo + +#### Use full image as input + +We provide a demo script to test a single image, using the full image as input bounding box. + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +The pre-trained hand pose estimation models can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/2d_wholebody_keypoint.html). +Take [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) model as an example: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg +``` + +To run demos on CPU: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg \ + --device=cpu +``` + +#### Use mmdet for human bounding box detection + +We provide a demo script to run mmdet for human detection, and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. 
+ +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +Examples: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input tests/data/coco/000000196141.jpg \ + --output-root vis_results/ --show +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +### 2D Human Whole-Body Pose Top-Down Video Demo + +The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). + +Examples: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \ + --output-root vis_results/ --show +``` + +Visualization result: + +
+ +### 2D Human Whole-Body Pose Estimation with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/crowdpose \ + --pose2d wholebody --vis-out-dir vis_results/crowdpose +``` + +This command infers all images located in `tests/data/crowdpose` and saves the visualization results in the `vis_results/crowdpose` directory. + +Image 1 Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +For top-down models, try to edit the config file. For example, + +1. set `model.test_cfg.flip_test=False` in [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/mmpose/demo/docs/en/3d_hand_demo.md b/mmpose/demo/docs/en/3d_hand_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..edd1a4fa6e5bcbd40f00c0352a674c12e397a4b4 --- /dev/null +++ b/mmpose/demo/docs/en/3d_hand_demo.md @@ -0,0 +1,52 @@ +## 3D Hand Demo + +
+ +### 3D Hand Estimation Image Demo + +#### Using gt hand bounding boxes as input + +We provide a demo script to test a single image, given gt json file. + +```shell +python demo/hand3d_internet_demo.py \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_FILE} \ + --output-root ${OUTPUT_ROOT} \ + [--save-predictions] \ + [--gt-joints-file ${GT_JOINTS_FILE}]\ + [--disable-rebase-keypoint] \ + [--show] \ + [--device ${GPU_ID or CPU}] \ + [--kpt-thr ${KPT_THR}] \ + [--show-kpt-idx] \ + [--show-interval] \ + [--radius ${RADIUS}] \ + [--thickness ${THICKNESS}] +``` + +The pre-trained hand pose estimation model can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/hand_3d_keypoint.html). +Take [internet model](https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth) as an example: + +```shell +python demo/hand3d_internet_demo.py \ + configs/hand_3d_keypoint/internet/interhand3d/internet_res50_4xb16-20e_interhand3d-256x256.py \ + https://download.openmmlab.com/mmpose/hand3d/internet/res50_intehand3dv1.0_all_256x256-42b7f2ac_20210702.pth \ + --input tests/data/interhand2.6m/image69148.jpg \ + --save-predictions \ + --output-root vis_results +``` + +### 3D Hand Pose Estimation with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/interhand2.6m/image29590.jpg --pose3d hand3d --vis-out-dir vis_results/hand3d +``` + +This command infers the image and saves the visualization results in the `vis_results/hand3d` directory. + +Image 1 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#inferencer-a-unified-inference-interface). diff --git a/mmpose/demo/docs/en/3d_human_pose_demo.md b/mmpose/demo/docs/en/3d_human_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..d71515cd84ad400da41cd8dbfd22cb836b2087e9 --- /dev/null +++ b/mmpose/demo/docs/en/3d_human_pose_demo.md @@ -0,0 +1,89 @@ +## 3D Human Pose Demo + +
+ +### 3D Human Pose Two-stage Estimation Demo + +#### Using mmdet for human bounding box detection and top-down model for the 1st stage (2D pose detection), and inference the 2nd stage (2D-to-3D lifting) + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). + +```shell +python demo/body3d_pose_lifter_demo.py \ +${MMDET_CONFIG_FILE} \ +${MMDET_CHECKPOINT_FILE} \ +${MMPOSE_CONFIG_FILE_2D} \ +${MMPOSE_CHECKPOINT_FILE_2D} \ +${MMPOSE_CONFIG_FILE_3D} \ +${MMPOSE_CHECKPOINT_FILE_3D} \ +--input ${VIDEO_PATH or IMAGE_PATH or 'webcam'} \ +[--show] \ +[--disable-rebase-keypoint] \ +[--disable-norm-pose-2d] \ +[--num-instances ${NUM_INSTANCES}] \ +[--output-root ${OUT_VIDEO_ROOT}] \ +[--save-predictions] \ +[--device ${GPU_ID or CPU}] \ +[--det-cat-id ${DET_CAT_ID}] \ +[--bbox-thr ${BBOX_THR}] \ +[--kpt-thr ${KPT_THR}] \ +[--use-oks-tracking] \ +[--tracking-thr ${TRACKING_THR}] \ +[--show-interval ${INTERVAL}] \ +[--thickness ${THICKNESS}] \ +[--radius ${RADIUS}] \ +[--online] +``` + +Note that + +1. `${VIDEO_PATH}` can be the local path or **URL** link to video file. + +2. If the `[--online]` option is set to **True**, future frame information can **not** be used when using multi frames for inference in the 2D pose detection stage. + +Examples: + +During 2D pose detection, for single-frame inference that do not rely on extra frames to get the final results of the current frame and save the prediction results, try this: + +```shell +python demo/body3d_pose_lifter_demo.py \ +demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ +https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ +configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ +https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ +configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ +https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ +--input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ +--output-root vis_results \ +--save-predictions +``` + +During 2D pose detection, for multi-frame inference that rely on extra frames to get the final results of the current frame, try this: + +```shell +python demo/body3d_pose_lifter_demo.py \ +demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ +https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ +configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ +https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ +configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ +https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ +--input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ +--output-root vis_results \ +--online +``` + +### 3D Human Pose Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. 
It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/coco/000000000785.jpg \ + --pose3d human3d --vis-out-dir vis_results/human3d +``` + +This command infers the image and saves the visualization results in the `vis_results/human3d` directory. + +Image 1 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#inferencer-a-unified-inference-interface). diff --git a/mmpose/demo/docs/en/mmdet_modelzoo.md b/mmpose/demo/docs/en/mmdet_modelzoo.md new file mode 100644 index 0000000000000000000000000000000000000000..3dd5e4a55ae3f1ca51544b59d9ea81e764821a4a --- /dev/null +++ b/mmpose/demo/docs/en/mmdet_modelzoo.md @@ -0,0 +1,41 @@ +## Pre-trained Detection Models + +### Human Bounding Box Detection Models + +For human bounding box detection models, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). +MMDetection provides 80-class COCO-pretrained models, which already includes the `person` category. + +### Hand Bounding Box Detection Models + +For hand bounding box detection, we simply train our hand box models on OneHand10K dataset using MMDetection. + +#### Hand detection results on OneHand10K test set + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | +| [RTMDet-nano](/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py) | 0.760 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | - | + +### Face Bounding Box Detection Models + +For face bounding box detection, we train a YOLOX detector on COCO-face data using MMDetection. + +#### Face detection results on COCO-face test set + +| Arch | Box AP | ckpt | +| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | +| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | + +### Animal Bounding Box Detection Models + +#### COCO animals + +In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') +For animals in the categories, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). 
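+
+For example, such a COCO-pretrained detector can be plugged into the 2D animal pose demo by selecting the target class with `--det-cat-id`; a sketch, with `${INPUT_PATH}` standing in for your own image or video and 16 being the 'dog' index listed above:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \
+    https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \
+    configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+    https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+    --input ${INPUT_PATH} --show --det-cat-id=16
+```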
+ +#### Macaque detection results on MacaquePose test set + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | diff --git a/mmpose/demo/docs/en/webcam_api_demo.md b/mmpose/demo/docs/en/webcam_api_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..986939217181544f4202f2a4d6b30b8df3a1417c --- /dev/null +++ b/mmpose/demo/docs/en/webcam_api_demo.md @@ -0,0 +1,30 @@ +## Webcam Demo + +The original Webcam API has been deprecated starting from version v1.1.0. Users now have the option to utilize either the Inferencer or the demo script for conducting pose estimation using webcam input. + +### Webcam Demo with Inferencer + +Users can utilize the MMPose Inferencer to estimate human poses in webcam inputs by executing the following command: + +```shell +python demo/inferencer_demo.py webcam --pose2d 'human' +``` + +For additional information about the arguments of Inferencer, please refer to the [Inferencer Documentation](/docs/en/user_guides/inference.md). + +### Webcam Demo with Demo Script + +All of the demo scripts, except for `demo/image_demo.py`, support webcam input. 
+ +Take `demo/topdown_demo_with_mmdet.py` as example, users can utilize this script with webcam input by specifying **`--input webcam`** in the command: + +```shell +# inference with webcam +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` diff --git a/mmpose/demo/docs/zh_cn/2d_animal_demo.md b/mmpose/demo/docs/zh_cn/2d_animal_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..f1932cf6eb2322f239ff4de5433abbe488449638 --- /dev/null +++ b/mmpose/demo/docs/zh_cn/2d_animal_demo.md @@ -0,0 +1,124 @@ +## 2D Animal Pose Demo + +本系列文档我们会来介绍如何使用提供了的脚本进行完成基本的推理 demo ,本节先介绍如何对 top-down 结构和动物的 2D 姿态进行单张图片和视频推理,请确保你已经安装了 3.0 以上版本的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 + +### 2D 动物图片姿态识别推理 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ + [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ + [--device ${GPU_ID or CPU}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/animal_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 +``` + +可视化结果如下: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +`--det-cat-id=15` 参数用来指定模型只检测 `cat` 类型,这是基于 COCO 数据集的数据。 + +**COCO 数据集动物信息** + +COCO 数据集共包含 80 个类别,其中有 10 种常见动物,类别如下: + +(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') + +对于其他类型的动物,我们也提供了一些训练好的动物检测模型,用户可以前往 [detection model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md) 下载。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --draw-heatmap --det-cat-id=15 +``` + +如果想本地保存预测结果,需要使用 `--save-predictions` 。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 +``` + +仅使用 CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_tiny_8xb32-300e_coco/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 --device cpu +``` + +### 2D 动物视频姿态识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \ + https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input demo/resources/ \ + --output-root vis_results --draw-heatmap --det-cat-id=16 +``` + +
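+与其他 demo 脚本一样,`demo/topdown_demo_with_mmdet.py` 也支持通过 `--input webcam` 使用摄像头作为输入。下面是一个示意命令(假设本机有可用摄像头,这里沿用上文的模型并仍然只检测 `cat` 类别):
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py \
+    https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth \
+    configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+    https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+    --input webcam \
+    --show --draw-heatmap --det-cat-id=15
+```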
+ +这段视频可以在 [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing) 下载。 + +### 使用 Inferencer 进行 2D 动物姿态识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/ap10k \ + --pose2d animal --vis-out-dir vis_results/ap10k +``` + +该命令会对输入的 `tests/data/ap10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/ap10k` 文件夹下。 + +Image 1 Image 2 + +Inferencer 同样支持保存预测结果,更多的信息可以参考 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 。 + +### 加速推理 + +用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,如 [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85) 所示。 +2. 使用更快的 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/mmpose/demo/docs/zh_cn/2d_face_demo.md b/mmpose/demo/docs/zh_cn/2d_face_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..78091f1ffed9c88282f6c6394936b24eb78f430c --- /dev/null +++ b/mmpose/demo/docs/zh_cn/2d_face_demo.md @@ -0,0 +1,88 @@ +## 2D Face Keypoint Demo + +本节我们继续演示如何使用 demo 脚本进行 2D 脸部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMdetection](https://github.com/open-mmlab/mmdetection) 。 + +我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#脸部-bounding-box-检测模型) 提供了一个预训练好的脸部 Bounding Box 预测模型,用户可以前往下载。 + +### 2D 脸部图片关键点识别推理 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/en/dev-1.x/model_zoo/face_2d_keypoint.html) 获取预训练好的脸部关键点识别模型。 + +这里我们用 [face6 model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap +``` + +可视化结果如下图所示: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input tests/data/cofw/001766.jpg \ + --draw-heatmap --output-root vis_results +``` + +### 2D 脸部视频关键点识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \ + --input demo/resources/ \ + --show --output-root vis_results --radius 1 +``` + +
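+如需同时把预测的关键点结果保存到本地,可以在上面图片示例的基础上追加 `--save-predictions` 参数,示意如下(预测结果文件预计会保存在 `--output-root` 指定的目录下):
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \
+    https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \
+    configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth \
+    --input tests/data/cofw/001766.jpg \
+    --draw-heatmap --output-root vis_results --save-predictions
+```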
+ +这段视频可以在 [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing) 下载。 + +### 使用 Inferencer 进行 2D 脸部关键点识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/wflw \ + --pose2d face --vis-out-dir vis_results/wflw --radius 1 +``` + +该命令会对输入的 `tests/data/wflw` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/wflw` 文件夹下。 + +Image 1 + +Image 2 + +除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 查看。 + +### 加速推理 + +对于 2D 脸部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,例如 [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) 中的第 90 行。 diff --git a/mmpose/demo/docs/zh_cn/2d_hand_demo.md b/mmpose/demo/docs/zh_cn/2d_hand_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..886aace38e8674730de589636e365b55a578d94e --- /dev/null +++ b/mmpose/demo/docs/zh_cn/2d_hand_demo.md @@ -0,0 +1,103 @@ +## 2D Hand Keypoint Demo + +本节我们继续通过 demo 脚本演示对单张图片或者视频的 2D 手部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 + +我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#手部-bounding-box-识别模型) 提供了预训练好的手部 Bounding Box 预测模型,用户可以前往下载。 + +### 2D 手部图片关键点识别 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/hand_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap +``` + +可视化结果如下: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --output-root vis_results --show --draw-heatmap +``` + +如果想本地保存预测结果,需要添加 `--save-predictions` 。 + +如果想用 CPU 进行 demo 需添加 `--device cpu` : + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap --device cpu +``` + +### 2D 手部视频关键点识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \ + configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \ + --input data/tests_data_nvgesture_sk_color.avi \ + --output-root vis_results --kpt-thr 0.1 +``` + +
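+由于 `${INPUT_PATH}` 也支持 URL,上面的演示视频可以直接使用下文给出的原视频链接进行在线推理,无需先下载到本地,示意如下:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth \
+    configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth \
+    --input https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi \
+    --output-root vis_results --kpt-thr 0.1
+```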
+ +
+ +这段视频可以在 [Google Drive](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi) 下载到。 + +### 使用 Inferencer 进行 2D 手部关键点识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/onehand10k \ + --pose2d hand --vis-out-dir vis_results/onehand10k \ + --bbox-thr 0.5 --kpt-thr 0.05 +``` + +该命令会对输入的 `tests/data/onehand10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/onehand10k` 文件夹下。 + +Image 1 Image 2 Image 3 Image 4 + +除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 查看。 + +### 加速推理 + +对于 2D 手部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,如 [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90) 所示。 diff --git a/mmpose/demo/docs/zh_cn/2d_human_pose_demo.md b/mmpose/demo/docs/zh_cn/2d_human_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..b39e510891adb637345d90d0d9465e2aade44cc9 --- /dev/null +++ b/mmpose/demo/docs/zh_cn/2d_human_pose_demo.md @@ -0,0 +1,146 @@ +## 2D Human Pose Demo + +本节我们继续使用 demo 脚本演示 2D 人体关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [mmdet](https://github.com/open-mmlab/mmdetection) 。 + +### 2D 人体姿态 Top-Down 图片检测 + +#### 使用整张图片作为输入进行检测 + +此时输入的整张图片会被当作 bounding box 使用。 + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo/body_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) 来进行演示: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap +``` + +使用 CPU 推理: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap \ + --device=cpu +``` + +可视化结果如下: + +
+ +#### 使用 MMDet 做人体 bounding box 检测 + +使用 MMDet 进行识别的命令如下所示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +结合我们的具体例子: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ + --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ + --output-root vis_results/ +``` + +可视化结果如下: + +
+ +想要本地保存识别结果,用户需要加上 `--save-predictions` 。 + +### 2D 人体姿态 Top-Down 视频检测 + +我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测后 MMPose 完成 Top-Down 的姿态预估,视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth \ + --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --output-root=vis_results/demo --show --draw-heatmap +``` + +### 2D 人体姿态 Bottom-Up 图片和视频识别检测 + +除了 Top-Down ,我们也支持 Bottom-Up 不依赖人体识别器的人体姿态预估识别,使用方式如下: + +```shell +python demo/bottomup_demo.py \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--device ${GPU_ID or CPU}] \ + [--kpt-thr ${KPT_SCORE_THR}] +``` + +结合具体示例如下: + +```shell +python demo/bottomup_demo.py \ + configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ + --input tests/data/coco/000000197388.jpg --output-root=vis_results \ + --show --save-predictions +``` + +其可视化结果如图所示: + +
+ +### 使用 Inferencer 进行 2D 人体姿态识别检测 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py \ + tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --pose2d human --vis-out-dir vis_results/posetrack18 +``` + +该命令会对输入的 `tests/data/posetrack18` 下的视频进行推理并且把可视化结果存入 `vis_results/posetrack18` 文件夹下。 + +Image 1 + +Inferencer 支持保存姿态的检测结果,具体的使用可参考 [inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 。 + +### 加速推理 + +对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,如 [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56) 所示。 +2. 使用更快的人体 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/mmpose/demo/docs/zh_cn/2d_wholebody_pose_demo.md b/mmpose/demo/docs/zh_cn/2d_wholebody_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..6c4d77e3dfbf3bd10b152e44df303beb906d7d3d --- /dev/null +++ b/mmpose/demo/docs/zh_cn/2d_wholebody_pose_demo.md @@ -0,0 +1,108 @@ +## 2D Human Whole-Body Pose Demo + +### 2D 人体全身姿态 Top-Down 图片识别 + +#### 使用整张图片作为输入进行检测 + +此时输入的整张图片会被当作 bounding box 使用。 + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/2d_wholebody_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) 来进行演示: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg +``` + +使用 CPU 推理: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg \ + --device=cpu +``` + +#### 使用 MMDet 进行人体 bounding box 检测 + +使用 MMDet 进行识别的命令格式如下: + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +具体可例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input tests/data/coco/000000196141.jpg \ + --output-root vis_results/ --show +``` + 
+想要本地保存预测结果,用户需要加上 `--save-predictions` 参数。
+
+### 2D 人体全身姿态 Top-Down 视频推理
+
+我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测,再由 MMPose 完成 Top-Down 姿态估计。
+
+例如:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \
+    configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \
+    https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \
+    --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \
+    --output-root vis_results/ --show
+```
+
+可视化结果如下:
+
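+若在上面的视频推理命令中加入 `--save-predictions`,每一帧的预测结果会以 JSON 形式写入输出目录。下面的示意脚本按帧读取这些结果(文件名按 `results_<输入文件名>.json` 规则给出,`instance_info`、`frame_id` 等字段参照本仓库 demo 脚本的保存逻辑,均属假设,请以实际输出为准):
+
+```python
+# 按帧读取 --save-predictions 保存的预测结果(文件名与字段结构为假设)
+import json
+
+pred_file = 'vis_results/results_demo_video.json'  # 假设的输出文件名
+with open(pred_file) as f:
+    data = json.load(f)
+
+# meta_info 保存数据集元信息;视频输入时 instance_info 是按帧组织的列表
+for frame in data['instance_info']:
+    instances = frame['instances']
+    print(f"frame {frame['frame_id']}: {len(instances)} 个实例")
+```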
+ +### 使用 Inferencer 进行 2D 人体全身姿态识别 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/crowdpose \ + --pose2d wholebody --vis-out-dir vis_results/crowdpose +``` + +该命令会对输入的 `tests/data/crowdpose` 下所有图片进行推理并且把可视化结果存入 `vis_results/crowdpose` 文件夹下。 + +Image 1 Image 2 + +Inferencer 支持保存姿态的检测结果,具体的使用可参考 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/#inferencer-a-unified-inference-interface) 。 + +### 加速推理 + +对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,用户可参考 [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90) 。 +2. 使用更快的人体 bounding box 检测器,如 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/mmpose/demo/docs/zh_cn/3d_human_pose_demo.md b/mmpose/demo/docs/zh_cn/3d_human_pose_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..6ed9dd67de1326d5c31e0edf2bb5ae549bb71789 --- /dev/null +++ b/mmpose/demo/docs/zh_cn/3d_human_pose_demo.md @@ -0,0 +1 @@ +coming soon diff --git a/mmpose/demo/docs/zh_cn/mmdet_modelzoo.md b/mmpose/demo/docs/zh_cn/mmdet_modelzoo.md new file mode 100644 index 0000000000000000000000000000000000000000..1cb12358a35e42efecbacbbac1df383147a84f76 --- /dev/null +++ b/mmpose/demo/docs/zh_cn/mmdet_modelzoo.md @@ -0,0 +1,43 @@ +## Pre-trained Detection Models + +### 人体 Bounding Box 检测模型 + +MMDetection 提供了基于 COCO 的包括 `person` 在内的 80 个类别的预训练模型,用户可前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载并将其用作人体 bounding box 识别模型。 + +### 手部 Bounding Box 检测模型 + +对于手部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 OneHand10K 数据库训练的模型。 + +#### 基于 OneHand10K 测试集的测试结果 + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | +| [RTMDet-nano](/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py) | 0.760 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | - | + +### 脸部 Bounding Box 检测模型 + +对于脸部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 COCO-Face 数据库训练的 YOLOX 检测器。 + +#### 基于 COCO-face 测试集的测试结果 + +| Arch | Box AP | ckpt | +| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | +| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | + +### 动物 Bounding Box 检测模型 + +#### COCO animals + +COCO 数据集内包括了 10 种常见的 `animal` 类型: + +(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') 。 + +用户如果需要使用以上类别的动物检测模型,可以前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载。 
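+下面给出一个用 COCO 预训练检测器筛选上述动物类别检测框的 Python 示意(检测器权重与输入图片为假设的占位路径,类别 id 对应上文括号中的编号,得分阈值 0.3 仅作演示):
+
+```python
+# 用 80 类 COCO 检测器筛选动物类别的检测框,再将其送入动物姿态估计模型
+import numpy as np
+from mmdet.apis import inference_detector, init_detector
+
+# COCO 80 类中动物类别的连续 id(与上文列表一致:bird ... giraffe)
+ANIMAL_CAT_IDS = [14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
+
+detector = init_detector(
+    'demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py',  # 本目录提供的 80 类 COCO 配置
+    'path/to/rtmdet_m_coco.pth',  # 假设的本地权重路径,可从 MMDetection Model Zoo 下载
+    device='cpu')
+
+det_result = inference_detector(detector, 'path/to/animal.jpg').pred_instances.cpu().numpy()
+
+# 只保留动物类别且得分高于阈值的检测框
+keep = np.isin(det_result.labels, ANIMAL_CAT_IDS) & (det_result.scores > 0.3)
+animal_bboxes = det_result.bboxes[keep]
+print(f'共检测到 {len(animal_bboxes)} 个动物目标,可将这些 bbox 送入动物姿态估计模型')
+```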
+ +#### 基于 MacaquePose 测试集的测试结果 + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | diff --git a/mmpose/demo/docs/zh_cn/webcam_api_demo.md b/mmpose/demo/docs/zh_cn/webcam_api_demo.md new file mode 100644 index 0000000000000000000000000000000000000000..66099c9ca676dbdb1a63a3113bd2a96d8c56d435 --- /dev/null +++ b/mmpose/demo/docs/zh_cn/webcam_api_demo.md @@ -0,0 +1,30 @@ +## 摄像头推理 + +从版本 v1.1.0 开始,原来的摄像头 API 已被弃用。用户现在可以选择使用推理器(Inferencer)或 Demo 脚本从摄像头读取的视频中进行姿势估计。 + +### 使用推理器进行摄像头推理 + +用户可以通过执行以下命令来利用 MMPose Inferencer 对摄像头输入进行人体姿势估计: + +```shell +python demo/inferencer_demo.py webcam --pose2d 'human' +``` + +有关推理器的参数详细信息,请参阅 [推理器文档](/docs/en/user_guides/inference.md)。 + +### 使用 Demo 脚本进行摄像头推理 + +除了 `demo/image_demo.py` 之外,所有的 Demo 脚本都支持摄像头输入。 + +以 `demo/topdown_demo_with_mmdet.py` 为例,用户可以通过在命令中指定 **`--input webcam`** 来使用该脚本对摄像头输入进行推理: + +```shell +# inference with webcam +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` diff --git a/mmpose/demo/hand3d_internet_demo.py b/mmpose/demo/hand3d_internet_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb10a820a46e38f01dcde3e0f36224784099d79 --- /dev/null +++ b/mmpose/demo/hand3d_internet_demo.py @@ -0,0 +1,285 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import mimetypes +import os +import time +from argparse import ArgumentParser + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np +from mmengine.logging import print_log + +from mmpose.apis import inference_topdown, init_model +from mmpose.registry import VISUALIZERS +from mmpose.structures import (PoseDataSample, merge_data_samples, + split_instances) + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--input', type=str, default='', help='Image/Video file') + parser.add_argument( + '--output-root', + type=str, + default='', + help='root of the output img file. 
' + 'Default not saving the visualization images.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--disable-rebase-keypoint', + action='store_true', + default=False, + help='Whether to disable rebasing the predicted 3D pose so its ' + 'lowest keypoint has a height of 0 (landing on the ground). Rebase ' + 'is useful for visualization when the model do not predict the ' + 'global position of the 3D pose.') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show result') + parser.add_argument('--device', default='cpu', help='Device for inference') + parser.add_argument( + '--kpt-thr', + type=float, + default=0.3, + help='Visualizing keypoint thresholds') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + + args = parser.parse_args() + return args + + +def process_one_image(args, img, model, visualizer=None, show_interval=0): + """Visualize predicted keypoints of one image.""" + # inference a single image + pose_results = inference_topdown(model, img) + # post-processing + pose_results_2d = [] + for idx, res in enumerate(pose_results): + pred_instances = res.pred_instances + keypoints = pred_instances.keypoints + rel_root_depth = pred_instances.rel_root_depth + scores = pred_instances.keypoint_scores + hand_type = pred_instances.hand_type + + res_2d = PoseDataSample() + gt_instances = res.gt_instances.clone() + pred_instances = pred_instances.clone() + res_2d.gt_instances = gt_instances + res_2d.pred_instances = pred_instances + + # add relative root depth to left hand joints + keypoints[:, 21:, 2] += rel_root_depth + + # set joint scores according to hand type + scores[:, :21] *= hand_type[:, [0]] + scores[:, 21:] *= hand_type[:, [1]] + # normalize kpt score + if scores.max() > 1: + scores /= 255 + + res_2d.pred_instances.set_field(keypoints[..., :2].copy(), 'keypoints') + + # rotate the keypoint to make z-axis correspondent to height + # for better visualization + vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + keypoints[..., :3] = keypoints[..., :3] @ vis_R + + # rebase height (z-axis) + if not args.disable_rebase_keypoint: + valid = scores > 0 + keypoints[..., 2] -= np.min( + keypoints[valid, 2], axis=-1, keepdims=True) + + pose_results[idx].pred_instances.keypoints = keypoints + pose_results[idx].pred_instances.keypoint_scores = scores + pose_results_2d.append(res_2d) + + data_samples = merge_data_samples(pose_results) + data_samples_2d = merge_data_samples(pose_results_2d) + + # show the results + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if visualizer is not None: + visualizer.add_datasample( + 'result', + img, + data_sample=data_samples, + det_data_sample=data_samples_2d, + draw_gt=False, + draw_bbox=True, + kpt_thr=args.kpt_thr, + convert_keypoint=False, + axis_azimuth=-115, + axis_limit=200, + axis_elev=15, + show_kpt_idx=args.show_kpt_idx, + show=args.show, + wait_time=show_interval) + + # if there is no instance detected, return None + return 
data_samples.get('pred_instances', None) + + +def main(): + args = parse_args() + + assert args.input != '' + assert args.show or (args.output_root != '') + + output_file = None + if args.output_root: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + # build the model from a config file and a checkpoint file + model = init_model( + args.config, args.checkpoint, device=args.device.lower()) + + # init visualizer + model.cfg.visualizer.radius = args.radius + model.cfg.visualizer.line_width = args.thickness + + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.set_dataset_meta(model.dataset_meta) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if input_type == 'image': + # inference + pred_instances = process_one_image(args, args.input, model, visualizer) + + if args.save_predictions: + pred_instances_list = split_instances(pred_instances) + + if output_file: + img_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) + + elif input_type in ['webcam', 'video']: + + if args.input == 'webcam': + cap = cv2.VideoCapture(0) + else: + cap = cv2.VideoCapture(args.input) + + video_writer = None + pred_instances_list = [] + frame_idx = 0 + + while cap.isOpened(): + success, frame = cap.read() + frame_idx += 1 + + if not success: + break + + # topdown pose estimation + pred_instances = process_one_image(args, frame, model, visualizer, + 0.001) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_instances))) + + # output videos + if output_file: + frame_vis = visualizer.get_image() + + if video_writer is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter( + output_file, + fourcc, + 25, # saved fps + (frame_vis.shape[1], frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + if args.show: + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + + time.sleep(args.show_interval) + + if video_writer: + video_writer.release() + + cap.release() + + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=model.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print_log( + f'predictions have been saved at {args.pred_save_path}', + logger='current', + level=logging.INFO) + + if output_file is not None: + input_type = input_type.replace('webcam', 'video') + print_log( + f'the output {input_type} has been saved at {output_file}', + logger='current', + level=logging.INFO) + + +if __name__ == '__main__': + main() diff --git a/mmpose/demo/image_demo.py b/mmpose/demo/image_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..6a408d17605fb5809968317c1357b12386f58b6f --- /dev/null +++ b/mmpose/demo/image_demo.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +from argparse import ArgumentParser + +from mmcv.image import imread +from mmengine.logging import print_log + +from mmpose.apis import inference_topdown, init_model +from mmpose.registry import VISUALIZERS +from mmpose.structures import merge_data_samples + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('--out-file', default=None, help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--draw-heatmap', + action='store_true', + help='Visualize the predicted heatmap') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--kpt-thr', + type=float, + default=0.3, + help='Visualizing keypoint thresholds') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--alpha', type=float, default=0.8, help='The transparency of bboxes') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + # build the model from a config file and a checkpoint file + if args.draw_heatmap: + cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) + else: + cfg_options = None + + model = init_model( + args.config, + args.checkpoint, + device=args.device, + cfg_options=cfg_options) + + # init visualizer + model.cfg.visualizer.radius = args.radius + model.cfg.visualizer.alpha = args.alpha + model.cfg.visualizer.line_width = args.thickness + + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.set_dataset_meta( + model.dataset_meta, skeleton_style=args.skeleton_style) + + # inference a single image + batch_results = inference_topdown(model, args.img) + results = merge_data_samples(batch_results) + + # show the results + img = imread(args.img, channel_order='rgb') + visualizer.add_datasample( + 'result', + img, + data_sample=results, + draw_gt=False, + draw_bbox=True, + kpt_thr=args.kpt_thr, + draw_heatmap=args.draw_heatmap, + show_kpt_idx=args.show_kpt_idx, + skeleton_style=args.skeleton_style, + show=args.show, + out_file=args.out_file) + + if args.out_file is not None: + print_log( + f'the output image has been saved at {args.out_file}', + logger='current', + level=logging.INFO) + + +if __name__ == '__main__': + main() diff --git a/mmpose/demo/inferencer_demo.py b/mmpose/demo/inferencer_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..d20c433f4e0f3dac55b2ce2d5f54fea058392407 --- /dev/null +++ b/mmpose/demo/inferencer_demo.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from argparse import ArgumentParser +from typing import Dict + +from mmpose.apis.inferencers import MMPoseInferencer, get_model_aliases + +filter_args = dict(bbox_thr=0.3, nms_thr=0.3, pose_based_nms=False) +POSE2D_SPECIFIC_ARGS = dict( + yoloxpose=dict(bbox_thr=0.01, nms_thr=0.65, pose_based_nms=True), + rtmo=dict(bbox_thr=0.1, nms_thr=0.65, pose_based_nms=True), +) + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'inputs', + type=str, + nargs='?', + help='Input image/video path or folder path.') + + # init args + parser.add_argument( + '--pose2d', + type=str, + default=None, + help='Pretrained 2D pose estimation algorithm. It\'s the path to the ' + 'config file or the model name defined in metafile.') + parser.add_argument( + '--pose2d-weights', + type=str, + default=None, + help='Path to the custom checkpoint file of the selected pose model. ' + 'If it is not specified and "pose2d" is a model name of metafile, ' + 'the weights will be loaded from metafile.') + parser.add_argument( + '--pose3d', + type=str, + default=None, + help='Pretrained 3D pose estimation algorithm. It\'s the path to the ' + 'config file or the model name defined in metafile.') + parser.add_argument( + '--pose3d-weights', + type=str, + default=None, + help='Path to the custom checkpoint file of the selected pose model. ' + 'If it is not specified and "pose3d" is a model name of metafile, ' + 'the weights will be loaded from metafile.') + parser.add_argument( + '--det-model', + type=str, + default=None, + help='Config path or alias of detection model.') + parser.add_argument( + '--det-weights', + type=str, + default=None, + help='Path to the checkpoints of detection model.') + parser.add_argument( + '--det-cat-ids', + type=int, + nargs='+', + default=0, + help='Category id for detection model.') + parser.add_argument( + '--scope', + type=str, + default='mmpose', + help='Scope where modules are defined.') + parser.add_argument( + '--device', + type=str, + default=None, + help='Device used for inference. ' + 'If not specified, the available device will be automatically used.') + parser.add_argument( + '--show-progress', + action='store_true', + help='Display the progress bar during inference.') + + # The default arguments for prediction filtering differ for top-down + # and bottom-up models. 
We assign the default arguments according to the + # selected pose2d model + args, _ = parser.parse_known_args() + for model in POSE2D_SPECIFIC_ARGS: + if model in args.pose2d: + filter_args.update(POSE2D_SPECIFIC_ARGS[model]) + break + + # call args + parser.add_argument( + '--show', + action='store_true', + help='Display the image/video in a popup window.') + parser.add_argument( + '--draw-bbox', + action='store_true', + help='Whether to draw the bounding boxes.') + parser.add_argument( + '--draw-heatmap', + action='store_true', + default=False, + help='Whether to draw the predicted heatmaps.') + parser.add_argument( + '--bbox-thr', + type=float, + default=filter_args['bbox_thr'], + help='Bounding box score threshold') + parser.add_argument( + '--nms-thr', + type=float, + default=filter_args['nms_thr'], + help='IoU threshold for bounding box NMS') + parser.add_argument( + '--pose-based-nms', + type=lambda arg: arg.lower() in ('true', 'yes', 't', 'y', '1'), + default=filter_args['pose_based_nms'], + help='Whether to use pose-based NMS') + parser.add_argument( + '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') + parser.add_argument( + '--tracking-thr', type=float, default=0.3, help='Tracking threshold') + parser.add_argument( + '--use-oks-tracking', + action='store_true', + help='Whether to use OKS as similarity in tracking') + parser.add_argument( + '--disable-norm-pose-2d', + action='store_true', + help='Whether to scale the bbox (along with the 2D pose) to the ' + 'average bbox scale of the dataset, and move the bbox (along with the ' + '2D pose) to the average bbox center of the dataset. This is useful ' + 'when bbox is small, especially in multi-person scenarios.') + parser.add_argument( + '--disable-rebase-keypoint', + action='store_true', + default=False, + help='Whether to disable rebasing the predicted 3D pose so its ' + 'lowest keypoint has a height of 0 (landing on the ground). Rebase ' + 'is useful for visualization when the model do not predict the ' + 'global position of the 3D pose.') + parser.add_argument( + '--num-instances', + type=int, + default=1, + help='The number of 3D poses to be visualized in every frame. 
If ' + 'less than 0, it will be set to the number of pose results in the ' + 'first frame.') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization.') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization.') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--black-background', + action='store_true', + help='Plot predictions on a black image') + parser.add_argument( + '--vis-out-dir', + type=str, + default='', + help='Directory for saving visualized results.') + parser.add_argument( + '--pred-out-dir', + type=str, + default='', + help='Directory for saving inference results.') + parser.add_argument( + '--show-alias', + action='store_true', + help='Display all the available model aliases.') + + call_args = vars(parser.parse_args()) + + init_kws = [ + 'pose2d', 'pose2d_weights', 'scope', 'device', 'det_model', + 'det_weights', 'det_cat_ids', 'pose3d', 'pose3d_weights', + 'show_progress' + ] + init_args = {} + for init_kw in init_kws: + init_args[init_kw] = call_args.pop(init_kw) + + display_alias = call_args.pop('show_alias') + + return init_args, call_args, display_alias + + +def display_model_aliases(model_aliases: Dict[str, str]) -> None: + """Display the available model aliases and their corresponding model + names.""" + aliases = list(model_aliases.keys()) + max_alias_length = max(map(len, aliases)) + print(f'{"ALIAS".ljust(max_alias_length+2)}MODEL_NAME') + for alias in sorted(aliases): + print(f'{alias.ljust(max_alias_length+2)}{model_aliases[alias]}') + + +def main(): + init_args, call_args, display_alias = parse_args() + if display_alias: + model_alises = get_model_aliases(init_args['scope']) + display_model_aliases(model_alises) + else: + inferencer = MMPoseInferencer(**init_args) + for _ in inferencer(**call_args): + pass + + +if __name__ == '__main__': + main() diff --git a/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py b/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py new file mode 100644 index 0000000000000000000000000000000000000000..0ccb78cfcab59b58839f8165dbf157b4d34721d2 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py @@ -0,0 +1,270 @@ +# runtime settings +default_scope = 'mmdet' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# model settings +model = dict( + type='CascadeRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + 
norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + 
sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator diff --git a/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py b/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f91bd0d105b9394c514ffb82d54117dba347680a --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py @@ -0,0 +1,256 @@ +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 19]) +total_epochs = 20 + +# model settings +model = dict( + type='CascadeRCNN', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 
64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) + +dataset_type = 'CocoDataset' +data_root = 'data/coco' 
+img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_train2017.json', + img_prefix=f'{data_root}/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py b/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py new file mode 100644 index 0000000000000000000000000000000000000000..ee54f5b66bd216c485db0a56a68bf2793428d123 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py @@ -0,0 +1,182 @@ +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +total_epochs = 12 + +model = dict( + type='FasterRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + 
bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) + +dataset_type = 'CocoDataset' +data_root = 'data/coco' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_train2017.json', + img_prefix=f'{data_root}/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py b/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5bceed65ba1359995caf82f614d6d1a7b86da460 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py @@ -0,0 +1,196 @@ +# runtime settings +default_scope = 'mmdet' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + 
sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# model settings +model = dict( + type='FasterRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', 
scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator diff --git a/mmpose/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py b/mmpose/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..05d39fa9a87a0200f9b9d29cd19acd28c155d126 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,242 @@ +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + 
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_train2017.json', + img_prefix='data/coco/train2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + ]), + val=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + test=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + 
img_prefix='data/coco/val2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +evaluation = dict(metric=['bbox', 'segm']) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +checkpoint_config = dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +custom_hooks = [dict(type='NumClassCheckHook')] +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/mmpose/demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py b/mmpose/demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py new file mode 100644 index 0000000000000000000000000000000000000000..620de8dc8f038f7267bc566e04afd8b647ba75da --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py @@ -0,0 +1,20 @@ +_base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + bbox_head=dict(num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) + +val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) +test_dataloader = val_dataloader diff --git a/mmpose/demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py b/mmpose/demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6d0d3dfef15f96f7c0ea188998c304031fa8c828 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py @@ -0,0 +1 @@ +_base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py' diff --git a/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_coco-person.py b/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_coco-person.py new file mode 100644 index 0000000000000000000000000000000000000000..c2f1b64e4acb31ec24396c8421492d7d2fdd7aab --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_coco-person.py @@ -0,0 +1,104 @@ +_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' + +input_shape = 320 + +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.25, + use_depthwise=True, + ), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True, + ), + bbox_head=dict( + in_channels=64, + feat_channels=64, + share_conv=False, + exp_on_reg=False, + use_depthwise=True, + num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='CachedMosaic', + 
img_scale=(input_shape, input_shape), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(input_shape * 2, input_shape * 2), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(input_shape, input_shape), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + dataset=dict(pipeline=train_pipeline, metainfo=dict(classes=('person', )))) + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=('person', )))) +test_dataloader = val_dataloader + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] diff --git a/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py b/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py new file mode 100644 index 0000000000000000000000000000000000000000..278cc0bfe82670f89a566dc7e79b362b5a23a3d9 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/rtmdet_nano_320-8xb32_hand.py @@ -0,0 +1,171 @@ +_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' + +input_shape = 320 + +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.25, + use_depthwise=True, + ), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True, + ), + bbox_head=dict( + in_channels=64, + feat_channels=64, + share_conv=False, + exp_on_reg=False, + use_depthwise=True, + num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({'data/': 's3://openmmlab/datasets/'})) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='CachedMosaic', + img_scale=(input_shape, input_shape), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(input_shape * 2, input_shape * 2), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + 
type='RandomResize', + scale=(input_shape, input_shape), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +data_mode = 'topdown' +data_root = 'data/' + +train_dataset = dict( + _delete_=True, + type='ConcatDataset', + datasets=[ + dict( + type='mmpose.OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='onehand10k/annotations/onehand10k_train.json', + data_prefix=dict(img='pose/OneHand10K/')), + dict( + type='mmpose.FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='freihand/annotations/freihand_train.json', + data_prefix=dict(img='pose/FreiHand/')), + dict( + type='mmpose.Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='rhd/annotations/rhd_train.json', + data_prefix=dict(img='pose/RHD/')), + dict( + type='mmpose.HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict( + img='pose/Halpe/hico_20160224_det/images/train2015/') # noqa + ) + ], + ignore_keys=[ + 'CLASSES', 'dataset_keypoint_weights', 'dataset_name', 'flip_indices', + 'flip_pairs', 'keypoint_colors', 'keypoint_id2name', + 'keypoint_name2id', 'lower_body_ids', 'num_keypoints', + 'num_skeleton_links', 'sigmas', 'skeleton_link_colors', + 'skeleton_links', 'upper_body_ids' + ], +) + +test_dataset = dict( + _delete_=True, + type='mmpose.OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=test_pipeline, + ann_file='onehand10k/annotations/onehand10k_test.json', + data_prefix=dict(img='pose/OneHand10K/'), +) + +train_dataloader = dict(dataset=train_dataset) +val_dataloader = dict(dataset=test_dataset) +test_dataloader = val_dataloader + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'onehand10k/annotations/onehand10k_test.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +train_cfg = dict(val_interval=1) diff --git a/mmpose/demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py b/mmpose/demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..db26ca83388163047fcd45bcaede7d839bdb58f8 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py @@ -0,0 +1 @@ +_base_ = 'mmdet::rtmdet/rtmdet_tiny_8xb32-300e_coco.py' diff --git a/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py b/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..05c6e9659c7d80eea468624247b8f98d7ad5b428 --- /dev/null +++ 
b/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py @@ -0,0 +1,136 @@ +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1) +model = dict( + type='SingleStageDetector', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=80, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +env_cfg = dict(cudnn_benchmark=True) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +input_size = 320 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=data_preprocessor['mean'], + to_rgb=data_preprocessor['bgr_to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=24, + num_workers=4, + batch_sampler=None, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=8, + 
num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py b/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py new file mode 100644 index 0000000000000000000000000000000000000000..ebdd2e719cb29263f0902ad627fc5742a92fca72 --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py @@ -0,0 +1,153 @@ +# ========================================================= +# from 'mmdetection/configs/_base_/default_runtime.py' +# ========================================================= +default_scope = 'mmdet' +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] +# ========================================================= + +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1) +model = dict( + type='SingleStageDetector', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=1, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. 
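+        # With a 320x320 input there are six feature levels (strides 16 to
+        # 320); each level gets an explicit anchor range via the min_sizes /
+        # max_sizes below instead of the interpolated SSD300 scale rule.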
+ anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +cudnn_benchmark = True + +# dataset settings +file_client_args = dict(backend='disk') + +dataset_type = 'CocoDataset' +data_root = 'data/onehand10k/' +classes = ('hand', ) +input_size = 320 +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=8, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/onehand10k_test.json', + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + min_lr=0) +runner = dict(type='EpochBasedRunner', max_epochs=120) + +# Avoid evaluation and saving weights too frequently +evaluation = dict(interval=5, metric='bbox') +checkpoint_config = dict(interval=5) +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +log_config = dict(interval=5) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
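+# If automatic LR scaling is enabled at launch time (e.g. via the trainer's
+# auto-scale-lr option), the optimizer LR is scaled linearly by
+# (actual total batch size / base_batch_size).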
+# base_batch_size = (8 GPUs) x (24 samples per GPU)
+auto_scale_lr = dict(base_batch_size=192)
+
+load_from = 'https://download.openmmlab.com/mmdetection/' \
+    'v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/' \
+    'ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth'
+
+vis_backends = [dict(type='LocalVisBackend')]
+visualizer = dict(
+    type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
diff --git a/mmpose/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py b/mmpose/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7e9cca1eb34f9935a9eaf74b4cae18d1efaa248
--- /dev/null
+++ b/mmpose/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py
@@ -0,0 +1,140 @@
+# model settings
+model = dict(
+    type='YOLOV3',
+    pretrained='open-mmlab://darknet53',
+    backbone=dict(type='Darknet', depth=53, out_indices=(3, 4, 5)),
+    neck=dict(
+        type='YOLOV3Neck',
+        num_scales=3,
+        in_channels=[1024, 512, 256],
+        out_channels=[512, 256, 128]),
+    bbox_head=dict(
+        type='YOLOV3Head',
+        num_classes=80,
+        in_channels=[512, 256, 128],
+        out_channels=[1024, 512, 256],
+        anchor_generator=dict(
+            type='YOLOAnchorGenerator',
+            base_sizes=[[(116, 90), (156, 198), (373, 326)],
+                        [(30, 61), (62, 45), (59, 119)],
+                        [(10, 13), (16, 30), (33, 23)]],
+            strides=[32, 16, 8]),
+        bbox_coder=dict(type='YOLOBBoxCoder'),
+        featmap_strides=[32, 16, 8],
+        loss_cls=dict(
+            type='CrossEntropyLoss',
+            use_sigmoid=True,
+            loss_weight=1.0,
+            reduction='sum'),
+        loss_conf=dict(
+            type='CrossEntropyLoss',
+            use_sigmoid=True,
+            loss_weight=1.0,
+            reduction='sum'),
+        loss_xy=dict(
+            type='CrossEntropyLoss',
+            use_sigmoid=True,
+            loss_weight=2.0,
+            reduction='sum'),
+        loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
+    # training and testing settings
+    train_cfg=dict(
+        assigner=dict(
+            type='GridAssigner',
+            pos_iou_thr=0.5,
+            neg_iou_thr=0.5,
+            min_pos_iou=0)),
+    test_cfg=dict(
+        nms_pre=1000,
+        min_bbox_size=0,
+        score_thr=0.05,
+        conf_thr=0.005,
+        nms=dict(type='nms', iou_threshold=0.45),
+        max_per_img=100))
+# dataset settings
+dataset_type = 'CocoDataset'
+data_root = 'data/coco'
+img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='PhotoMetricDistortion'),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 2)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(320, 320),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='DefaultFormatBundle'),
+            dict(type='Collect', keys=['img'])
+        ])
+]
+data = dict(
+    samples_per_gpu=8,
+    workers_per_gpu=4,
+    train=dict(
+        type=dataset_type,
+        ann_file=f'{data_root}/annotations/instances_train2017.json',
+        img_prefix=f'{data_root}/train2017/',
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2000, # same as burn-in in darknet + warmup_ratio=0.1, + step=[218, 246]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=273) +evaluation = dict(interval=1, metric=['bbox']) + +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] + +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/mmpose/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py b/mmpose/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py new file mode 100644 index 0000000000000000000000000000000000000000..16f891304ac8d6242a3e054fb18c60a9cb4a237c --- /dev/null +++ b/mmpose/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py @@ -0,0 +1,300 @@ +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300, val_interval=10) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +param_scheduler = [ + dict( + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0005, + begin=5, + T_max=285, + end=285, + by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=285, end=300) +] +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True), + paramwise_cfg=dict(norm_decay_mult=0.0, bias_decay_mult=0.0)) +auto_scale_lr = dict(enable=False, base_batch_size=64) +default_scope = 'mmdet' +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) +log_level = 'INFO' +load_from = 'https://download.openmmlab.com/mmdetection/' \ + 'v2.0/yolox/yolox_s_8x8_300e_coco/' \ + 'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth' +resume = False +img_scale = (640, 640) +model = dict( + type='YOLOX', + data_preprocessor=dict( + type='DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=10) + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(2, 3, 4), + use_depthwise=False, + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', 
momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + neck=dict( + type='YOLOXPAFPN', + in_channels=[128, 256, 512], + out_channels=128, + num_csp_blocks=1, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + bbox_head=dict( + type='YOLOXHead', + num_classes=1, + in_channels=128, + feat_channels=128, + stacked_convs=2, + strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)), + train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), + test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +backend_args = dict(backend='local') +train_pipeline = [ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] +train_dataset = dict( + type='MultiImageMixDataset', + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + pipeline=[ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') + ]) +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/coco_face_train.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='LoadImageFromFile', + 
backend_args=dict(backend='local')), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_cfg=dict(filter_empty_gt=False, min_size=32), + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60))), + pipeline=[ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict(type='PackDetInputs') + ])) +val_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/coco_face_val.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) + ], + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) +test_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/coco_face_val.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) + ], + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) +val_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/coco_face_val.json', + metric='bbox') +test_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') +max_epochs = 300 +num_last_epochs = 15 +interval = 10 +base_lr = 0.01 +custom_hooks = [ + dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + strict_load=False, + update_buffers=True, + priority=49) +] +metainfo = dict(CLASSES=('person', ), PALETTE=(220, 20, 60)) +launcher = 'pytorch' diff --git a/mmpose/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py b/mmpose/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7fccf0cbe9929618274218274726eb28577273 --- /dev/null +++ b/mmpose/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py @@ -0,0 +1,321 @@ +model = dict( + detector=dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + 
frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + clip_border=False), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=0.1111111111111111, + loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + clip_border=False), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmtracking/' + 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth')), + type='DeepSORT', + motion=dict(type='KalmanFilter', center_only=False), + reid=dict( + type='BaseReID', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=380, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + loss_pairwise=dict( + type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU')), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmtracking/' + 'mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth')), + tracker=dict( + type='SortTracker', + obj_score_thr=0.5, + reid=dict( + num_samples=10, + img_scale=(256, 128), + img_norm_cfg=None, + match_score_thr=2.0), + match_iou_thr=0.5, 
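+        # Thresholds above, roughly: obj_score_thr filters the detections fed
+        # to the tracker, reid.match_score_thr bounds the appearance-embedding
+        # distance for ReID matching, and match_iou_thr is the IoU gate used
+        # for box-based association.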
+ momentums=None, + num_tentatives=2, + num_frames_retain=100)) +dataset_type = 'MOTChallengeDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) +] +data_root = 'data/MOT17/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='MOTChallengeDataset', + visibility_thr=-1, + ann_file='data/MOT17/annotations/half-train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=dict( + num_ref_imgs=1, + frame_range=10, + filter_key_img=True, + method='uniform'), + pipeline=[ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') + ]), + val=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/half-val_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ]), + test=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/half-val_cocoformat.json', + img_prefix='data/MOT17/train', 
+ ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ])) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +checkpoint_config = dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=100, + warmup_ratio=0.01, + step=[3]) +total_epochs = 4 +evaluation = dict(metric=['bbox', 'track'], interval=1) +search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] diff --git a/mmpose/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py b/mmpose/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py new file mode 100644 index 0000000000000000000000000000000000000000..9736269bd9ca1f950eadaa7a4933656db3130ca8 --- /dev/null +++ b/mmpose/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py @@ -0,0 +1,325 @@ +model = dict( + detector=dict( + type='FasterRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + clip_border=False), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=0.1111111111111111, + loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + clip_border=False), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + 
pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))), + type='Tracktor', + pretrains=dict( + detector='https://download.openmmlab.com/mmtracking/' + 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth', + reid='https://download.openmmlab.com/mmtracking/mot/' + 'reid/reid_r50_6e_mot17-4bf6b63d.pth'), + reid=dict( + type='BaseReID', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=378, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + loss_pairwise=dict( + type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'))), + motion=dict( + type='CameraMotionCompensation', + warp_mode='cv2.MOTION_EUCLIDEAN', + num_iters=100, + stop_eps=1e-05), + tracker=dict( + type='TracktorTracker', + obj_score_thr=0.5, + regression=dict( + obj_score_thr=0.5, + nms=dict(type='nms', iou_threshold=0.6), + match_iou_thr=0.3), + reid=dict( + num_samples=10, + img_scale=(256, 128), + img_norm_cfg=None, + match_score_thr=2.0, + match_iou_thr=0.2), + momentums=None, + num_frames_retain=10)) +dataset_type = 'MOTChallengeDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) +] +data_root = 'data/MOT17/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='MOTChallengeDataset', + visibility_thr=-1, + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=dict( + num_ref_imgs=1, + frame_range=10, + filter_key_img=True, + method='uniform'), + 
pipeline=[ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') + ]), + val=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ]), + test=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ])) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +checkpoint_config = dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=100, + warmup_ratio=0.01, + step=[3]) +total_epochs = 4 +evaluation = dict(metric=['bbox', 'track'], interval=1) +search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] +test_set = 'train' diff --git a/mmpose/demo/topdown_demo_with_mmdet.py b/mmpose/demo/topdown_demo_with_mmdet.py new file mode 100644 index 0000000000000000000000000000000000000000..4e39c362076b27b3dab23536894f2ce616989938 --- /dev/null +++ b/mmpose/demo/topdown_demo_with_mmdet.py @@ -0,0 +1,302 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
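+# Example invocation (checkpoint paths are placeholders; any detector config
+# shipped under demo/mmdetection_cfg can be used):
+#   python demo/topdown_demo_with_mmdet.py \
+#       demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py det.pth \
+#       pose_config.py pose.pth \
+#       --input demo.mp4 --output-root vis_results/ \
+#       --draw-bbox --save-predictions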
+import logging +import mimetypes +import os +import time +from argparse import ArgumentParser + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np +from mmengine.logging import print_log + +from mmpose.apis import inference_topdown +from mmpose.apis import init_model as init_pose_estimator +from mmpose.evaluation.functional import nms +from mmpose.registry import VISUALIZERS +from mmpose.structures import merge_data_samples, split_instances +from mmpose.utils import adapt_mmdet_pipeline + +try: + from mmdet.apis import inference_detector, init_detector + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + + +def process_one_image(args, + img, + detector, + pose_estimator, + visualizer=None, + show_interval=0): + """Visualize predicted keypoints (and heatmaps) of one image.""" + + # predict bbox + det_result = inference_detector(detector, img) + pred_instance = det_result.pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) + bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, + pred_instance.scores > args.bbox_thr)] + bboxes = bboxes[nms(bboxes, args.nms_thr), :4] + + # predict keypoints + pose_results = inference_topdown(pose_estimator, img, bboxes) + data_samples = merge_data_samples(pose_results) + + # show the results + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if visualizer is not None: + visualizer.add_datasample( + 'result', + img, + data_sample=data_samples, + draw_gt=False, + draw_heatmap=args.draw_heatmap, + draw_bbox=args.draw_bbox, + show_kpt_idx=args.show_kpt_idx, + skeleton_style=args.skeleton_style, + show=args.show, + wait_time=show_interval, + kpt_thr=args.kpt_thr) + + # if there is no instance detected, return None + return data_samples.get('pred_instances', None) + + +def main(): + """Visualize the demo images. + + Using mmdet to detect the human. + """ + parser = ArgumentParser() + parser.add_argument('det_config', help='Config file for detection') + parser.add_argument('det_checkpoint', help='Checkpoint file for detection') + parser.add_argument('pose_config', help='Config file for pose') + parser.add_argument('pose_checkpoint', help='Checkpoint file for pose') + parser.add_argument( + '--input', type=str, default='', help='Image/Video file') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + parser.add_argument( + '--output-root', + type=str, + default='', + help='root of the output img file. 
' + 'Default not saving the visualization images.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='Category id for bounding box detection model') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.3, + help='Bounding box score threshold') + parser.add_argument( + '--nms-thr', + type=float, + default=0.3, + help='IoU threshold for bounding box NMS') + parser.add_argument( + '--kpt-thr', + type=float, + default=0.3, + help='Visualizing keypoint thresholds') + parser.add_argument( + '--draw-heatmap', + action='store_true', + default=False, + help='Draw heatmap predicted by the model') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + parser.add_argument( + '--alpha', type=float, default=0.8, help='The transparency of bboxes') + parser.add_argument( + '--draw-bbox', action='store_true', help='Draw bboxes of instances') + + assert has_mmdet, 'Please install mmdet to run the demo.' + + args = parser.parse_args() + + assert args.show or (args.output_root != '') + assert args.input != '' + assert args.det_config is not None + assert args.det_checkpoint is not None + + output_file = None + if args.output_root: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + # build detector + detector = init_detector( + args.det_config, args.det_checkpoint, device=args.device) + detector.cfg = adapt_mmdet_pipeline(detector.cfg) + + # build pose estimator + pose_estimator = init_pose_estimator( + args.pose_config, + args.pose_checkpoint, + device=args.device, + cfg_options=dict( + model=dict(test_cfg=dict(output_heatmaps=args.draw_heatmap)))) + + # build visualizer + pose_estimator.cfg.visualizer.radius = args.radius + pose_estimator.cfg.visualizer.alpha = args.alpha + pose_estimator.cfg.visualizer.line_width = args.thickness + visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer) + # the dataset_meta is loaded from the checkpoint and + # then pass to the model in init_pose_estimator + visualizer.set_dataset_meta( + pose_estimator.dataset_meta, skeleton_style=args.skeleton_style) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if input_type == 'image': + + # inference + pred_instances = process_one_image(args, args.input, detector, + pose_estimator, visualizer) + + if args.save_predictions: + pred_instances_list = split_instances(pred_instances) + + if output_file: + img_vis = visualizer.get_image() + 
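+            # the visualizer holds an RGB image, so convert back to BGR
+            # before writing it to disk with mmcv (OpenCV convention)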
mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) + + elif input_type in ['webcam', 'video']: + + if args.input == 'webcam': + cap = cv2.VideoCapture(0) + else: + cap = cv2.VideoCapture(args.input) + + video_writer = None + pred_instances_list = [] + frame_idx = 0 + + while cap.isOpened(): + success, frame = cap.read() + frame_idx += 1 + + if not success: + break + + # topdown pose estimation + pred_instances = process_one_image(args, frame, detector, + pose_estimator, visualizer, + 0.001) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_instances))) + + # output videos + if output_file: + frame_vis = visualizer.get_image() + + if video_writer is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter( + output_file, + fourcc, + 25, # saved fps + (frame_vis.shape[1], frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + if args.show: + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + + time.sleep(args.show_interval) + + if video_writer: + video_writer.release() + + cap.release() + + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=pose_estimator.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + if output_file: + input_type = input_type.replace('webcam', 'video') + print_log( + f'the output {input_type} has been saved at {output_file}', + logger='current', + level=logging.INFO) + + +if __name__ == '__main__': + main() diff --git a/mmpose/engine/__init__.py b/mmpose/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..44f7fa17bc587273415fcf72e88ccd619003253b --- /dev/null +++ b/mmpose/engine/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hooks import * # noqa: F401, F403 +from .optim_wrappers import * # noqa: F401, F403 +from .schedulers import * # noqa: F401, F403 diff --git a/mmpose/engine/hooks/__init__.py b/mmpose/engine/hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2527a258bcf8888bae8b3c259d7a97b3fce541e4 --- /dev/null +++ b/mmpose/engine/hooks/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .badcase_hook import BadCaseAnalysisHook +from .ema_hook import ExpMomentumEMA +from .mode_switch_hooks import RTMOModeSwitchHook, YOLOXPoseModeSwitchHook +from .sync_norm_hook import SyncNormHook +from .visualization_hook import PoseVisualizationHook + +__all__ = [ + 'PoseVisualizationHook', 'ExpMomentumEMA', 'BadCaseAnalysisHook', + 'YOLOXPoseModeSwitchHook', 'SyncNormHook', 'RTMOModeSwitchHook' +] diff --git a/mmpose/engine/hooks/badcase_hook.py b/mmpose/engine/hooks/badcase_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..a06ef5af53fc0eedd7546cd590a5c8bb848c1c9b --- /dev/null +++ b/mmpose/engine/hooks/badcase_hook.py @@ -0,0 +1,239 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
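+# An illustrative snippet for enabling this hook from a config (values are
+# examples only):
+#   custom_hooks = [
+#       dict(type='BadCaseAnalysisHook', enable=True, out_dir='badcase',
+#            metric_type='loss', badcase_thr=5)
+#   ]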
+import json
+import logging
+import os
+import warnings
+from typing import Dict, Optional, Sequence
+
+import mmcv
+import mmengine
+import mmengine.fileio as fileio
+import torch
+from mmengine.config import ConfigDict
+from mmengine.hooks import Hook
+from mmengine.logging import print_log
+from mmengine.runner import Runner
+from mmengine.visualization import Visualizer
+
+from mmpose.registry import HOOKS, METRICS, MODELS
+from mmpose.structures import PoseDataSample, merge_data_samples
+
+
+@HOOKS.register_module()
+class BadCaseAnalysisHook(Hook):
+    """Bad Case Analysis Hook. Used to visualize prediction results of the
+    validation and testing process.
+
+    In the testing phase:
+
+    1. If ``show`` is True, it means that only the prediction results are
+       visualized without storing data, so ``vis_backends`` needs to
+       be excluded.
+    2. If ``out_dir`` is specified, it means that the prediction results
+       need to be saved to ``out_dir``. In order to avoid ``vis_backends``
+       also storing data, it needs to be excluded.
+    3. ``vis_backends`` takes effect if the user does not specify ``show``
+       and ``out_dir``. You can set ``vis_backends`` to WandbVisBackend or
+       TensorboardVisBackend to store the prediction results in Wandb or
+       Tensorboard.
+
+    Args:
+        enable (bool): Whether to draw prediction results. If it is False,
+            no drawing will be done. Defaults to False.
+        show (bool): Whether to display the drawn image. Defaults to False.
+        wait_time (float): The interval of show (s). Defaults to 0.
+        interval (int): The interval of visualization. Defaults to 50.
+        kpt_thr (float): The threshold to visualize the keypoints.
+            Defaults to 0.3.
+        out_dir (str, optional): Directory where painted images
+            will be saved in the testing process.
+        backend_args (dict, optional): Arguments to instantiate the prefix of
+            the corresponding URI backend. Defaults to None.
+        metric_type (str): The metric type used to decide a badcase,
+            'loss' or 'accuracy'.
+        metric (ConfigDict): The config of the metric.
+        metric_key (str): Key of the needed metric value in the dict
+            returned by the metric.
+        badcase_thr (float): Min loss or max accuracy for a badcase.
+    """
+
+    def __init__(
+        self,
+        enable: bool = False,
+        show: bool = False,
+        wait_time: float = 0.,
+        interval: int = 50,
+        kpt_thr: float = 0.3,
+        out_dir: Optional[str] = None,
+        backend_args: Optional[dict] = None,
+        metric_type: str = 'loss',
+        metric: ConfigDict = ConfigDict(type='KeypointMSELoss'),
+        metric_key: str = 'PCK',
+        badcase_thr: float = 5,
+    ):
+        self._visualizer: Visualizer = Visualizer.get_current_instance()
+        self.interval = interval
+        self.kpt_thr = kpt_thr
+        self.show = show
+        if self.show:
+            # No need to think about vis backends.
+            self._visualizer._vis_backends = {}
+            warnings.warn('show is True, which means that only '
+                          'the prediction results are visualized '
+                          'without storing data, so vis_backends '
+                          'needs to be excluded.')
+
+        self.wait_time = wait_time
+        self.enable = enable
+        self.out_dir = out_dir
+        self._test_index = 0
+        self.backend_args = backend_args
+
+        self.metric_type = metric_type
+        if metric_type not in ['loss', 'accuracy']:
+            raise KeyError(
+                f'The badcase metric type {metric_type} is not supported by '
+                f"{self.__class__.__name__}. 
Should be one of 'loss', " + f"'accuracy', but got {metric_type}.") + self.metric = MODELS.build(metric) if metric_type == 'loss'\ + else METRICS.build(metric) + self.metric_name = metric.type if metric_type == 'loss'\ + else metric_key + self.metric_key = metric_key + self.badcase_thr = badcase_thr + self.results = [] + + def check_badcase(self, data_batch, data_sample): + """Check whether the sample is a badcase. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + Return: + is_badcase (bool): whether the sample is a badcase or not + metric_value (float) + """ + if self.metric_type == 'loss': + gts = data_sample.gt_instances.keypoints + preds = data_sample.pred_instances.keypoints + weights = data_sample.gt_instances.keypoints_visible + with torch.no_grad(): + metric_value = self.metric( + torch.from_numpy(preds), torch.from_numpy(gts), + torch.from_numpy(weights)).item() + is_badcase = metric_value >= self.badcase_thr + else: + self.metric.process([data_batch], [data_sample.to_dict()]) + metric_value = self.metric.evaluate(1)[self.metric_key] + is_badcase = metric_value <= self.badcase_thr + return is_badcase, metric_value + + def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[PoseDataSample]) -> None: + """Run after every testing iterations. + + Args: + runner (:obj:`Runner`): The runner of the testing process. + batch_idx (int): The index of the current batch in the test loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. + """ + if not self.enable: + return + + if self.out_dir is not None: + self.out_dir = os.path.join(runner.work_dir, runner.timestamp, + self.out_dir) + mmengine.mkdir_or_exist(self.out_dir) + + self._visualizer.set_dataset_meta(runner.test_evaluator.dataset_meta) + + for data_sample in outputs: + self._test_index += 1 + + img_path = data_sample.get('img_path') + img_bytes = fileio.get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_sample = merge_data_samples([data_sample]) + + is_badcase, metric_value = self.check_badcase( + data_batch, data_sample) + + if is_badcase: + img_name, postfix = os.path.basename(img_path).rsplit('.', 1) + bboxes = data_sample.gt_instances.bboxes.astype(int).tolist() + bbox_info = 'bbox' + str(bboxes) + metric_postfix = self.metric_name + str(round(metric_value, 2)) + + self.results.append({ + 'img': img_name, + 'bbox': bboxes, + self.metric_name: metric_value + }) + + badcase_name = f'{img_name}_{bbox_info}_{metric_postfix}' + + out_file = None + if self.out_dir is not None: + out_file = f'{badcase_name}.{postfix}' + out_file = os.path.join(self.out_dir, out_file) + + # draw gt keypoints in blue color + self._visualizer.kpt_color = 'blue' + self._visualizer.link_color = 'blue' + img_gt_drawn = self._visualizer.add_datasample( + badcase_name if self.show else 'test_img', + img, + data_sample=data_sample, + show=False, + draw_pred=False, + draw_gt=True, + draw_bbox=False, + draw_heatmap=False, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + out_file=None, + step=self._test_index) + # draw pred keypoints in red color + self._visualizer.kpt_color = 'red' + self._visualizer.link_color = 'red' + self._visualizer.add_datasample( + badcase_name if self.show else 'test_img', + img_gt_drawn, + data_sample=data_sample, + show=self.show, + draw_pred=True, + draw_gt=False, + 
draw_bbox=True, + draw_heatmap=False, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + out_file=out_file, + step=self._test_index) + + def after_test_epoch(self, + runner, + metrics: Optional[Dict[str, float]] = None) -> None: + """All subclasses should override this method, if they need any + operations after each test epoch. + + Args: + runner (Runner): The runner of the testing process. + metrics (Dict[str, float], optional): Evaluation results of all + metrics on test dataset. The keys are the names of the + metrics, and the values are corresponding results. + """ + if not self.enable or not self.results: + return + + mmengine.mkdir_or_exist(self.out_dir) + out_file = os.path.join(self.out_dir, 'results.json') + with open(out_file, 'w') as f: + json.dump(self.results, f) + + print_log( + f'the bad cases are saved under {self.out_dir}', + logger='current', + level=logging.INFO) diff --git a/mmpose/engine/hooks/ema_hook.py b/mmpose/engine/hooks/ema_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..fd1a689f96f49c33059ec1e4afbe7b01b85164f9 --- /dev/null +++ b/mmpose/engine/hooks/ema_hook.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +import torch +import torch.nn as nn +from mmengine.model import ExponentialMovingAverage +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class ExpMomentumEMA(ExponentialMovingAverage): + """Exponential moving average (EMA) with exponential momentum strategy, + which is used in YOLOX. + + Ported from ` the implementation of MMDetection + `_. + + Args: + model (nn.Module): The model to be averaged. + momentum (float): The momentum used for updating ema parameter. + Ema's parameter are updated with the formula: + `averaged_param = (1-momentum) * averaged_param + momentum * + source_param`. Defaults to 0.0002. + gamma (int): Use a larger momentum early in training and gradually + annealing to a smaller value to update the ema model smoothly. The + momentum is calculated as + `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`. + Defaults to 2000. + interval (int): Interval between two updates. Defaults to 1. + device (torch.device, optional): If provided, the averaged model will + be stored on the :attr:`device`. Defaults to None. + update_buffers (bool): if True, it will compute running averages for + both the parameters and the buffers of the model. Defaults to + False. + """ + + def __init__(self, + model: nn.Module, + momentum: float = 0.0002, + gamma: int = 2000, + interval=1, + device: Optional[torch.device] = None, + update_buffers: bool = False) -> None: + super().__init__( + model=model, + momentum=momentum, + interval=interval, + device=device, + update_buffers=update_buffers) + assert gamma > 0, f'gamma must be greater than 0, but got {gamma}' + self.gamma = gamma + + def avg_func(self, averaged_param: Tensor, source_param: Tensor, + steps: int) -> None: + """Compute the moving average of the parameters using the exponential + momentum strategy. + + Args: + averaged_param (Tensor): The averaged parameters. + source_param (Tensor): The source parameters. + steps (int): The number of times the parameters have been + updated. 
+ """ + momentum = (1 - self.momentum) * math.exp( + -float(1 + steps) / self.gamma) + self.momentum + averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum) diff --git a/mmpose/engine/hooks/mode_switch_hooks.py b/mmpose/engine/hooks/mode_switch_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..8990ecab678fe067cab64cf95e613f0439eddba1 --- /dev/null +++ b/mmpose/engine/hooks/mode_switch_hooks.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Dict, Sequence + +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper +from mmengine.runner import Runner + +from mmpose.registry import HOOKS +from mmpose.utils.hooks import rgetattr, rsetattr + + +@HOOKS.register_module() +class YOLOXPoseModeSwitchHook(Hook): + """Switch the mode of YOLOX-Pose during training. + + This hook: + 1) Turns off mosaic and mixup data augmentation. + 2) Uses instance mask to assist positive anchor selection. + 3) Uses auxiliary L1 loss in the head. + + Args: + num_last_epochs (int): The number of last epochs at the end of + training to close the data augmentation and switch to L1 loss. + Defaults to 20. + new_train_dataset (dict): New training dataset configuration that + will be used in place of the original training dataset. Defaults + to None. + new_train_pipeline (Sequence[dict]): New data augmentation pipeline + configuration that will be used in place of the original pipeline + during training. Defaults to None. + """ + + def __init__(self, + num_last_epochs: int = 20, + new_train_dataset: dict = None, + new_train_pipeline: Sequence[dict] = None): + self.num_last_epochs = num_last_epochs + self.new_train_dataset = new_train_dataset + self.new_train_pipeline = new_train_pipeline + + def _modify_dataloader(self, runner: Runner): + """Modify dataloader with new dataset and pipeline configurations.""" + runner.logger.info(f'New Pipeline: {self.new_train_pipeline}') + + train_dataloader_cfg = copy.deepcopy(runner.cfg.train_dataloader) + if self.new_train_dataset: + train_dataloader_cfg.dataset = self.new_train_dataset + if self.new_train_pipeline: + train_dataloader_cfg.dataset.pipeline = self.new_train_pipeline + + new_train_dataloader = Runner.build_dataloader(train_dataloader_cfg) + runner.train_loop.dataloader = new_train_dataloader + runner.logger.info('Recreated the dataloader!') + + def before_train_epoch(self, runner: Runner): + """Close mosaic and mixup augmentation, switch to use L1 loss.""" + epoch = runner.epoch + model = runner.model + if is_model_wrapper(model): + model = model.module + + if epoch + 1 == runner.max_epochs - self.num_last_epochs: + self._modify_dataloader(runner) + runner.logger.info('Added additional reg loss now!') + model.head.use_aux_loss = True + + +@HOOKS.register_module() +class RTMOModeSwitchHook(Hook): + """A hook to switch the mode of RTMO during training. + + This hook allows for dynamic adjustments of model attributes at specified + training epochs. It is designed to modify configurations such as turning + off specific augmentations or changing loss functions at different stages + of the training process. + + Args: + epoch_attributes (Dict[str, Dict]): A dictionary where keys are epoch + numbers and values are attribute modification dictionaries. Each + dictionary specifies the attribute to modify and its new value. 
+ + Example: + epoch_attributes = { + 5: [{"attr1.subattr": new_value1}, {"attr2.subattr": new_value2}], + 10: [{"attr3.subattr": new_value3}] + } + """ + + def __init__(self, epoch_attributes: Dict[int, Dict]): + self.epoch_attributes = epoch_attributes + + def before_train_epoch(self, runner: Runner): + """Method called before each training epoch. + + It checks if the current epoch is in the `epoch_attributes` mapping and + applies the corresponding attribute changes to the model. + """ + epoch = runner.epoch + model = runner.model + if is_model_wrapper(model): + model = model.module + + if epoch in self.epoch_attributes: + for key, value in self.epoch_attributes[epoch].items(): + rsetattr(model.head, key, value) + runner.logger.info( + f'Change model.head.{key} to {rgetattr(model.head, key)}') diff --git a/mmpose/engine/hooks/sync_norm_hook.py b/mmpose/engine/hooks/sync_norm_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..053e4f92af37037a64309b2262ef4610d336b3f5 --- /dev/null +++ b/mmpose/engine/hooks/sync_norm_hook.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +from mmengine.dist import all_reduce_dict, get_dist_info +from mmengine.hooks import Hook +from torch import nn + +from mmpose.registry import HOOKS + + +def get_norm_states(module: nn.Module) -> OrderedDict: + """Get the state_dict of batch norms in the module.""" + async_norm_states = OrderedDict() + for name, child in module.named_modules(): + if isinstance(child, nn.modules.batchnorm._NormBase): + for k, v in child.state_dict().items(): + async_norm_states['.'.join([name, k])] = v + return async_norm_states + + +@HOOKS.register_module() +class SyncNormHook(Hook): + """Synchronize Norm states before validation.""" + + def before_val_epoch(self, runner): + """Synchronize normalization statistics.""" + module = runner.model + rank, world_size = get_dist_info() + + if world_size == 1: + return + + norm_states = get_norm_states(module) + if len(norm_states) == 0: + return + + try: + norm_states = all_reduce_dict(norm_states, op='mean') + module.load_state_dict(norm_states, strict=True) + except Exception as e: + runner.logger.warn(f'SyncNormHook failed: {str(e)}') diff --git a/mmpose/engine/hooks/visualization_hook.py b/mmpose/engine/hooks/visualization_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..7de273698c2dc3cb29be2183e2b7d9eb11a7f298 --- /dev/null +++ b/mmpose/engine/hooks/visualization_hook.py @@ -0,0 +1,207 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import warnings +from typing import Optional, Sequence + +import numpy as np + +import mmcv +import mmengine +import mmengine.fileio as fileio +from mmengine.hooks import Hook +from mmengine.runner import Runner +from mmengine.visualization import Visualizer + +from mmpose.registry import HOOKS +from mmpose.structures import PoseDataSample, merge_data_samples +from mmpose.structures.keypoint import fix_bbox_aspect_ratio + + +@HOOKS.register_module() +class PoseVisualizationHook(Hook): + """Pose Estimation Visualization Hook. Used to visualize validation and + testing process prediction results. + + In the testing phase: + + 1. If ``show`` is True, it means that only the prediction results are + visualized without storing data, so ``vis_backends`` needs to + be excluded. + 2. If ``out_dir`` is specified, it means that the prediction results + need to be saved to ``out_dir``. 
In order to avoid vis_backends + also storing data, so ``vis_backends`` needs to be excluded. + 3. ``vis_backends`` takes effect if the user does not specify ``show`` + and `out_dir``. You can set ``vis_backends`` to WandbVisBackend or + TensorboardVisBackend to store the prediction result in Wandb or + Tensorboard. + + Args: + enable (bool): whether to draw prediction results. If it is False, + it means that no drawing will be done. Defaults to False. + interval (int): The interval of visualization. Defaults to 50. + score_thr (float): The threshold to visualize the bboxes + and masks. Defaults to 0.3. + show (bool): Whether to display the drawn image. Default to False. + wait_time (float): The interval of show (s). Defaults to 0. + out_dir (str, optional): directory where painted images + will be saved in testing process. + backend_args (dict, optional): Arguments to instantiate the preifx of + uri corresponding backend. Defaults to None. + """ + + def __init__( + self, + enable: bool = False, + interval: int = 50, + kpt_thr: float = 0.3, + show: bool = False, + wait_time: float = 0., + out_dir: Optional[str] = None, + backend_args: Optional[dict] = None, + ): + self._visualizer: Visualizer = Visualizer.get_current_instance() + self.interval = interval + self.kpt_thr = kpt_thr + self.show = show + if self.show: + # No need to think about vis backends. + self._visualizer._vis_backends = {} + warnings.warn('The show is True, it means that only ' + 'the prediction results are visualized ' + 'without storing data, so vis_backends ' + 'needs to be excluded.') + + self.wait_time = wait_time + self.enable = enable + self.out_dir = out_dir + self._test_index = 0 + self.backend_args = backend_args + + def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[PoseDataSample]) -> None: + """Run after every ``self.interval`` validation iterations. + + Args: + runner (:obj:`Runner`): The runner of the validation process. + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. + """ + if self.enable is False: + return + + self._visualizer.set_dataset_meta(runner.val_evaluator.dataset_meta) + + # There is no guarantee that the same batch of images + # is visualized for each evaluation. + total_curr_iter = runner.iter + batch_idx + + # Visualize only the first data + img_path = data_batch['data_samples'][0].get('img_path') + img_bytes = fileio.get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_sample = outputs[0] + + # revert the heatmap on the original image + data_sample = merge_data_samples([data_sample]) + + if total_curr_iter % self.interval == 0: + self._visualizer.add_datasample( + os.path.basename(img_path) if self.show else 'val_img', + img, + data_sample=data_sample, + draw_gt=False, + draw_bbox=True, + draw_heatmap=True, + show=self.show, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + step=total_curr_iter) + + def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[PoseDataSample]) -> None: + """Run after every testing iterations. + + Args: + runner (:obj:`Runner`): The runner of the testing process. + batch_idx (int): The index of the current batch in the test loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. 
+ """ + if self.enable is False: + return + + if self.out_dir is not None: + self.out_dir = os.path.join(runner.work_dir, runner.timestamp, + self.out_dir) + mmengine.mkdir_or_exist(self.out_dir) + + self._visualizer.set_dataset_meta(runner.test_evaluator.dataset_meta) + + for data_sample in outputs: + self._test_index += 1 + + img_path = data_sample.get('img_path') + img_bytes = fileio.get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + + # img = pad_img_to_amap(img, data_sample) + + data_sample = merge_data_samples([data_sample]) + + # Resize image to heatmap size + if data_sample.get('_pred_heatmaps') is not None: + heatmap_size = data_sample._pred_heatmaps.shape + img = mmcv.imresize(img, heatmap_size[::-1]) + + out_file = None + if self.out_dir is not None: + out_file_name, postfix = os.path.basename(img_path).rsplit( + '.', 1) + index = len([ + fname for fname in os.listdir(self.out_dir) + if fname.startswith(out_file_name) + ]) + out_file = f'{out_file_name}_{index}.{postfix}' + out_file = os.path.join(self.out_dir, out_file) + + self._visualizer.add_datasample( + os.path.basename(img_path) if self.show else 'test_img', + img, + data_sample=data_sample, + show=self.show, + draw_gt=False, + draw_bbox=True, + draw_heatmap=True, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + out_file=out_file, + step=self._test_index) + + +def pad_img_to_amap(img, data_sample): + bbox_xywh = None + if 'raw_ann_info' in data_sample: + bbox_xywh = data_sample.raw_ann_info['bbox'] + elif 'pred_instances' in data_sample: + bbox_xywh = data_sample.pred_instances.bboxes.flatten() + + if bbox_xywh is None: + return img + + bbox_xyxy = np.array([ + bbox_xywh[0], bbox_xywh[1], + bbox_xywh[0] + bbox_xywh[2], bbox_xywh[1] + bbox_xywh[3] + ]) + abox_xyxy = fix_bbox_aspect_ratio(bbox_xyxy, aspect_ratio=3/4, padding=1.25, bbox_format='xyxy') + abox_xyxy = abox_xyxy.flatten() + + x_pad = np.array([max(0, -abox_xyxy[0]), max(0, abox_xyxy[2] - img.shape[1])], dtype=int) + y_pad = np.array([max(0, -abox_xyxy[1]), max(0, abox_xyxy[3] - img.shape[0])], dtype=int) + img = np.pad(img, ((y_pad[0], y_pad[1]), (x_pad[0], x_pad[1]), (0, 0)), mode='constant') + + kpts = data_sample.pred_instances.keypoints[0].reshape(-1, 2) + kpts[:, :2] += np.array([x_pad[0], y_pad[0]]) + data_sample.pred_instances.keypoints[0] = kpts.reshape(data_sample.pred_instances.keypoints[0].shape) + + return img diff --git a/mmpose/engine/optim_wrappers/__init__.py b/mmpose/engine/optim_wrappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..16174c500f9dfa9e67ffd0692d1afe9016afdb27 --- /dev/null +++ b/mmpose/engine/optim_wrappers/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .force_default_constructor import ForceDefaultOptimWrapperConstructor +from .layer_decay_optim_wrapper import LayerDecayOptimWrapperConstructor + +__all__ = [ + 'LayerDecayOptimWrapperConstructor', 'ForceDefaultOptimWrapperConstructor' +] diff --git a/mmpose/engine/optim_wrappers/force_default_constructor.py b/mmpose/engine/optim_wrappers/force_default_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..f45291a73b0c38b94ae8b00bd2b7927f8778b622 --- /dev/null +++ b/mmpose/engine/optim_wrappers/force_default_constructor.py @@ -0,0 +1,255 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.logging import print_log +from mmengine.optim import DefaultOptimWrapperConstructor +from mmengine.utils.dl_utils import mmcv_full_available +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm, _InstanceNorm +from torch.nn import GroupNorm, LayerNorm + +from mmpose.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class ForceDefaultOptimWrapperConstructor(DefaultOptimWrapperConstructor): + """Default constructor with forced optimizer settings. + + This constructor extends the default constructor to add an option for + forcing default optimizer settings. This is useful for ensuring that + certain parameters or layers strictly adhere to pre-defined default + settings, regardless of any custom settings specified. + + By default, each parameter share the same optimizer settings, and we + provide an argument ``paramwise_cfg`` to specify parameter-wise settings. + It is a dict and may contain various fields like 'custom_keys', + 'bias_lr_mult', etc., as well as the additional field + `force_default_settings` which allows for enforcing default settings on + optimizer parameters. + + - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If + one of the keys in ``custom_keys`` is a substring of the name of one + parameter, then the setting of the parameter will be specified by + ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will + be ignored. It should be noted that the aforementioned ``key`` is the + longest key that is a substring of the name of the parameter. If there + are multiple matched keys with the same length, then the key with lower + alphabet order will be chosen. + ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult`` + and ``decay_mult``. See Example 2 below. + - ``bias_lr_mult`` (float): It will be multiplied to the learning + rate for all bias parameters (except for those in normalization + layers and offset layers of DCN). + - ``bias_decay_mult`` (float): It will be multiplied to the weight + decay for all bias parameters (except for those in + normalization layers, depthwise conv layers, offset layers of DCN). + - ``norm_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of normalization + layers. + - ``flat_decay_mult`` (float): It will be multiplied to the weight + decay for all one-dimensional parameters + - ``dwconv_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of depthwise conv + layers. + - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning + rate for parameters of offset layer in the deformable convs + of a model. + - ``bypass_duplicate`` (bool): If true, the duplicate parameters + would not be added into optimizer. Defaults to False. + - ``force_default_settings`` (bool): If true, this will override any + custom settings defined by ``custom_keys`` and enforce the use of + default settings for optimizer parameters like ``bias_lr_mult``. + This is particularly useful when you want to ensure that certain layers + or parameters adhere strictly to the pre-defined default settings. + + Note: + + 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will + override the effect of ``bias_lr_mult`` in the bias of offset layer. + So be careful when using both ``bias_lr_mult`` and + ``dcn_offset_lr_mult``. 
If you wish to apply both of them to the offset + layer in deformable convs, set ``dcn_offset_lr_mult`` to the original + ``dcn_offset_lr_mult`` * ``bias_lr_mult``. + + 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will + apply it to all the DCN layers in the model. So be careful when the + model contains multiple DCN layers in places other than backbone. + + 3. When the option ``force_default_settings`` is true, it will override + any custom settings provided in ``custom_keys``. This ensures that the + default settings for the optimizer parameters are used. + + Args: + optim_wrapper_cfg (dict): The config dict of the optimizer wrapper. + + Required fields of ``optim_wrapper_cfg`` are + + - ``type``: class name of the OptimizerWrapper + - ``optimizer``: The configuration of optimizer. + + Optional fields of ``optim_wrapper_cfg`` are + + - any arguments of the corresponding optimizer wrapper type, + e.g., accumulative_counts, clip_grad, etc. + + Required fields of ``optimizer`` are + + - `type`: class name of the optimizer. + + Optional fields of ``optimizer`` are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + + paramwise_cfg (dict, optional): Parameter-wise options. + + Example 1: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optim_wrapper_cfg = dict( + >>> dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01, + >>> momentum=0.9, weight_decay=0.0001)) + >>> paramwise_cfg = dict(norm_decay_mult=0.) + >>> optim_wrapper_builder = DefaultOptimWrapperConstructor( + >>> optim_wrapper_cfg, paramwise_cfg) + >>> optim_wrapper = optim_wrapper_builder(model) + + Example 2: + >>> # assume model have attribute model.backbone and model.cls_head + >>> optim_wrapper_cfg = dict(type='OptimWrapper', optimizer=dict( + >>> type='SGD', lr=0.01, weight_decay=0.95)) + >>> paramwise_cfg = dict(custom_keys={ + >>> 'backbone': dict(lr_mult=0.1, decay_mult=0.9)}) + >>> optim_wrapper_builder = DefaultOptimWrapperConstructor( + >>> optim_wrapper_cfg, paramwise_cfg) + >>> optim_wrapper = optim_wrapper_builder(model) + >>> # Then the `lr` and `weight_decay` for model.backbone is + >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for + >>> # model.cls_head is (0.01, 0.95). + """ + + def add_params(self, + params: List[dict], + module: nn.Module, + prefix: str = '', + is_dcn_module: Optional[Union[int, float]] = None) -> None: + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. 
+ """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', None) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', None) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', None) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', None) + flat_decay_mult = self.paramwise_cfg.get('flat_decay_mult', None) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', None) + force_default_settings = self.paramwise_cfg.get( + 'force_default_settings', False) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if bypass_duplicate and self._is_in(param_group, params): + print_log( + f'{prefix} is duplicate. It is skipped since ' + f'bypass_duplicate={bypass_duplicate}', + logger='current', + level=logging.WARNING) + continue + if not param.requires_grad: + params.append(param_group) + continue + + # if the parameter match one of the custom keys, ignore other rules + is_custom = False + for key in sorted_keys: + if key in f'{prefix}.{name}': + is_custom = True + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) 
+ param_group['weight_decay'] = self.base_wd * decay_mult + # add custom settings to param_group + for k, v in custom_keys[key].items(): + param_group[k] = v + break + + if not is_custom or force_default_settings: + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not ( + is_norm or is_dcn_module) and bias_lr_mult is not None: + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and dcn_offset_lr_mult is not None + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm and norm_decay_mult is not None: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # bias lr and decay + elif (name == 'bias' and not is_dcn_module + and bias_decay_mult is not None): + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + # depth-wise conv + elif is_dwconv and dwconv_decay_mult is not None: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # flatten parameters except dcn offset + elif (param.ndim == 1 and not is_dcn_module + and flat_decay_mult is not None): + param_group[ + 'weight_decay'] = self.base_wd * flat_decay_mult + params.append(param_group) + for key, value in param_group.items(): + if key == 'params': + continue + full_name = f'{prefix}.{name}' if prefix else name + print_log( + f'paramwise_options -- {full_name}:{key}={value}', + logger='current') + + if mmcv_full_available(): + from mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) diff --git a/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py b/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..6513e5593d98e9aa77a2795529ddeb538b6099c3 --- /dev/null +++ b/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
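A hypothetical `optim_wrapper` config using the constructor above; the optimizer choice and multipliers are illustrative and not taken from this repository's configs:

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=4e-3, weight_decay=0.05),
    constructor='ForceDefaultOptimWrapperConstructor',
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1)},
        norm_decay_mult=0.0,
        bias_decay_mult=0.0,
        # with force_default_settings=True, the default rules above are
        # applied even to parameters matched by `custom_keys`
        force_default_settings=True))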
+from mmengine.dist.utils import get_dist_info +from mmengine.optim import DefaultOptimWrapperConstructor +from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +def get_num_layer_for_vit(var_name, num_max_layer): + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.patch_embed'): + return 0 + elif var_name.startswith('backbone.layers'): + layer_id = int(var_name.split('.')[2]) + return layer_id + 1 + else: + return num_max_layer - 1 + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module(force=True) +class LayerDecayOptimWrapperConstructor(DefaultOptimWrapperConstructor): + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + super().__init__(optim_wrapper_cfg, paramwise_cfg=None) + self.layer_decay_rate = paramwise_cfg.get('layer_decay_rate', 0.5) + + super().__init__(optim_wrapper_cfg, paramwise_cfg) + + def add_params(self, params, module, prefix='', lr=None): + parameter_groups = {} + print(self.paramwise_cfg) + num_layers = self.paramwise_cfg.get('num_layers') + 2 + layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate') + weight_decay = self.base_wd + + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if (len(param.shape) == 1 or name.endswith('.bias') + or 'pos_embed' in name): + group_name = 'no_decay' + this_weight_decay = 0. + else: + group_name = 'decay' + this_weight_decay = weight_decay + layer_id = get_num_layer_for_vit(name, num_layers) + group_name = 'layer_%d_%s' % (layer_id, group_name) + + if group_name not in parameter_groups: + scale = layer_decay_rate**(num_layers - layer_id - 1) + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + params.extend(parameter_groups.values()) diff --git a/mmpose/engine/schedulers/__init__.py b/mmpose/engine/schedulers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea59930e8c465dc75c52106d0440656a5a9446a --- /dev/null +++ b/mmpose/engine/schedulers/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .constant_lr import ConstantLR +from .quadratic_warmup import (QuadraticWarmupLR, QuadraticWarmupMomentum, + QuadraticWarmupParamScheduler) + +__all__ = [ + 'QuadraticWarmupParamScheduler', 'QuadraticWarmupMomentum', + 'QuadraticWarmupLR', 'ConstantLR' +] diff --git a/mmpose/engine/schedulers/constant_lr.py b/mmpose/engine/schedulers/constant_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..3b96374542f6c85d5b1edaad77ef81cc031ae3ad --- /dev/null +++ b/mmpose/engine/schedulers/constant_lr.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
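A hypothetical config for the layer-wise learning-rate decay constructor above, as it would typically be paired with a ViT-style backbone; the values are illustrative:

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=5e-4, weight_decay=0.1),
    constructor='LayerDecayOptimWrapperConstructor',
    paramwise_cfg=dict(
        num_layers=12,           # depth of `backbone.layers`
        layer_decay_rate=0.75))  # per-group lr = base_lr * 0.75 ** (13 - layer_id)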
+from mmengine.optim.scheduler import \ + ConstantParamScheduler as MMENGINE_ConstantParamScheduler +from mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin + +from mmpose.registry import PARAM_SCHEDULERS + +INF = int(1e9) + + +class ConstantParamScheduler(MMENGINE_ConstantParamScheduler): + """Decays the parameter value of each parameter group by a small constant + factor until the number of epoch reaches a pre-defined milestone: ``end``. + Notice that such decay can happen simultaneously with other changes to the + parameter value from outside this scheduler. The factor range restriction + is removed. + + Args: + optimizer (Optimizer or BaseOptimWrapper): optimizer or Wrapped + optimizer. + param_name (str): Name of the parameter to be adjusted, such as + ``lr``, ``momentum``. + factor (float): The number we multiply parameter value until the + milestone. Defaults to 1./3. + begin (int): Step at which to start updating the parameters. + Defaults to 0. + end (int): Step at which to stop updating the parameters. + Defaults to INF. + last_step (int): The index of last step. Used for resume without + state dict. Defaults to -1. + by_epoch (bool): Whether the scheduled parameters are updated by + epochs. Defaults to True. + verbose (bool): Whether to print the value for each update. + Defaults to False. + """ + + def __init__(self, + optimizer, + param_name: str, + factor: float = 1.0 / 3, + begin: int = 0, + end: int = INF, + last_step: int = -1, + by_epoch: bool = True, + verbose: bool = False): + + self.factor = factor + self.total_iters = end - begin - 1 + super(MMENGINE_ConstantParamScheduler, self).__init__( + optimizer, + param_name=param_name, + begin=begin, + end=end, + last_step=last_step, + by_epoch=by_epoch, + verbose=verbose) + + +@PARAM_SCHEDULERS.register_module() +class ConstantLR(LRSchedulerMixin, ConstantParamScheduler): + """Decays the learning rate value of each parameter group by a small + constant factor until the number of epoch reaches a pre-defined milestone: + ``end``. Notice that such decay can happen simultaneously with other + changes to the learning rate value from outside this scheduler. + + Args: + optimizer (Optimizer or OptimWrapper): Wrapped optimizer. + factor (float): The number we multiply learning rate until the + milestone. Defaults to 1./3. + begin (int): Step at which to start updating the learning rate. + Defaults to 0. + end (int): Step at which to stop updating the learning rate. + Defaults to INF. + last_step (int): The index of last step. Used for resume without state + dict. Defaults to -1. + by_epoch (bool): Whether the scheduled learning rate is updated by + epochs. Defaults to True. + verbose (bool): Whether to print the learning rate for each update. + Defaults to False. + """ diff --git a/mmpose/engine/schedulers/quadratic_warmup.py b/mmpose/engine/schedulers/quadratic_warmup.py new file mode 100644 index 0000000000000000000000000000000000000000..10217972173ac9e764ea71966a1f2dd3a8b79a1d --- /dev/null +++ b/mmpose/engine/schedulers/quadratic_warmup.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
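Note that the local `ConstantParamScheduler` drops MMEngine's restriction on `factor`, so values above 1 are also accepted. A hypothetical `param_scheduler` list using `ConstantLR` for an initial hold phase (the second entry is a placeholder for whatever scheduler takes over afterwards):

param_scheduler = [
    # hold the lr at 0.5x of the base value for the first 10 epochs
    dict(type='ConstantLR', factor=0.5, begin=0, end=10, by_epoch=True),
    # then decay with any other scheduler, e.g. cosine annealing
    dict(type='CosineAnnealingLR', T_max=200, begin=10, end=210, by_epoch=True),
]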
+from mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin +from mmengine.optim.scheduler.momentum_scheduler import MomentumSchedulerMixin +from mmengine.optim.scheduler.param_scheduler import INF, _ParamScheduler +from torch.optim import Optimizer + +from mmpose.registry import PARAM_SCHEDULERS + + +@PARAM_SCHEDULERS.register_module() +class QuadraticWarmupParamScheduler(_ParamScheduler): + r"""Warm up the parameter value of each parameter group by quadratic + formula: + + .. math:: + + X_{t} = X_{t-1} + \frac{2t+1}{{(end-begin)}^{2}} \times X_{base} + + Args: + optimizer (Optimizer): Wrapped optimizer. + param_name (str): Name of the parameter to be adjusted, such as + ``lr``, ``momentum``. + begin (int): Step at which to start updating the parameters. + Defaults to 0. + end (int): Step at which to stop updating the parameters. + Defaults to INF. + last_step (int): The index of last step. Used for resume without + state dict. Defaults to -1. + by_epoch (bool): Whether the scheduled parameters are updated by + epochs. Defaults to True. + verbose (bool): Whether to print the value for each update. + Defaults to False. + """ + + def __init__(self, + optimizer: Optimizer, + param_name: str, + begin: int = 0, + end: int = INF, + last_step: int = -1, + by_epoch: bool = True, + verbose: bool = False): + if end >= INF: + raise ValueError('``end`` must be less than infinity,' + 'Please set ``end`` parameter of ' + '``QuadraticWarmupScheduler`` as the ' + 'number of warmup end.') + self.total_iters = end - begin + super().__init__( + optimizer=optimizer, + param_name=param_name, + begin=begin, + end=end, + last_step=last_step, + by_epoch=by_epoch, + verbose=verbose) + + @classmethod + def build_iter_from_epoch(cls, + *args, + begin=0, + end=INF, + by_epoch=True, + epoch_length=None, + **kwargs): + """Build an iter-based instance of this scheduler from an epoch-based + config.""" + assert by_epoch, 'Only epoch-based kwargs whose `by_epoch=True` can ' \ + 'be converted to iter-based.' + assert epoch_length is not None and epoch_length > 0, \ + f'`epoch_length` must be a positive integer, ' \ + f'but got {epoch_length}.' + by_epoch = False + begin = begin * epoch_length + if end != INF: + end = end * epoch_length + return cls(*args, begin=begin, end=end, by_epoch=by_epoch, **kwargs) + + def _get_value(self): + """Compute value using chainable form of the scheduler.""" + if self.last_step == 0: + return [ + base_value * (2 * self.last_step + 1) / self.total_iters**2 + for base_value in self.base_values + ] + + return [ + group[self.param_name] + base_value * + (2 * self.last_step + 1) / self.total_iters**2 + for base_value, group in zip(self.base_values, + self.optimizer.param_groups) + ] + + +@PARAM_SCHEDULERS.register_module() +class QuadraticWarmupLR(LRSchedulerMixin, QuadraticWarmupParamScheduler): + """Warm up the learning rate of each parameter group by quadratic formula. + + Args: + optimizer (Optimizer): Wrapped optimizer. + begin (int): Step at which to start updating the parameters. + Defaults to 0. + end (int): Step at which to stop updating the parameters. + Defaults to INF. + last_step (int): The index of last step. Used for resume without + state dict. Defaults to -1. + by_epoch (bool): Whether the scheduled parameters are updated by + epochs. Defaults to True. + verbose (bool): Whether to print the value for each update. + Defaults to False. 
+ """ + + +@PARAM_SCHEDULERS.register_module() +class QuadraticWarmupMomentum(MomentumSchedulerMixin, + QuadraticWarmupParamScheduler): + """Warm up the momentum value of each parameter group by quadratic formula. + + Args: + optimizer (Optimizer): Wrapped optimizer. + begin (int): Step at which to start updating the parameters. + Defaults to 0. + end (int): Step at which to stop updating the parameters. + Defaults to INF. + last_step (int): The index of last step. Used for resume without + state dict. Defaults to -1. + by_epoch (bool): Whether the scheduled parameters are updated by + epochs. Defaults to True. + verbose (bool): Whether to print the value for each update. + Defaults to False. + """ diff --git a/mmpose/evaluation/__init__.py b/mmpose/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a758ba7c1b3d526e108b5b99da4546c75dcd14f5 --- /dev/null +++ b/mmpose/evaluation/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .evaluators import * # noqa: F401,F403 +from .functional import * # noqa: F401,F403 +from .metrics import * # noqa: F401,F403 diff --git a/mmpose/evaluation/evaluators/__init__.py b/mmpose/evaluation/evaluators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae2d79d514dca929d4a0458acace3c6eaab6aea1 --- /dev/null +++ b/mmpose/evaluation/evaluators/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .mutli_dataset_evaluator import MultiDatasetEvaluator + +__all__ = ['MultiDatasetEvaluator'] diff --git a/mmpose/evaluation/evaluators/mutli_dataset_evaluator.py b/mmpose/evaluation/evaluators/mutli_dataset_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..bc47d2980c9d05a2e068d1860068d2f6ba213e1f --- /dev/null +++ b/mmpose/evaluation/evaluators/mutli_dataset_evaluator.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from typing import Any, Optional, Sequence, Union + +from mmengine.evaluator.evaluator import Evaluator +from mmengine.evaluator.metric import BaseMetric +from mmengine.structures import BaseDataElement + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.registry import DATASETS, EVALUATORS + + +@EVALUATORS.register_module() +class MultiDatasetEvaluator(Evaluator): + """Wrapper class to compose multiple :class:`BaseMetric` instances. + + Args: + metrics (dict or BaseMetric or Sequence): The configs of metrics. + datasets (Sequence[str]): The configs of datasets. 
+ """ + + def __init__( + self, + metrics: Union[dict, BaseMetric, Sequence], + datasets: Sequence[dict], + ): + + assert len(metrics) == len(datasets), 'the argument ' \ + 'datasets should have same length as metrics' + + super().__init__(metrics) + + # Initialize metrics for each dataset + metrics_dict = dict() + for dataset, metric in zip(datasets, self.metrics): + metainfo_file = DATASETS.module_dict[dataset['type']].METAINFO + dataset_meta = parse_pose_metainfo(metainfo_file) + metric.dataset_meta = dataset_meta + dataset_name = dataset_meta['dataset_name'] + metrics_dict[dataset_name] = metric + self.metrics_dict = metrics_dict + + @property + def dataset_meta(self) -> Optional[dict]: + """Optional[dict]: Meta info of the dataset.""" + return self._dataset_meta + + @dataset_meta.setter + def dataset_meta(self, dataset_meta: dict) -> None: + """Set the dataset meta info to the evaluator and it's metrics.""" + self._dataset_meta = dataset_meta + + def process(self, + data_samples: Sequence[BaseDataElement], + data_batch: Optional[Any] = None): + """Convert ``BaseDataSample`` to dict and invoke process method of each + metric. + + Args: + data_samples (Sequence[BaseDataElement]): predictions of the model, + and the ground truth of the validation set. + data_batch (Any, optional): A batch of data from the dataloader. + """ + _data_samples = defaultdict(list) + _data_batch = dict( + inputs=defaultdict(list), + data_samples=defaultdict(list), + ) + + for inputs, data_ds, data_sample in zip(data_batch['inputs'], + data_batch['data_samples'], + data_samples): + if isinstance(data_sample, BaseDataElement): + data_sample = data_sample.to_dict() + assert isinstance(data_sample, dict) + dataset_name = data_sample.get('dataset_name', + self.dataset_meta['dataset_name']) + _data_samples[dataset_name].append(data_sample) + _data_batch['inputs'][dataset_name].append(inputs) + _data_batch['data_samples'][dataset_name].append(data_ds) + + for dataset_name, metric in self.metrics_dict.items(): + if dataset_name in _data_samples: + data_batch = dict( + inputs=_data_batch['inputs'][dataset_name], + data_samples=_data_batch['data_samples'][dataset_name]) + metric.process(data_batch, _data_samples[dataset_name]) + else: + continue diff --git a/mmpose/evaluation/functional/__init__.py b/mmpose/evaluation/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..239968f03aa4c67dd65f752c5945a35d20b31897 --- /dev/null +++ b/mmpose/evaluation/functional/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .keypoint_eval import (keypoint_auc, keypoint_epe, keypoint_mpjpe, + keypoint_nme, keypoint_pck_accuracy, + multilabel_classification_accuracy, + pose_pck_accuracy, simcc_pck_accuracy) +from .nms import nearby_joints_nms, nms, nms_torch, oks_nms, soft_oks_nms +from .transforms import transform_ann, transform_pred, transform_sigmas + +__all__ = [ + 'keypoint_pck_accuracy', 'keypoint_auc', 'keypoint_nme', 'keypoint_epe', + 'pose_pck_accuracy', 'multilabel_classification_accuracy', + 'simcc_pck_accuracy', 'nms', 'oks_nms', 'soft_oks_nms', 'keypoint_mpjpe', + 'nms_torch', 'transform_ann', 'transform_sigmas', 'transform_pred', + 'nearby_joints_nms' +] diff --git a/mmpose/evaluation/functional/keypoint_eval.py b/mmpose/evaluation/functional/keypoint_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d5d0584b5ebe5da34abbe3ab99033b283956eb --- /dev/null +++ b/mmpose/evaluation/functional/keypoint_eval.py @@ -0,0 +1,384 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum, get_heatmap_expected_value +from .mesh_eval import compute_similarity_transform + + +def _calc_distances(preds: np.ndarray, gts: np.ndarray, mask: np.ndarray, + norm_factor: np.ndarray) -> np.ndarray: + """Calculate the normalized distances between preds and target. + + Note: + - instance number: N + - keypoint number: K + - keypoint dimension: D (normally, D=2 or D=3) + + Args: + preds (np.ndarray[N, K, D]): Predicted keypoint location. + gts (np.ndarray[N, K, D]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + norm_factor (np.ndarray[N, D]): Normalization factor. + Typical value is heatmap_size. + + Returns: + np.ndarray[K, N]: The normalized distances. \ + If target keypoints are missing, the distance is -1. + """ + N, K, _ = preds.shape + # set mask=0 when norm_factor==0 + _mask = mask.copy() + _mask[np.where((norm_factor == 0).sum(1))[0], :] = False + + distances = np.full((N, K), -1, dtype=np.float32) + # handle invalid values + norm_factor[np.where(norm_factor <= 0)] = 1e6 + distances[_mask] = np.linalg.norm( + ((preds - gts) / norm_factor[:, None, :])[_mask], axis=-1) + return distances.T + + +def _distance_acc(distances: np.ndarray, thr: float = 0.5) -> float: + """Return the percentage below the distance threshold, while ignoring + distances values with -1. + + Note: + - instance number: N + + Args: + distances (np.ndarray[N, ]): The normalized distances. + thr (float): Threshold of the distances. + + Returns: + float: Percentage of distances below the threshold. \ + If all target keypoints are missing, return -1. + """ + distance_valid = distances != -1 + num_distance_valid = distance_valid.sum() + if num_distance_valid > 0: + return (distances[distance_valid] < thr).sum() / num_distance_valid + return -1 + + +def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, + thr: np.ndarray, norm_factor: np.ndarray) -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints for coordinates. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. 
+ The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. + norm_factor (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - acc (np.ndarray[K]): Accuracy of each keypoint. + - avg_acc (float): Averaged accuracy across all keypoints. + - cnt (int): Number of valid keypoints. + """ + distances = _calc_distances(pred, gt, mask, norm_factor) + acc = np.array([_distance_acc(d, thr) for d in distances]) + valid_acc = acc[acc >= 0] + cnt = len(valid_acc) + avg_acc = valid_acc.mean() if cnt > 0 else 0.0 + return acc, avg_acc, cnt + + +def keypoint_auc(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + norm_factor: np.ndarray, + num_thrs: int = 20) -> float: + """Calculate the Area under curve (AUC) of keypoint PCK accuracy. + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + norm_factor (float): Normalization factor. + num_thrs (int): number of thresholds to calculate auc. + + Returns: + float: Area under curve (AUC) of keypoint PCK accuracy. + """ + nor = np.tile(np.array([[norm_factor, norm_factor]]), (pred.shape[0], 1)) + thrs = [1.0 * i / num_thrs for i in range(num_thrs)] + avg_accs = [] + for thr in thrs: + _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor) + avg_accs.append(avg_acc) + + auc = 0 + for i in range(num_thrs): + auc += 1.0 / num_thrs * avg_accs[i] + return auc + + +def keypoint_nme(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, + normalize_factor: np.ndarray) -> float: + """Calculate the normalized mean error (NME). + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + normalize_factor (np.ndarray[N, 2]): Normalization factor. + + Returns: + float: normalized mean error + """ + distances = _calc_distances(pred, gt, mask, normalize_factor) + distance_valid = distances[distances != -1] + return distance_valid.sum() / max(1, len(distance_valid)) + + +def keypoint_epe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray) -> float: + """Calculate the end-point error. + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + + Returns: + float: Average end-point error. 
+ """ + + distances = _calc_distances( + pred, gt, mask, + np.ones((pred.shape[0], pred.shape[2]), dtype=np.float32)) + distance_valid = distances[distances != -1] + return distance_valid.sum() / max(1, len(distance_valid)) + + +def pose_pck_accuracy(output: np.ndarray, + target: np.ndarray, + mask: np.ndarray, + thr: float = 0.05, + normalize: Optional[np.ndarray] = None, + method: str = 'argmax') -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints from heatmaps. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - batch_size: N + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + output (np.ndarray[N, K, H, W]): Model output heatmaps. + target (np.ndarray[N, K, H, W]): Groundtruth heatmaps. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. Default 0.05. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - np.ndarray[K]: Accuracy of each keypoint. + - float: Averaged accuracy across all keypoints. + - int: Number of valid keypoints. + """ + method = method.lower() + if method not in ['argmax', 'expected']: + raise ValueError(f'Invalid method: {method}') + + N, K, H, W = output.shape + if K == 0: + return None, 0, 0 + if normalize is None: + normalize = np.tile(np.array([[H, W]]), (N, 1)) + + if method == 'argmax': + pred, _ = get_heatmap_maximum(output) + gt, _ = get_heatmap_maximum(target) + else: + pred, _ = get_heatmap_expected_value(output) + gt, _ = get_heatmap_expected_value(target) + return keypoint_pck_accuracy(pred, gt, mask, thr, normalize) + + +def simcc_pck_accuracy(output: Tuple[np.ndarray, np.ndarray], + target: Tuple[np.ndarray, np.ndarray], + simcc_split_ratio: float, + mask: np.ndarray, + thr: float = 0.05, + normalize: Optional[np.ndarray] = None) -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints from SimCC. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - instance number: N + - keypoint number: K + + Args: + output (Tuple[np.ndarray, np.ndarray]): Model predicted SimCC. + target (Tuple[np.ndarray, np.ndarray]): Groundtruth SimCC. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. Default 0.05. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - np.ndarray[K]: Accuracy of each keypoint. + - float: Averaged accuracy across all keypoints. + - int: Number of valid keypoints. 
+ """ + pred_x, pred_y = output + gt_x, gt_y = target + + N, _, Wx = pred_x.shape + _, _, Wy = pred_y.shape + W, H = int(Wx / simcc_split_ratio), int(Wy / simcc_split_ratio) + + if normalize is None: + normalize = np.tile(np.array([[H, W]]), (N, 1)) + + pred_coords, _ = get_simcc_maximum(pred_x, pred_y) + pred_coords /= simcc_split_ratio + gt_coords, _ = get_simcc_maximum(gt_x, gt_y) + gt_coords /= simcc_split_ratio + + return keypoint_pck_accuracy(pred_coords, gt_coords, mask, thr, normalize) + + +def multilabel_classification_accuracy(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + thr: float = 0.5) -> float: + """Get multi-label classification accuracy. + + Note: + - batch size: N + - label number: L + + Args: + pred (np.ndarray[N, L, 2]): model predicted labels. + gt (np.ndarray[N, L, 2]): ground-truth labels. + mask (np.ndarray[N, 1] or np.ndarray[N, L] ): reliability of + ground-truth labels. + thr (float): Threshold for calculating accuracy. + + Returns: + float: multi-label classification accuracy. + """ + # we only compute accuracy on the samples with ground-truth of all labels. + valid = (mask > 0).min(axis=1) if mask.ndim == 2 else (mask > 0) + pred, gt = pred[valid], gt[valid] + + if pred.shape[0] == 0: + acc = 0.0 # when no sample is with gt labels, set acc to 0. + else: + # The classification of a sample is regarded as correct + # only if it's correct for all labels. + acc = (((pred - thr) * (gt - thr)) > 0).all(axis=1).mean() + return acc + + +def keypoint_mpjpe(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + alignment: str = 'none'): + """Calculate the mean per-joint position error (MPJPE) and the error after + rigid alignment with the ground truth (P-MPJPE). + + Note: + - batch_size: N + - num_keypoints: K + - keypoint_dims: C + + Args: + pred (np.ndarray): Predicted keypoint location with shape [N, K, C]. + gt (np.ndarray): Groundtruth keypoint location with shape [N, K, C]. + mask (np.ndarray): Visibility of the target with shape [N, K]. + False for invisible joints, and True for visible. + Invisible joints will be ignored for accuracy calculation. + alignment (str, optional): method to align the prediction with the + groundtruth. Supported options are: + + - ``'none'``: no alignment will be applied + - ``'scale'``: align in the least-square sense in scale + - ``'procrustes'``: align in the least-square sense in + scale, rotation and translation. + + Returns: + tuple: A tuple containing joint position errors + + - (float | np.ndarray): mean per-joint position error (mpjpe). + - (float | np.ndarray): mpjpe after rigid alignment with the + ground truth (p-mpjpe). 
+ """ + assert mask.any() + + if alignment == 'none': + pass + elif alignment == 'procrustes': + pred = np.stack([ + compute_similarity_transform(pred_i, gt_i) + for pred_i, gt_i in zip(pred, gt) + ]) + elif alignment == 'scale': + pred_dot_pred = np.einsum('nkc,nkc->n', pred, pred) + pred_dot_gt = np.einsum('nkc,nkc->n', pred, gt) + scale_factor = pred_dot_gt / pred_dot_pred + pred = pred * scale_factor[:, None, None] + else: + raise ValueError(f'Invalid value for alignment: {alignment}') + error = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean() + + return error diff --git a/mmpose/evaluation/functional/mesh_eval.py b/mmpose/evaluation/functional/mesh_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..683b4539b29d1829a324de424c6d9f85a7037e5d --- /dev/null +++ b/mmpose/evaluation/functional/mesh_eval.py @@ -0,0 +1,66 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/akanazawa/hmr +# Original licence: Copyright (c) 2018 akanazawa, under the MIT License. +# ------------------------------------------------------------------------------ + +import numpy as np + + +def compute_similarity_transform(source_points, target_points): + """Computes a similarity transform (sR, t) that takes a set of 3D points + source_points (N x 3) closest to a set of 3D points target_points, where R + is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the + transformed 3D points source_points_hat (N x 3). i.e. solves the orthogonal + Procrutes problem. + + Note: + Points number: N + + Args: + source_points (np.ndarray): Source point set with shape [N, 3]. + target_points (np.ndarray): Target point set with shape [N, 3]. + + Returns: + np.ndarray: Transformed source point set with shape [N, 3]. + """ + + assert target_points.shape[0] == source_points.shape[0] + assert target_points.shape[1] == 3 and source_points.shape[1] == 3 + + source_points = source_points.T + target_points = target_points.T + + # 1. Remove mean. + mu1 = source_points.mean(axis=1, keepdims=True) + mu2 = target_points.mean(axis=1, keepdims=True) + X1 = source_points - mu1 + X2 = target_points - mu2 + + # 2. Compute variance of X1 used for scale. + var1 = np.sum(X1**2) + + # 3. The outer product of X1 and X2. + K = X1.dot(X2.T) + + # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are + # singular vectors of K. + U, _, Vh = np.linalg.svd(K) + V = Vh.T + # Construct Z that fixes the orientation of R to get det(R)=1. + Z = np.eye(U.shape[0]) + Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T))) + # Construct R. + R = V.dot(Z.dot(U.T)) + + # 5. Recover scale. + scale = np.trace(R.dot(K)) / var1 + + # 6. Recover translation. + t = mu2 - scale * (R.dot(mu1)) + + # 7. Transform the source points: + source_points_hat = scale * R.dot(source_points) + t + + source_points_hat = source_points_hat.T + + return source_points_hat diff --git a/mmpose/evaluation/functional/nms.py b/mmpose/evaluation/functional/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..f7dd2279c74cb74ef943a02bff3998f4d03f744d --- /dev/null +++ b/mmpose/evaluation/functional/nms.py @@ -0,0 +1,369 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch +# and https://github.com/HRNet/DEKR +# Original licence: Copyright (c) Microsoft, under the MIT License. 
+# ------------------------------------------------------------------------------ + +from typing import List, Optional + +import numpy as np +import torch +from torch import Tensor + +from mmpose.structures.bbox import bbox_overlaps + + +def nms(dets: np.ndarray, thr: float) -> List[int]: + """Greedily select boxes with high confidence and overlap <= thr. + + Args: + dets (np.ndarray): [[x1, y1, x2, y2, score]]. + thr (float): Retain overlap < thr. + + Returns: + list: Indexes to keep. + """ + if len(dets) == 0: + return [] + + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while len(order) > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= thr)[0] + order = order[inds + 1] + + return keep + + +def oks_iou(g: np.ndarray, + d: np.ndarray, + a_g: float, + a_d: np.ndarray, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None) -> np.ndarray: + """Calculate oks ious. + + Note: + + - number of keypoints: K + - number of instances: N + + Args: + g (np.ndarray): The instance to calculate OKS IOU with other + instances. Containing the keypoints coordinates. Shape: (K*3, ) + d (np.ndarray): The rest instances. Containing the keypoints + coordinates. Shape: (N, K*3) + a_g (float): Area of the ground truth object. + a_d (np.ndarray): Area of the detected object. Shape: (N, ) + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. + If not given, use the sigmas on COCO dataset. + If specified, shape: (K, ). Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. + If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + + Returns: + np.ndarray: The oks ious. + """ + if sigmas is None: + sigmas = np.array([ + .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, + .87, .87, .89, .89 + ]) / 10.0 + vars = (sigmas * 2)**2 + xg = g[0::3] + yg = g[1::3] + vg = g[2::3] + ious = np.zeros(len(d), dtype=np.float32) + for n_d in range(0, len(d)): + xd = d[n_d, 0::3] + yd = d[n_d, 1::3] + vd = d[n_d, 2::3] + dx = xd - xg + dy = yd - yg + e = (dx**2 + dy**2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2 + if vis_thr is not None: + ind = list((vg > vis_thr) & (vd > vis_thr)) + e = e[ind] + ious[n_d] = np.sum(np.exp(-e)) / len(e) if len(e) != 0 else 0.0 + return ious + + +def oks_nms(kpts_db: List[dict], + thr: float, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None, + score_per_joint: bool = False): + """OKS NMS implementations. + + Args: + kpts_db (List[dict]): The keypoints results of the same image. + thr (float): The threshold of NMS. Will retain oks overlap < thr. + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. + If not given, use the sigmas on COCO dataset. Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. 
+ If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + score_per_joint(bool): Whether the input scores (in kpts_db) are + per-joint scores. Defaults to ``False`` + + Returns: + np.ndarray: indexes to keep. + """ + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) + areas = np.array([k['area'] for k in kpts_db]) + + order = scores.argsort()[::-1] + + keep = [] + while len(order) > 0: + i = order[0] + keep.append(i) + + oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], + sigmas, vis_thr) + + inds = np.where(oks_ovr <= thr)[0] + order = order[inds + 1] + + keep = np.array(keep) + + return keep + + +def _rescore(overlap: np.ndarray, + scores: np.ndarray, + thr: float, + type: str = 'gaussian'): + """Rescoring mechanism gaussian or linear. + + Args: + overlap (np.ndarray): The calculated oks ious. + scores (np.ndarray): target scores. + thr (float): retain oks overlap < thr. + type (str): The rescoring type. Could be 'gaussian' or 'linear'. + Defaults to ``'gaussian'`` + + Returns: + np.ndarray: indexes to keep + """ + assert len(overlap) == len(scores) + assert type in ['gaussian', 'linear'] + + if type == 'linear': + inds = np.where(overlap >= thr)[0] + scores[inds] = scores[inds] * (1 - overlap[inds]) + else: + scores = scores * np.exp(-overlap**2 / thr) + + return scores + + +def soft_oks_nms(kpts_db: List[dict], + thr: float, + max_dets: int = 20, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None, + score_per_joint: bool = False): + """Soft OKS NMS implementations. + + Args: + kpts_db (List[dict]): The keypoints results of the same image. + thr (float): The threshold of NMS. Will retain oks overlap < thr. + max_dets (int): Maximum number of detections to keep. Defaults to 20 + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. + If not given, use the sigmas on COCO dataset. Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. + If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + score_per_joint(bool): Whether the input scores (in kpts_db) are + per-joint scores. Defaults to ``False`` + + Returns: + np.ndarray: indexes to keep. 
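To make the expected kpts_db layout concrete, here is a small illustrative call to oks_nms on two near-duplicate detections (a sketch, not part of the patch; the import path is assumed and the numbers are arbitrary).

    import numpy as np
    from mmpose.evaluation.functional import oks_nms  # assumed re-export

    rng = np.random.default_rng(0)
    kpts = rng.random((17, 3)) * 100        # COCO body joints as (x, y, visibility)
    det_a = {'keypoints': kpts, 'score': 0.9, 'area': 100.0 * 100.0}
    det_b = {'keypoints': kpts + 1.0, 'score': 0.8, 'area': 100.0 * 100.0}  # near-duplicate

    keep = oks_nms([det_a, det_b], thr=0.9)
    print(keep)  # the near-duplicate is suppressed, e.g. array([0])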
+ """ + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) + areas = np.array([k['area'] for k in kpts_db]) + + order = scores.argsort()[::-1] + scores = scores[order] + + keep = np.zeros(max_dets, dtype=np.intp) + keep_cnt = 0 + while len(order) > 0 and keep_cnt < max_dets: + i = order[0] + + oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], + sigmas, vis_thr) + + order = order[1:] + scores = _rescore(oks_ovr, scores[1:], thr) + + tmp = scores.argsort()[::-1] + order = order[tmp] + scores = scores[tmp] + + keep[keep_cnt] = i + keep_cnt += 1 + + keep = keep[:keep_cnt] + + return keep + + +def nearby_joints_nms( + kpts_db: List[dict], + dist_thr: float = 0.05, + num_nearby_joints_thr: Optional[int] = None, + score_per_joint: bool = False, + max_dets: int = 30, +): + """Nearby joints NMS implementations. Instances with non-maximum scores + will be suppressed if they have too much closed joints with other + instances. This function is modified from project + `DEKR`. + + Args: + kpts_db (list[dict]): keypoints and scores. + dist_thr (float): threshold for judging whether two joints are close. + Defaults to 0.05. + num_nearby_joints_thr (int): threshold for judging whether two + instances are close. + max_dets (int): max number of detections to keep. Defaults to 30. + score_per_joint (bool): the input scores (in kpts_db) are per joint + scores. + + Returns: + np.ndarray: indexes to keep. + """ + + assert dist_thr > 0, '`dist_thr` must be greater than 0.' + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'] for k in kpts_db]) + + num_people, num_joints, _ = kpts.shape + if num_nearby_joints_thr is None: + num_nearby_joints_thr = num_joints // 2 + assert num_nearby_joints_thr < num_joints, '`num_nearby_joints_thr` must '\ + 'be less than the number of joints.' 
+ + # compute distance threshold + pose_area = kpts.max(axis=1) - kpts.min(axis=1) + pose_area = np.sqrt(np.power(pose_area, 2).sum(axis=1)) + pose_area = pose_area.reshape(num_people, 1, 1) + pose_area = np.tile(pose_area, (num_people, num_joints)) + close_dist_thr = pose_area * dist_thr + + # count nearby joints between instances + instance_dist = kpts[:, None] - kpts + instance_dist = np.sqrt(np.power(instance_dist, 2).sum(axis=3)) + close_instance_num = (instance_dist < close_dist_thr).sum(2) + close_instance = close_instance_num > num_nearby_joints_thr + + # apply nms + ignored_pose_inds, keep_pose_inds = set(), list() + indexes = np.argsort(scores)[::-1] + for i in indexes: + if i in ignored_pose_inds: + continue + keep_inds = close_instance[i].nonzero()[0] + keep_ind = keep_inds[np.argmax(scores[keep_inds])] + if keep_ind not in ignored_pose_inds: + keep_pose_inds.append(keep_ind) + ignored_pose_inds = ignored_pose_inds.union(set(keep_inds)) + + # limit the number of output instances + if max_dets > 0 and len(keep_pose_inds) > max_dets: + sub_inds = np.argsort(scores[keep_pose_inds])[-1:-max_dets - 1:-1] + keep_pose_inds = [keep_pose_inds[i] for i in sub_inds] + + return keep_pose_inds + + +def nms_torch(bboxes: Tensor, + scores: Tensor, + threshold: float = 0.65, + iou_calculator=bbox_overlaps, + return_group: bool = False): + """Perform Non-Maximum Suppression (NMS) on a set of bounding boxes using + their corresponding scores. + + Args: + + bboxes (Tensor): list of bounding boxes (each containing 4 elements + for x1, y1, x2, y2). + scores (Tensor): scores associated with each bounding box. + threshold (float): IoU threshold to determine overlap. + iou_calculator (function): method to calculate IoU. + return_group (bool): if True, returns groups of overlapping bounding + boxes, otherwise returns the main bounding boxes. + """ + + _, indices = scores.sort(descending=True) + groups = [] + while len(indices): + idx, indices = indices[0], indices[1:] + bbox = bboxes[idx] + ious = iou_calculator(bbox, bboxes[indices]) + close_indices = torch.where(ious > threshold)[1] + keep_indices = torch.ones_like(indices, dtype=torch.bool) + keep_indices[close_indices] = 0 + groups.append(torch.cat((idx[None], indices[close_indices]))) + indices = indices[keep_indices] + + if return_group: + return groups + else: + return torch.cat([g[:1] for g in groups]) diff --git a/mmpose/evaluation/functional/transforms.py b/mmpose/evaluation/functional/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..56873b389cc145ceaad7f1307399f901a2ed0157 --- /dev/null +++ b/mmpose/evaluation/functional/transforms.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
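A quick sketch of the plain box-NMS variants defined above (illustrative only; the import path is assumed, and the boxes are made-up [x1, y1, x2, y2, score] rows).

    import numpy as np
    import torch
    from mmpose.evaluation.functional import nms, nms_torch  # assumed re-exports

    dets = np.array([
        [10, 10, 50, 50, 0.95],      # kept: highest score
        [12, 12, 52, 52, 0.90],      # suppressed: IoU with the first box > 0.65
        [100, 100, 140, 140, 0.80],  # kept: no overlap with the others
    ], dtype=np.float32)
    print(nms(dets, thr=0.65))       # -> [0, 2]

    bboxes = torch.from_numpy(dets[:, :4])
    scores = torch.from_numpy(dets[:, 4])
    print(nms_torch(bboxes, scores, threshold=0.65))  # kept indices, e.g. tensor([0, 2])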
+from typing import List, Tuple, Union + +import numpy as np + + +def transform_sigmas(sigmas: Union[List, np.ndarray], num_keypoints: int, + mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, + int]]]): + """Transforms the sigmas based on the mapping.""" + if len(mapping): + source_index, target_index = map(list, zip(*mapping)) + else: + source_index, target_index = [], [] + + list_input = False + if isinstance(sigmas, list): + sigmas = np.array(sigmas) + list_input = True + + new_sigmas = np.ones(num_keypoints, dtype=sigmas.dtype) + new_sigmas[target_index] = sigmas[source_index] + + if list_input: + new_sigmas = new_sigmas.tolist() + + return new_sigmas + + +def transform_ann(ann_info: Union[dict, list], num_keypoints: int, + mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, + int]]]): + """Transforms COCO-format annotations based on the mapping.""" + if len(mapping): + source_index, target_index = map(list, zip(*mapping)) + else: + source_index, target_index = [], [] + + list_input = True + if not isinstance(ann_info, list): + ann_info = [ann_info] + list_input = False + + for each in ann_info: + if 'keypoints' in each: + keypoints = np.array(each['keypoints']) + + C = 3 # COCO-format: x, y, score + keypoints = keypoints.reshape(-1, C) + new_keypoints = np.zeros((num_keypoints, C), dtype=keypoints.dtype) + new_keypoints[target_index] = keypoints[source_index] + each['keypoints'] = new_keypoints.reshape(-1).tolist() + + if 'num_keypoints' in each: + each['num_keypoints'] = num_keypoints + + if not list_input: + ann_info = ann_info[0] + + return ann_info + + +def transform_pred(pred_info: Union[dict, list], num_keypoints: int, + mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, + int]]]): + """Transforms predictions based on the mapping.""" + if len(mapping): + source_index, target_index = map(list, zip(*mapping)) + else: + source_index, target_index = [], [] + + list_input = True + if not isinstance(pred_info, list): + pred_info = [pred_info] + list_input = False + + for each in pred_info: + if 'keypoints' in each: + keypoints = np.array(each['keypoints']) + + N, _, C = keypoints.shape + new_keypoints = np.zeros((N, num_keypoints, C), + dtype=keypoints.dtype) + new_keypoints[:, target_index] = keypoints[:, source_index] + each['keypoints'] = new_keypoints + + keypoint_scores = np.array(each['keypoint_scores']) + new_scores = np.zeros((N, num_keypoints), + dtype=keypoint_scores.dtype) + new_scores[:, target_index] = keypoint_scores[:, source_index] + each['keypoint_scores'] = new_scores + + if 'num_keypoints' in each: + each['num_keypoints'] = num_keypoints + + if not list_input: + pred_info = pred_info[0] + + return pred_info diff --git a/mmpose/evaluation/metrics/__init__.py b/mmpose/evaluation/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e82356a49f9cfa5136ed0478dc9dba3281fc837 --- /dev/null +++ b/mmpose/evaluation/metrics/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
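All three transform_* helpers consume the same mapping format, a list of (source_index, target_index) pairs as used by KeypointConverter; below is a small illustrative sketch with made-up joint indices (the import path is assumed from how the metric modules import these helpers).

    import numpy as np
    from mmpose.evaluation.functional import transform_pred, transform_sigmas  # assumed re-exports

    # Map a hypothetical 3-keypoint source skeleton onto a 5-keypoint target one.
    mapping = [(0, 2), (1, 0), (2, 4)]   # source joint -> target joint

    sigmas = np.array([0.025, 0.025, 0.107])
    print(transform_sigmas(sigmas, num_keypoints=5, mapping=mapping))
    # -> [0.025 1.    0.025 1.    0.107]; unmapped target joints keep the
    #    placeholder sigma of 1.0

    pred = {
        'keypoints': np.zeros((1, 3, 2)),     # one instance, 3 source keypoints
        'keypoint_scores': np.ones((1, 3)),
        'num_keypoints': 3,
    }
    pred = transform_pred(pred, num_keypoints=5, mapping=mapping)
    print(pred['keypoints'].shape, pred['keypoint_scores'].shape)  # (1, 5, 2) (1, 5)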
+from .coco_metric import CocoMetric +from .coco_wholebody_metric import CocoWholeBodyMetric +from .hand_metric import InterHandMetric +from .keypoint_2d_metrics import (AUC, EPE, NME, JhmdbPCKAccuracy, + MpiiPCKAccuracy, PCKAccuracy) +from .keypoint_3d_metrics import MPJPE +from .keypoint_partition_metric import KeypointPartitionMetric +from .posetrack18_metric import PoseTrack18Metric +from .simple_keypoint_3d_metrics import SimpleMPJPE + +__all__ = [ + 'CocoMetric', 'PCKAccuracy', 'MpiiPCKAccuracy', 'JhmdbPCKAccuracy', 'AUC', + 'EPE', 'NME', 'PoseTrack18Metric', 'CocoWholeBodyMetric', + 'KeypointPartitionMetric', 'MPJPE', 'InterHandMetric', 'SimpleMPJPE' +] diff --git a/mmpose/evaluation/metrics/coco_metric.py b/mmpose/evaluation/metrics/coco_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..440693e30167a11cc52bfcf1219e95c19213f4a3 --- /dev/null +++ b/mmpose/evaluation/metrics/coco_metric.py @@ -0,0 +1,787 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import os.path as osp +import tempfile +from collections import OrderedDict, defaultdict +from typing import Dict, Optional, Sequence +import traceback + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.fileio import dump, get_local_path, load +from mmengine.logging import MessageHub, MMLogger, print_log +from xtcocotools.coco import COCO +from xtcocotools.cocoeval import COCOeval + +from mmpose.registry import METRICS +from mmpose.structures.bbox import bbox_xyxy2xywh +from mmpose.structures.keypoint import find_min_padding_exact, fix_bbox_aspect_ratio +from ..functional import (oks_nms, soft_oks_nms, transform_ann, transform_pred, + transform_sigmas) + +import cv2 +import os + +import matplotlib.pyplot as plt +from matplotlib import rc + +from xtcocotools.mask import _mask as maskUtils + + +@METRICS.register_module() +class CocoMetric(BaseMetric): + """COCO pose estimation task evaluation metric. + + Evaluate AR, AP, and mAP for keypoint detection tasks. Support COCO + dataset and other datasets in COCO format. Please refer to + `COCO keypoint evaluation `__ + for more details. + + Args: + ann_file (str, optional): Path to the coco format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco format. Defaults to None + use_area (bool): Whether to use ``'area'`` message in the annotations. + If the ground truth annotations (e.g. CrowdPose, AIC) do not have + the field ``'area'``, please set ``use_area=False``. + Defaults to ``True`` + iou_type (str): The same parameter as `iouType` in + :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or + ``'keypoints_crowd'`` (used in CrowdPose dataset). + Defaults to ``'keypoints'`` + score_mode (str): The mode to score the prediction results which + should be one of the following options: + + - ``'bbox'``: Take the score of bbox as the score of the + prediction results. + - ``'bbox_keypoint'``: Use keypoint score to rescore the + prediction results. + - ``'bbox_rle'``: Use rle_score to rescore the + prediction results. + + Defaults to ``'bbox_keypoint'` + keypoint_score_thr (float): The threshold of keypoint score. The + keypoints with score lower than it will not be included to + rescore the prediction results. Valid only when ``score_mode`` is + ``bbox_keypoint``. 
Defaults to ``0.2`` + nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), + which should be one of the following options: + + - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to + perform NMS. + - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) + to perform soft NMS. + - ``'none'``: Do not perform NMS. Typically for bottomup mode + output. + + Defaults to ``'oks_nms'` + nms_thr (float): The Object Keypoint Similarity (OKS) threshold + used in NMS when ``nms_mode`` is ``'oks_nms'`` or + ``'soft_oks_nms'``. Will retain the prediction results with OKS + lower than ``nms_thr``. Defaults to ``0.9`` + format_only (bool): Whether only format the output results without + doing quantitative evaluation. This is designed for the need of + test submission when the ground truth annotations are absent. If + set to ``True``, ``outfile_prefix`` should specify the path to + store the output results. Defaults to ``False`` + pred_converter (dict, optional): Config dictionary for the prediction + converter. The dictionary has the same parameters as + 'KeypointConverter'. Defaults to None. + gt_converter (dict, optional): Config dictionary for the ground truth + converter. The dictionary has the same parameters as + 'KeypointConverter'. Defaults to None. + outfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., ``'a/b/prefix'``. + If not specified, a temp file will be created. Defaults to ``None`` + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Defaults to ``'cpu'`` + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. 
Defaults to ``None`` + """ + default_prefix: Optional[str] = 'coco' + + def __init__(self, + ann_file: Optional[str] = None, + use_area: bool = True, + iou_type: str = 'keypoints', + score_mode: str = 'bbox_keypoint', + score_thresh_type: str = 'score', + keypoint_score_thr: float = 0.2, + nms_mode: str = 'oks_nms', + nms_thr: float = 0.9, + format_only: bool = False, + pred_converter: Dict = None, + gt_converter: Dict = None, + outfile_prefix: Optional[str] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None, + extended: list = [False], + match_by_bbox: list = [False], + ignore_border_points: list = [False], + ignore_stats: list = [], + padding: float = 1.25) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.ann_file = ann_file + # initialize coco helper with the annotation json file + # if ann_file is not specified, initialize with the converted dataset + if ann_file is not None: + with get_local_path(ann_file) as local_path: + self.coco = COCO(local_path) + else: + self.coco = None + + self.use_area = use_area + self.iou_type = iou_type + + allowed_score_modes = ['bbox', 'bbox_keypoint', 'bbox_rle', 'keypoint'] + if score_mode not in allowed_score_modes: + raise ValueError( + "`score_mode` should be one of 'bbox', 'bbox_keypoint', " + f"'bbox_rle', but got {score_mode}") + self.score_mode = score_mode + self.keypoint_score_thr = keypoint_score_thr + if score_thresh_type not in ['score', 'prob']: + raise ValueError( + "'score_thresh_type' should be one of 'score' or 'prob'" + ) + self.score_thresh_type = score_thresh_type + + allowed_nms_modes = ['oks_nms', 'soft_oks_nms', 'none'] + if nms_mode not in allowed_nms_modes: + raise ValueError( + "`nms_mode` should be one of 'oks_nms', 'soft_oks_nms', " + f"'none', but got {nms_mode}") + self.nms_mode = nms_mode + self.nms_thr = nms_thr + + if format_only: + assert outfile_prefix is not None, '`outfile_prefix` can not be '\ + 'None when `format_only` is True, otherwise the result file '\ + 'will be saved to a temp directory which will be cleaned up '\ + 'in the end.' + elif ann_file is not None: + # do evaluation only if the ground truth annotations exist + assert 'annotations' in load(ann_file), \ + 'Ground truth annotations are required for evaluation '\ + 'when `format_only` is False.' + + self.format_only = format_only + self.outfile_prefix = outfile_prefix + self.pred_converter = pred_converter + self.gt_converter = gt_converter + + len_params = max(len(extended), len(match_by_bbox)) + if len(extended) == 1 and len_params > 1: + extended = extended * len_params + if len(match_by_bbox) == 1 and len_params > 1: + match_by_bbox = match_by_bbox * len_params + assert len(extended) == len(match_by_bbox), \ + 'The length of `extended` and `match_by_bbox` should be the same.' + assert len(extended) >= 1, \ + 'The length of `extended` and `match_by_bbox` should be at least 1.' 
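The single-element broadcasting above behaves like this standalone sketch (illustrative values only):

    extended = [False, True, True]
    match_by_bbox = [False]

    len_params = max(len(extended), len(match_by_bbox))
    if len(extended) == 1 and len_params > 1:
        extended = extended * len_params
    if len(match_by_bbox) == 1 and len_params > 1:
        match_by_bbox = match_by_bbox * len_params

    print(list(zip(extended, match_by_bbox)))
    # [(False, False), (True, False), (True, False)]; each pair later drives
    # one COCOeval run in `_do_python_keypoint_eval`.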
+ self.extended = extended + self.match_by_bbox = match_by_bbox + self.ignore_border_points = ignore_border_points + + self.ignore_stats = ignore_stats + self.prob_thr = -1 + self.has_probability = True + self.padding = padding + + self._compute_min_padding_in_coco() + + @property + def dataset_meta(self) -> Optional[dict]: + """Optional[dict]: Meta info of the dataset.""" + return self._dataset_meta + + @dataset_meta.setter + def dataset_meta(self, dataset_meta: dict) -> None: + """Set the dataset meta info to the metric.""" + if self.gt_converter is not None: + dataset_meta['sigmas'] = transform_sigmas( + dataset_meta['sigmas'], self.gt_converter['num_keypoints'], + self.gt_converter['mapping']) + dataset_meta['num_keypoints'] = len(dataset_meta['sigmas']) + self._dataset_meta = dataset_meta + + if self.coco is None: + message = MessageHub.get_current_instance() + ann_file = message.get_info( + f"{dataset_meta['dataset_name']}_ann_file", None) + if ann_file is not None: + with get_local_path(ann_file) as local_path: + self.coco = COCO(local_path) + print_log( + f'CocoMetric for dataset ' + f"{dataset_meta['dataset_name']} has successfully " + f'loaded the annotation file from {ann_file}', 'current') + + def _compute_min_padding_in_coco(self): + """Compute the minimum padding in COCO format.""" + if self.coco is None: + return + + for _, ann in self.coco.anns.items(): + if 'pad_to_contain' in ann.keys(): + continue + + kpts = np.array(ann['keypoints']).reshape(-1, 3) + bbox = np.array(ann['bbox']).flatten() + min_padding = find_min_padding_exact(bbox, kpts) + ann['pad_to_contain'] = min_padding + + return + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model, each of which has the following keys: + + - 'id': The id of the sample + - 'img_id': The image_id of the sample + - 'pred_instances': The prediction results of instance(s) + """ + self.results_len = len(self.results) + for data_sample in data_samples: + if 'pred_instances' not in data_sample: + raise ValueError( + '`pred_instances` are required to process the ' + f'predictions results in {self.__class__.__name__}. 
') + + # keypoints.shape: [N, K, 2], + # N: number of instances, K: number of keypoints + # for topdown-style output, N is usually 1, while for + # bottomup-style output, N is the number of instances in the image + keypoints = data_sample['pred_instances']['keypoints'] + N, K, _ = keypoints.shape + # [N, K], the scores for all keypoints of all instances + keypoint_scores = data_sample['pred_instances']['keypoint_scores'] + assert keypoint_scores.shape == keypoints.shape[:2] + + if 'keypoints_visible' in data_sample['pred_instances']: + keypoints_visible = data_sample['pred_instances']['keypoints_visible'] + else: + keypoints_visible = keypoint_scores.copy() + + if 'keypoints_probs' in data_sample['pred_instances']: + keypoints_probs = data_sample['pred_instances']['keypoints_probs'] + # keypoints_probs = keypoint_scores.copy() + else: + self.has_probability = False + keypoints_probs = keypoint_scores.copy() + + if 'keypoints_oks' in data_sample['pred_instances']: + keypoints_oks = data_sample['pred_instances']['keypoints_oks'] + else: + keypoints_oks = keypoint_scores.copy() + + if 'keypoints_error' in data_sample['pred_instances']: + keypoints_error = data_sample['pred_instances']['keypoints_error'] + else: + keypoints_error = keypoint_scores.copy() + + if K == 21: + # Translate 21 keypoints to 17 keypoints by ignoring the last 4 keypoints + keypoints = keypoints[:, :17, :] + keypoint_scores = keypoint_scores[:, :17] + keypoints_visible = keypoints_visible[:, :17] + keypoints_probs = keypoints_probs[:, :17] + keypoints_oks = keypoints_oks[:, :17] + keypoints_error = keypoints_error[:, :17] + + elif K != 17: + raise ValueError('The number of keypoints should be 17 or 21, ' + f'but got {K}.') + + assert keypoints.shape[1] == 17, f'Number of keypoints should be 17 but got {keypoints.shape}' + assert keypoint_scores.shape[1] == 17, f'Number of keypoint scores should be 17 but got {keypoint_scores.shape}' + assert keypoints_visible.shape[1] == 17, f'Number of visible keypoints should be 17 but got {keypoints_visible.shape}' + assert keypoints_probs.shape[1] == 17, f'Number of keypoint probs should be 17 but got {keypoints_probs.shape}' + assert keypoints_oks.shape[1] == 17, f'Number of keypoint oks should be 17 but got {keypoints_oks.shape}' + assert keypoints_error.shape[1] == 17, f'Number of keypoint error should be 17 but got {keypoints_error.shape}' + assert heatmaps.shape[1] == 17, f'Number of heatmaps should be 17 but got {heatmaps.shape}' + + # parse prediction results + pred = dict() + pred['id'] = data_sample['id'] + pred['img_id'] = data_sample['img_id'] + + pred['keypoints'] = keypoints + pred['keypoint_scores'] = keypoint_scores + pred['keypoints_visible'] = keypoints_visible + pred['keypoint_probs'] = keypoints_probs + pred['keypoint_oks'] = keypoints_oks + pred['keypoint_error'] = keypoints_error + pred['category_id'] = data_sample.get('category_id', 1) + if 'bboxes' in data_sample['pred_instances']: + pred['bbox'] = bbox_xyxy2xywh( + data_sample['pred_instances']['bboxes']) + + if 'bbox_scores' in data_sample['pred_instances']: + # some one-stage models will predict bboxes and scores + # together with keypoints + bbox_scores = data_sample['pred_instances']['bbox_scores'] + elif ('bbox_scores' not in data_sample['gt_instances'] + or len(data_sample['gt_instances']['bbox_scores']) != + len(keypoints)): + # bottom-up models might output different number of + # instances from annotation + bbox_scores = np.ones(len(keypoints)) + else: + # top-down models use detected bboxes, the 
scores of which + # are contained in the gt_instances + bbox_scores = data_sample['gt_instances']['bbox_scores'] + pred['bbox_scores'] = bbox_scores + + # get area information + if 'bbox_scales' in data_sample['gt_instances']: + pred['areas'] = np.prod( + data_sample['gt_instances']['bbox_scales'], axis=1) + + # parse gt + gt = dict() + if self.coco is None: + gt['width'] = data_sample['ori_shape'][1] + gt['height'] = data_sample['ori_shape'][0] + gt['img_id'] = data_sample['img_id'] + if self.iou_type == 'keypoints_crowd': + assert 'crowd_index' in data_sample, \ + '`crowd_index` is required when `self.iou_type` is ' \ + '`keypoints_crowd`' + gt['crowd_index'] = data_sample['crowd_index'] + assert 'raw_ann_info' in data_sample, \ + 'The row ground truth annotations are required for ' \ + 'evaluation when `ann_file` is not provided' + anns = data_sample['raw_ann_info'] + gt['raw_ann_info'] = anns if isinstance(anns, list) else [anns] + + # add converted result to the results list + self.results.append((pred, gt)) + processed_len = len(self.results) - self.results_len + if processed_len != len(data_samples): + print(f'Warning: {processed_len} samples are processed, ') + print(f'but {len(data_samples)} samples are provided.') + + def gt_to_coco_json(self, gt_dicts: Sequence[dict], + outfile_prefix: str) -> str: + """Convert ground truth to coco format json file. + + Args: + gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict + contains the ground truth information about the data sample. + Required keys of the each `gt_dict` in `gt_dicts`: + - `img_id`: image id of the data sample + - `width`: original image width + - `height`: original image height + - `raw_ann_info`: the raw annotation information + Optional keys: + - `crowd_index`: measure the crowding level of an image, + defined in CrowdPose dataset + It is worth mentioning that, in order to compute `CocoMetric`, + there are some required keys in the `raw_ann_info`: + - `id`: the id to distinguish different annotations + - `image_id`: the image id of this annotation + - `category_id`: the category of the instance. + - `bbox`: the object bounding box + - `keypoints`: the keypoints cooridinates along with their + visibilities. Note that it need to be aligned + with the official COCO format, e.g., a list with length + N * 3, in which N is the number of keypoints. And each + triplet represent the [x, y, visible] of the keypoint. + - `iscrowd`: indicating whether the annotation is a crowd. + It is useful when matching the detection results to + the ground truth. + There are some optional keys as well: + - `area`: it is necessary when `self.use_area` is `True` + - `num_keypoints`: it is necessary when `self.iou_type` + is set as `keypoints_crowd`. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json file will be named + "somepath/xxx.gt.json". + Returns: + str: The filename of the json file. 
+ """ + image_infos = [] + annotations = [] + img_ids = [] + ann_ids = [] + + for gt_dict in gt_dicts: + # filter duplicate image_info + if gt_dict['img_id'] not in img_ids: + image_info = dict( + id=gt_dict['img_id'], + width=gt_dict['width'], + height=gt_dict['height'], + ) + if self.iou_type == 'keypoints_crowd': + image_info['crowdIndex'] = gt_dict['crowd_index'] + + image_infos.append(image_info) + img_ids.append(gt_dict['img_id']) + + # filter duplicate annotations + for ann in gt_dict['raw_ann_info']: + if ann is None: + # during evaluation on bottom-up datasets, some images + # do not have instance annotation + continue + + annotation = dict( + id=ann['id'], + image_id=ann['image_id'], + category_id=ann['category_id'], + bbox=ann['bbox'], + keypoints=ann['keypoints'], + iscrowd=ann['iscrowd'], + ) + if self.use_area: + assert 'area' in ann, \ + '`area` is required when `self.use_area` is `True`' + annotation['area'] = ann['area'] + + if self.iou_type == 'keypoints_crowd': + assert 'num_keypoints' in ann, \ + '`num_keypoints` is required when `self.iou_type` ' \ + 'is `keypoints_crowd`' + annotation['num_keypoints'] = ann['num_keypoints'] + + annotations.append(annotation) + ann_ids.append(ann['id']) + + info = dict( + date_created=str(datetime.datetime.now()), + description='Coco json file converted by mmpose CocoMetric.') + coco_json = dict( + info=info, + images=image_infos, + categories=self.dataset_meta['CLASSES'], + licenses=None, + annotations=annotations, + ) + converted_json_path = f'{outfile_prefix}.gt.json' + dump(coco_json, converted_json_path, sort_keys=True, indent=4) + return converted_json_path + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + + # split prediction and gt list + preds, gts = zip(*results) + + tmp_dir = None + if self.outfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + outfile_prefix = osp.join(tmp_dir.name, 'results') + else: + outfile_prefix = self.outfile_prefix + + if self.coco is None: + # use converted gt json file to initialize coco helper + logger.info('Converting ground truth to coco format...') + coco_json_path = self.gt_to_coco_json( + gt_dicts=gts, outfile_prefix=outfile_prefix) + self.coco = COCO(coco_json_path) + if self.gt_converter is not None: + for id_, ann in self.coco.anns.items(): + self.coco.anns[id_] = transform_ann( + ann, self.gt_converter['num_keypoints'], + self.gt_converter['mapping']) + + kpts = defaultdict(list) + + # group the preds by img_id + for pred in preds: + img_id = pred['img_id'] + + if self.pred_converter is not None: + pred = transform_pred(pred, + self.pred_converter['num_keypoints'], + self.pred_converter['mapping']) + + for idx, keypoints in enumerate(pred['keypoints']): + + instance = { + 'id': pred['id'], + 'img_id': pred['img_id'], + 'category_id': pred['category_id'], + 'keypoints': keypoints, + 'keypoint_scores': pred['keypoint_scores'][idx], + 'bbox_score': pred['bbox_scores'][idx], + 'keypoints_visible': pred['keypoints_visible'][idx], + 'keypoint_probs': pred['keypoint_probs'][idx], + 'keypoint_oks': pred['keypoint_oks'][idx], + 'keypoint_error': pred['keypoint_error'][idx], + } + + # breakpoint() + if 'bbox' in pred: + instance['bbox'] = pred['bbox'][idx] + diagonal = np.sqrt( + instance['bbox'][2]**2 + instance['bbox'][3]**2) + if 'areas' in pred: + instance['area'] = pred['areas'][idx] + diagonal = np.sqrt(instance['area']) + else: + # use keypoint to calculate bbox and get area + area = ( + np.max(keypoints[:, 0]) - np.min(keypoints[:, 0])) * ( + np.max(keypoints[:, 1]) - np.min(keypoints[:, 1])) + instance['area'] = area + diagonal = np.sqrt(area) + + kpts[img_id].append(instance) + + # sort keypoint results according to id and remove duplicate ones + kpts = self._sort_and_unique_bboxes(kpts, key='id') + + # score the prediction results according to `score_mode` + # and perform NMS according to `nms_mode` + valid_kpts = defaultdict(list) + if self.pred_converter is not None: + num_keypoints = self.pred_converter['num_keypoints'] + else: + num_keypoints = self.dataset_meta['num_keypoints'] + for img_id, instances in kpts.items(): + for instance in instances: + # concatenate the keypoint coordinates and scores + instance['keypoints'] = np.concatenate([ + instance['keypoints'], instance['keypoint_probs'][:, None] + ], + axis=-1) + if self.score_mode == 'bbox': + instance['score'] = instance['bbox_score'] + elif self.score_mode == 'keypoint': + instance['score'] = np.mean(instance['keypoint_scores']) + else: + bbox_score = instance['bbox_score'] + if self.score_mode == 'bbox_rle': + keypoint_scores = instance['keypoint_scores'] + instance['score'] = float(bbox_score + + np.mean(keypoint_scores) + + np.max(keypoint_scores)) + + else: # self.score_mode == 'bbox_keypoint': + mean_kpt_score = 0 + valid_num = 0 + for kpt_idx in range(num_keypoints): + kpt_score = instance['keypoint_scores'][kpt_idx] + kpt_prob = instance['keypoint_probs'][kpt_idx] + kpt_thresh = kpt_score if self.score_thresh_type == 'score' else kpt_prob + if kpt_thresh > self.keypoint_score_thr: + mean_kpt_score += kpt_score + valid_num += 1 + if valid_num != 0: + mean_kpt_score /= valid_num + instance['score'] = 
bbox_score * mean_kpt_score + # perform nms + if self.nms_mode == 'none': + valid_kpts[img_id] = instances + else: + nms = oks_nms if self.nms_mode == 'oks_nms' else soft_oks_nms + keep = nms( + instances, + self.nms_thr, + sigmas=self.dataset_meta['sigmas']) + valid_kpts[img_id] = [instances[_keep] for _keep in keep] + + # convert results to coco style and dump into a json file + self.results2json(valid_kpts, outfile_prefix=outfile_prefix) + + # only format the results without doing quantitative evaluation + if self.format_only: + logger.info('results are saved in ' + f'{osp.dirname(outfile_prefix)}') + return {} + + eval_results = OrderedDict() + + # mAP evaluation results + logger.info(f'Evaluating {self.__class__.__name__}...') + self.prob_thr = 0.51 + + # Localization evaluation results + info_str = self._do_python_keypoint_eval(outfile_prefix) + name_value = OrderedDict(info_str) + eval_results.update(name_value) + + + logger.info('Number of values per dataset: {}'.format(len(eval_results))) + + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results + + def results2json(self, keypoints: Dict[int, list], + outfile_prefix: str) -> str: + """Dump the keypoint detection results to a COCO style json file. + + Args: + keypoints (Dict[int, list]): Keypoint detection results + of the dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + str: The json file name of keypoint results. + """ + # the results with category_id + cat_results = [] + + for _, img_kpts in keypoints.items(): + _keypoints = np.array( + [img_kpt['keypoints'] for img_kpt in img_kpts]) + num_keypoints = self.dataset_meta['num_keypoints'] + # collect all the person keypoints in current image + _keypoints = _keypoints.reshape(-1, num_keypoints * 3) + + result = [] + for img_kpt, keypoint in zip(img_kpts, _keypoints): + res = { + 'image_id': img_kpt['img_id'], + 'category_id': img_kpt['category_id'], + 'keypoints': keypoint.tolist(), + 'score': float(img_kpt['score']), + } + if 'bbox' in img_kpt: + res['bbox'] = img_kpt['bbox'].tolist() + if 'keypoint_probs' in img_kpt: + res['probs'] = img_kpt['keypoint_probs'].tolist() + result.append(res) + + cat_results.extend(result) + + res_file = f'{outfile_prefix}.keypoints.json' + dump(cat_results, res_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: + """Do keypoint evaluation using COCOAPI. + + Args: + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. 
+ """ + res_file = f'{outfile_prefix}.keypoints.json' + coco_det = self.coco.loadRes(res_file) + sigmas = self.dataset_meta['sigmas'] + + info_str = [] + for extended_oks, match_by_bbox, ignore_border_points in zip( + self.extended, self.match_by_bbox, self.ignore_border_points + ): + prefix = "" + suffix = "" + if match_by_bbox: + prefix = "bbox_" + prefix + if extended_oks: + prefix = "Ex_" + prefix + if ignore_border_points: + suffix = suffix + "_NoBrd" + + conf_thr = self.prob_thr + print("+"*80) + print("COCO Eval params: Bbox {:5s}, ExOKS {:5s}".format( + str(match_by_bbox), str(extended_oks) + ), end="") + if extended_oks: + print(" with conf_thr: {:.2f} (has probability: {})".format(conf_thr, self.has_probability), end="") + print() + + coco_eval = COCOeval( + self.coco, + coco_det, + iouType=self.iou_type, + sigmas=sigmas, + use_area=self.use_area, + extended_oks=extended_oks, + match_by_bbox=match_by_bbox, + confidence_thr=conf_thr, + padding=self.padding, + ignore_near_bbox=ignore_border_points + ) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + try: + stats_names = coco_eval.stats_names + except AttributeError: + if self.iou_type == 'keypoints_crowd': + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AR', 'AR .5', 'AR .75', + 'AP(E)', 'AP(M)', 'AP(H)' + ] + else: + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', + 'AR .5', 'AR .75', 'AR (M)', 'AR (L)' + ] + i_str = list(zip(stats_names, coco_eval.stats)) + ignore_stats = self.ignore_stats + # if match_by_bbox or extended_oks: + # ignore_stats.extend(['AP (M)', 'AP (L)', 'AR (M)', 'AR (L)', 'AR']) + i_str = [(k, v) for k, v in i_str if k not in self.ignore_stats] + i_str = [(f'{prefix}{k}', v) for k, v in i_str] + i_str = [(f'{k}{suffix}', v) for k, v in i_str] + + info_str.extend(i_str) + + return info_str + + def _sort_and_unique_bboxes(self, + kpts: Dict[int, list], + key: str = 'id') -> Dict[int, list]: + """Sort keypoint detection results in each image and remove the + duplicate ones. Usually performed in multi-batch testing. + + Args: + kpts (Dict[int, list]): keypoint prediction results. The keys are + '`img_id`' and the values are list that may contain + keypoints of multiple persons. Each element in the list is a + dict containing the ``'key'`` field. + See the argument ``key`` for details. + key (str): The key name in each person prediction results. The + corresponding value will be used for sorting the results. + Default: ``'id'``. + + Returns: + Dict[int, list]: The sorted keypoint detection results. + """ + for img_id, persons in kpts.items(): + # deal with bottomup-style output + if isinstance(kpts[img_id][0][key], Sequence): + return kpts + num = len(persons) + kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key]) + for i in range(num - 1, 0, -1): + if kpts[img_id][i][key] == kpts[img_id][i - 1][key]: + del kpts[img_id][i] + + return kpts diff --git a/mmpose/evaluation/metrics/coco_wholebody_metric.py b/mmpose/evaluation/metrics/coco_wholebody_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..74dc52c2ad1db6ca4d296ed2b620bcf7290f93c2 --- /dev/null +++ b/mmpose/evaluation/metrics/coco_wholebody_metric.py @@ -0,0 +1,316 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import datetime +from typing import Dict, Optional, Sequence + +import numpy as np +from mmengine.fileio import dump +from xtcocotools.cocoeval import COCOeval + +from mmpose.registry import METRICS +from .coco_metric import CocoMetric + + +@METRICS.register_module() +class CocoWholeBodyMetric(CocoMetric): + """COCO-WholeBody evaluation metric. + + Evaluate AR, AP, and mAP for COCO-WholeBody keypoint detection tasks. + Support COCO-WholeBody dataset. Please refer to + `COCO keypoint evaluation `__ + for more details. + + Args: + ann_file (str, optional): Path to the coco format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco format. Defaults to None + use_area (bool): Whether to use ``'area'`` message in the annotations. + If the ground truth annotations (e.g. CrowdPose, AIC) do not have + the field ``'area'``, please set ``use_area=False``. + Defaults to ``True`` + iou_type (str): The same parameter as `iouType` in + :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or + ``'keypoints_crowd'`` (used in CrowdPose dataset). + Defaults to ``'keypoints'`` + score_mode (str): The mode to score the prediction results which + should be one of the following options: + + - ``'bbox'``: Take the score of bbox as the score of the + prediction results. + - ``'bbox_keypoint'``: Use keypoint score to rescore the + prediction results. + - ``'bbox_rle'``: Use rle_score to rescore the + prediction results. + + Defaults to ``'bbox_keypoint'` + keypoint_score_thr (float): The threshold of keypoint score. The + keypoints with score lower than it will not be included to + rescore the prediction results. Valid only when ``score_mode`` is + ``bbox_keypoint``. Defaults to ``0.2`` + nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), + which should be one of the following options: + + - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to + perform NMS. + - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) + to perform soft NMS. + - ``'none'``: Do not perform NMS. Typically for bottomup mode + output. + + Defaults to ``'oks_nms'` + nms_thr (float): The Object Keypoint Similarity (OKS) threshold + used in NMS when ``nms_mode`` is ``'oks_nms'`` or + ``'soft_oks_nms'``. Will retain the prediction results with OKS + lower than ``nms_thr``. Defaults to ``0.9`` + format_only (bool): Whether only format the output results without + doing quantitative evaluation. This is designed for the need of + test submission when the ground truth annotations are absent. If + set to ``True``, ``outfile_prefix`` should specify the path to + store the output results. Defaults to ``False`` + outfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., ``'a/b/prefix'``. + If not specified, a temp file will be created. Defaults to ``None`` + **kwargs: Keyword parameters passed to :class:`mmeval.BaseMetric` + """ + default_prefix: Optional[str] = 'coco-wholebody' + body_num = 17 + foot_num = 6 + face_num = 68 + left_hand_num = 21 + right_hand_num = 21 + + def gt_to_coco_json(self, gt_dicts: Sequence[dict], + outfile_prefix: str) -> str: + """Convert ground truth to coco format json file. + + Args: + gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict + contains the ground truth information about the data sample. 
+ Required keys of the each `gt_dict` in `gt_dicts`: + - `img_id`: image id of the data sample + - `width`: original image width + - `height`: original image height + - `raw_ann_info`: the raw annotation information + Optional keys: + - `crowd_index`: measure the crowding level of an image, + defined in CrowdPose dataset + It is worth mentioning that, in order to compute `CocoMetric`, + there are some required keys in the `raw_ann_info`: + - `id`: the id to distinguish different annotations + - `image_id`: the image id of this annotation + - `category_id`: the category of the instance. + - `bbox`: the object bounding box + - `keypoints`: the keypoints cooridinates along with their + visibilities. Note that it need to be aligned + with the official COCO format, e.g., a list with length + N * 3, in which N is the number of keypoints. And each + triplet represent the [x, y, visible] of the keypoint. + - 'keypoints' + - `iscrowd`: indicating whether the annotation is a crowd. + It is useful when matching the detection results to + the ground truth. + There are some optional keys as well: + - `area`: it is necessary when `self.use_area` is `True` + - `num_keypoints`: it is necessary when `self.iou_type` + is set as `keypoints_crowd`. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json file will be named + "somepath/xxx.gt.json". + Returns: + str: The filename of the json file. + """ + image_infos = [] + annotations = [] + img_ids = [] + ann_ids = [] + + for gt_dict in gt_dicts: + # filter duplicate image_info + if gt_dict['img_id'] not in img_ids: + image_info = dict( + id=gt_dict['img_id'], + width=gt_dict['width'], + height=gt_dict['height'], + ) + if self.iou_type == 'keypoints_crowd': + image_info['crowdIndex'] = gt_dict['crowd_index'] + + image_infos.append(image_info) + img_ids.append(gt_dict['img_id']) + + # filter duplicate annotations + for ann in gt_dict['raw_ann_info']: + annotation = dict( + id=ann['id'], + image_id=ann['image_id'], + category_id=ann['category_id'], + bbox=ann['bbox'], + keypoints=ann['keypoints'], + foot_kpts=ann['foot_kpts'], + face_kpts=ann['face_kpts'], + lefthand_kpts=ann['lefthand_kpts'], + righthand_kpts=ann['righthand_kpts'], + iscrowd=ann['iscrowd'], + ) + if self.use_area: + assert 'area' in ann, \ + '`area` is required when `self.use_area` is `True`' + annotation['area'] = ann['area'] + + annotations.append(annotation) + ann_ids.append(ann['id']) + + info = dict( + date_created=str(datetime.datetime.now()), + description='Coco json file converted by mmpose CocoMetric.') + coco_json: dict = dict( + info=info, + images=image_infos, + categories=self.dataset_meta['CLASSES'], + licenses=None, + annotations=annotations, + ) + converted_json_path = f'{outfile_prefix}.gt.json' + dump(coco_json, converted_json_path, sort_keys=True, indent=4) + return converted_json_path + + def results2json(self, keypoints: Dict[int, list], + outfile_prefix: str) -> str: + """Dump the keypoint detection results to a COCO style json file. + + Args: + keypoints (Dict[int, list]): Keypoint detection results + of the dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + str: The json file name of keypoint results. 
+ """ + # the results with category_id + cat_id = 1 + cat_results = [] + + cuts = np.cumsum([ + 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, + self.right_hand_num + ]) * 3 + + for _, img_kpts in keypoints.items(): + _keypoints = np.array( + [img_kpt['keypoints'] for img_kpt in img_kpts]) + num_keypoints = self.dataset_meta['num_keypoints'] + # collect all the person keypoints in current image + _keypoints = _keypoints.reshape(-1, num_keypoints * 3) + + result = [{ + 'image_id': img_kpt['img_id'], + 'category_id': cat_id, + 'keypoints': _keypoint[cuts[0]:cuts[1]].tolist(), + 'foot_kpts': _keypoint[cuts[1]:cuts[2]].tolist(), + 'face_kpts': _keypoint[cuts[2]:cuts[3]].tolist(), + 'lefthand_kpts': _keypoint[cuts[3]:cuts[4]].tolist(), + 'righthand_kpts': _keypoint[cuts[4]:cuts[5]].tolist(), + 'score': float(img_kpt['score']), + } for img_kpt, _keypoint in zip(img_kpts, _keypoints)] + + cat_results.extend(result) + + res_file = f'{outfile_prefix}.keypoints.json' + dump(cat_results, res_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: + """Do keypoint evaluation using COCOAPI. + + Args: + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. + """ + res_file = f'{outfile_prefix}.keypoints.json' + coco_det = self.coco.loadRes(res_file) + sigmas = self.dataset_meta['sigmas'] + + cuts = np.cumsum([ + 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, + self.right_hand_num + ]) + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_body', + sigmas[cuts[0]:cuts[1]], + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_foot', + sigmas[cuts[1]:cuts[2]], + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_face', + sigmas[cuts[2]:cuts[3]], + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_lefthand', + sigmas[cuts[3]:cuts[4]], + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_righthand', + sigmas[cuts[4]:cuts[5]], + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_wholebody', + sigmas, + use_area=self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', + 'AR .75', 'AR (M)', 'AR (L)' + ] + + info_str = list(zip(stats_names, coco_eval.stats)) + + return info_str diff --git a/mmpose/evaluation/metrics/hand_metric.py b/mmpose/evaluation/metrics/hand_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..004e168a7d195f2c93a1292f6c96880e82300318 --- /dev/null +++ b/mmpose/evaluation/metrics/hand_metric.py @@ -0,0 +1,200 
@@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmpose.codecs.utils import pixel_to_camera +from mmpose.registry import METRICS +from ..functional import keypoint_epe + + +@METRICS.register_module() +class InterHandMetric(BaseMetric): + + METRICS = {'MPJPE', 'MRRPE', 'HandednessAcc'} + + def __init__(self, + modes: List[str] = ['MPJPE', 'MRRPE', 'HandednessAcc'], + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + for mode in modes: + if mode not in self.METRICS: + raise ValueError("`mode` should be 'MPJPE', 'MRRPE', or " + f"'HandednessAcc', but got '{mode}'.") + + self.modes = modes + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + _, K, _ = pred_coords.shape + pred_coords_cam = pred_coords.copy() + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints_cam'] + + keypoints_cam = gt_coords.copy() + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) + + pred_hand_type = data_sample['pred_instances']['hand_type'] + gt_hand_type = data_sample['hand_type'] + if pred_hand_type is None and 'HandednessAcc' in self.modes: + raise KeyError('metric HandednessAcc is not supported') + + pred_root_depth = data_sample['pred_instances']['rel_root_depth'] + if pred_root_depth is None and 'MRRPE' in self.modes: + raise KeyError('metric MRRPE is not supported') + + abs_depth = data_sample['abs_depth'] + focal = data_sample['focal'] + principal_pt = data_sample['principal_pt'] + + result = {} + + if 'MPJPE' in self.modes: + keypoints_cam[..., :21, :] -= keypoints_cam[..., 20, :] + keypoints_cam[..., 21:, :] -= keypoints_cam[..., 41, :] + + pred_coords_cam[..., :21, 2] += abs_depth[0] + pred_coords_cam[..., 21:, 2] += abs_depth[1] + pred_coords_cam = pixel_to_camera(pred_coords_cam, focal[0], + focal[1], principal_pt[0], + principal_pt[1]) + + pred_coords_cam[..., :21, :] -= pred_coords_cam[..., 20, :] + pred_coords_cam[..., 21:, :] -= pred_coords_cam[..., 41, :] + + if gt_hand_type.all(): + single_mask = np.zeros((1, K), dtype=bool) + interacting_mask = mask + else: + single_mask = mask + interacting_mask = np.zeros((1, K), dtype=bool) + + result['pred_coords'] = pred_coords_cam + result['gt_coords'] = keypoints_cam + result['mask'] = mask + result['single_mask'] = single_mask + result['interacting_mask'] = interacting_mask + + if 'HandednessAcc' in self.modes: + hand_type_mask = data_sample['hand_type_valid'] > 0 + result['pred_hand_type'] = pred_hand_type + result['gt_hand_type'] = gt_hand_type + result['hand_type_mask'] = hand_type_mask + + if 'MRRPE' in self.modes: + keypoints_visible = gt['keypoints_visible'] + if gt_hand_type.all() and keypoints_visible[ + ..., 20] and 
keypoints_visible[..., 41]: + rel_root_mask = np.array([True]) + + pred_left_root_coords = np.array( + pred_coords[..., 41, :], dtype=np.float32) + pred_left_root_coords[..., + 2] += abs_depth[0] + pred_root_depth + pred_left_root_coords = pixel_to_camera( + pred_left_root_coords, focal[0], focal[1], + principal_pt[0], principal_pt[1]) + + pred_right_root_coords = np.array( + pred_coords[..., 20, :], dtype=np.float32) + pred_right_root_coords[..., 2] += abs_depth[0] + pred_right_root_coords = pixel_to_camera( + pred_right_root_coords, focal[0], focal[1], + principal_pt[0], principal_pt[1]) + pred_rel_root_coords = pred_left_root_coords - \ + pred_right_root_coords + pred_rel_root_coords = np.expand_dims( + pred_rel_root_coords, axis=0) + gt_rel_root_coords = gt_coords[..., + 41, :] - gt_coords[..., + 20, :] + gt_rel_root_coords = np.expand_dims( + gt_rel_root_coords, axis=0) + else: + rel_root_mask = np.array([False]) + pred_rel_root_coords = np.array([[0, 0, 0]]) + pred_rel_root_coords = pred_rel_root_coords.reshape( + 1, 1, 3) + gt_rel_root_coords = np.array([[0, 0, 0]]).reshape(1, 1, 3) + + result['pred_rel_root_coords'] = pred_rel_root_coords + result['gt_rel_root_coords'] = gt_rel_root_coords + result['rel_root_mask'] = rel_root_mask + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + metrics = dict() + + logger.info(f'Evaluating {self.__class__.__name__}...') + + if 'MPJPE' in self.modes: + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate( + [result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + single_mask = np.concatenate( + [result['single_mask'] for result in results]) + interacting_mask = np.concatenate( + [result['interacting_mask'] for result in results]) + + metrics['MPJPE_all'] = keypoint_epe(pred_coords, gt_coords, mask) + metrics['MPJPE_single'] = keypoint_epe(pred_coords, gt_coords, + single_mask) + metrics['MPJPE_interacting'] = keypoint_epe( + pred_coords, gt_coords, interacting_mask) + + if 'HandednessAcc' in self.modes: + pred_hand_type = np.concatenate( + [result['pred_hand_type'] for result in results]) + gt_hand_type = np.concatenate( + [result['gt_hand_type'] for result in results]) + hand_type_mask = np.concatenate( + [result['hand_type_mask'] for result in results]) + acc = (pred_hand_type == gt_hand_type).all(axis=-1) + metrics['HandednessAcc'] = np.mean(acc[hand_type_mask]) + + if 'MRRPE' in self.modes: + pred_rel_root_coords = np.concatenate( + [result['pred_rel_root_coords'] for result in results]) + gt_rel_root_coords = np.concatenate( + [result['gt_rel_root_coords'] for result in results]) + rel_root_mask = np.array( + [result['rel_root_mask'] for result in results]) + metrics['MRRPE'] = keypoint_epe(pred_rel_root_coords, + gt_rel_root_coords, rel_root_mask) + return metrics diff --git a/mmpose/evaluation/metrics/keypoint_2d_metrics.py b/mmpose/evaluation/metrics/keypoint_2d_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..c0be4b398f2d7310aa687a2376efee3eb068d3cd --- /dev/null +++ 
b/mmpose/evaluation/metrics/keypoint_2d_metrics.py @@ -0,0 +1,924 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict, Optional, Sequence, Union + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmpose.registry import METRICS +from ..functional import (keypoint_auc, keypoint_epe, keypoint_nme, + keypoint_pck_accuracy) + + +@METRICS.register_module() +class PCKAccuracy(BaseMetric): + """PCK accuracy evaluation metric. + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. + Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import PCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 15 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... } + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox') + ...: UserWarning: The prefix is not set in metric class PCKAccuracy. + >>> pck_metric.process(data_batch, data_samples) + >>> pck_metric.evaluate(1) + 10/26 15:37:57 - mmengine - INFO - Evaluating PCKAccuracy (normalized by ``"bbox_size"``)... # noqa + {'PCK': 1.0} + + """ + + def __init__(self, + thr: float = 0.05, + norm_item: Union[str, Sequence[str]] = 'bbox', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.thr = thr + self.norm_item = norm_item if isinstance(norm_item, + (tuple, + list)) else [norm_item] + allow_normalized_items = ['bbox', 'head', 'torso'] + for item in self.norm_item: + if item not in allow_normalized_items: + raise KeyError( + f'The normalized item {item} is not supported by ' + f"{self.__class__.__name__}. 
Should be one of 'bbox', " + f"'head', 'torso', but got {item}.") + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool) + if mask.ndim == 3: + mask = mask[:, :, 0] + mask = mask.reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + if 'bbox' in self.norm_item: + assert 'bboxes' in gt, 'The ground truth data info do not ' \ + 'have the expected normalized_item ``"bbox"``.' + # ground truth bboxes, [1, 4] + bbox_size_ = np.max(gt['bboxes'][0][2:] - gt['bboxes'][0][:2]) + bbox_size = np.array([bbox_size_, bbox_size_]).reshape(-1, 2) + result['bbox_size'] = bbox_size + + if 'head' in self.norm_item: + assert 'head_size' in gt, 'The ground truth data info do ' \ + 'not have the expected normalized_item ``"head_size"``.' + # ground truth bboxes + head_size_ = gt['head_size'] + head_size = np.array([head_size_, head_size_]).reshape(-1, 2) + result['head_size'] = head_size + + if 'torso' in self.norm_item: + # used in JhmdbDataset + torso_size_ = np.linalg.norm(gt_coords[0][4] - gt_coords[0][5]) + if torso_size_ < 1: + torso_size_ = np.linalg.norm(pred_coords[0][4] - + pred_coords[0][5]) + warnings.warn('Ground truth torso size < 1. ' + 'Use torso size from predicted ' + 'keypoint results instead.') + torso_size = np.array([torso_size_, + torso_size_]).reshape(-1, 2) + result['torso_size'] = torso_size + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + The returned result dict may have the following keys: + - 'PCK': The pck accuracy normalized by `bbox_size`. + - 'PCKh': The pck accuracy normalized by `head_size`. + - 'tPCK': The pck accuracy normalized by `torso_size`. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + metrics = dict() + if 'bbox' in self.norm_item: + norm_size_bbox = np.concatenate( + [result['bbox_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"bbox_size"``)...') + + _, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_bbox) + metrics['PCK'] = pck + + if 'head' in self.norm_item: + norm_size_head = np.concatenate( + [result['head_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"head_size"``)...') + + _, pckh, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_head) + metrics['PCKh'] = pckh + + if 'torso' in self.norm_item: + norm_size_torso = np.concatenate( + [result['torso_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"torso_size"``)...') + + _, tpck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_torso) + metrics['tPCK'] = tpck + + return metrics + + +@METRICS.register_module() +class MpiiPCKAccuracy(PCKAccuracy): + """PCKh accuracy evaluation metric for MPII dataset. + + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. + Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'head'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import MpiiPCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 16 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + 1.0 + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.head_size = np.random.random((1, 1)) * 10 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... 
} + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> mpii_pck_metric = MpiiPCKAccuracy(thr=0.3, norm_item='head') + ... UserWarning: The prefix is not set in metric class MpiiPCKAccuracy. + >>> mpii_pck_metric.process(data_batch, data_samples) + >>> mpii_pck_metric.evaluate(1) + 10/26 17:43:39 - mmengine - INFO - Evaluating MpiiPCKAccuracy (normalized by ``"head_size"``)... # noqa + {'Head PCK': 100.0, 'Shoulder PCK': 100.0, 'Elbow PCK': 100.0, + Wrist PCK': 100.0, 'Hip PCK': 100.0, 'Knee PCK': 100.0, + 'Ankle PCK': 100.0, 'PCK': 100.0, 'PCK@0.1': 100.0} + """ + + def __init__(self, + thr: float = 0.5, + norm_item: Union[str, Sequence[str]] = 'head', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__( + thr=thr, + norm_item=norm_item, + collect_device=collect_device, + prefix=prefix) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + If `'head'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `head_size`, which have the following keys: + - 'Head PCK': The PCK of head + - 'Shoulder PCK': The PCK of shoulder + - 'Elbow PCK': The PCK of elbow + - 'Wrist PCK': The PCK of wrist + - 'Hip PCK': The PCK of hip + - 'Knee PCK': The PCK of knee + - 'Ankle PCK': The PCK of ankle + - 'PCK': The mean PCK over all keypoints + - 'PCK@0.1': The mean PCK at threshold 0.1 + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + # MPII uses matlab format, gt index is 1-based, + # convert 0-based index to 1-based index + pred_coords = pred_coords + 1.0 + + metrics = {} + if 'head' in self.norm_item: + norm_size_head = np.concatenate( + [result['head_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"head_size"``)...') + + pck_p, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_head) + + jnt_count = np.sum(mask, axis=0) + PCKh = 100. * pck_p + + rng = np.arange(0, 0.5 + 0.01, 0.01) + pckAll = np.zeros((len(rng), 16), dtype=np.float32) + + for r, threshold in enumerate(rng): + _pck, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, + mask, threshold, + norm_size_head) + pckAll[r, :] = 100. 
* _pck + + PCKh = np.ma.array(PCKh, mask=False) + PCKh.mask[6:8] = True + + jnt_count = np.ma.array(jnt_count, mask=False) + jnt_count.mask[6:8] = True + jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64) + + # dataset_joints_idx: + # head 9 + # lsho 13 rsho 12 + # lelb 14 relb 11 + # lwri 15 rwri 10 + # lhip 3 rhip 2 + # lkne 4 rkne 1 + # lank 5 rank 0 + stats = { + 'Head PCK': PCKh[9], + 'Shoulder PCK': 0.5 * (PCKh[13] + PCKh[12]), + 'Elbow PCK': 0.5 * (PCKh[14] + PCKh[11]), + 'Wrist PCK': 0.5 * (PCKh[15] + PCKh[10]), + 'Hip PCK': 0.5 * (PCKh[3] + PCKh[2]), + 'Knee PCK': 0.5 * (PCKh[4] + PCKh[1]), + 'Ankle PCK': 0.5 * (PCKh[5] + PCKh[0]), + 'PCK': np.sum(PCKh * jnt_ratio), + 'PCK@0.1': np.sum(pckAll[10, :] * jnt_ratio) + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + return metrics + + +@METRICS.register_module() +class JhmdbPCKAccuracy(PCKAccuracy): + """PCK accuracy evaluation metric for Jhmdb dataset. + + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. + Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import JhmdbPCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 15 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 + >>> gt_instances.head_size = np.random.random((1, 1)) * 10 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... } + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item=['bbox', 'torso']) + ... UserWarning: The prefix is not set in metric class JhmdbPCKAccuracy. + >>> jhmdb_pck_metric.process(data_batch, data_samples) + >>> jhmdb_pck_metric.evaluate(1) + 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"bbox_size"``)... # noqa + 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"torso_size"``)... 
# noqa + {'Head PCK': 1.0, 'Sho PCK': 1.0, 'Elb PCK': 1.0, 'Wri PCK': 1.0, + 'Hip PCK': 1.0, 'Knee PCK': 1.0, 'Ank PCK': 1.0, 'PCK': 1.0, + 'Head tPCK': 1.0, 'Sho tPCK': 1.0, 'Elb tPCK': 1.0, 'Wri tPCK': 1.0, + 'Hip tPCK': 1.0, 'Knee tPCK': 1.0, 'Ank tPCK': 1.0, 'tPCK': 1.0} + """ + + def __init__(self, + thr: float = 0.05, + norm_item: Union[str, Sequence[str]] = 'bbox', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__( + thr=thr, + norm_item=norm_item, + collect_device=collect_device, + prefix=prefix) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + If `'bbox'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `bbox_size`, which have the following keys: + - 'Head PCK': The PCK of head + - 'Sho PCK': The PCK of shoulder + - 'Elb PCK': The PCK of elbow + - 'Wri PCK': The PCK of wrist + - 'Hip PCK': The PCK of hip + - 'Knee PCK': The PCK of knee + - 'Ank PCK': The PCK of ankle + - 'PCK': The mean PCK over all keypoints + If `'torso'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `torso_size`, which have the following keys: + - 'Head tPCK': The PCK of head + - 'Sho tPCK': The PCK of shoulder + - 'Elb tPCK': The PCK of elbow + - 'Wri tPCK': The PCK of wrist + - 'Hip tPCK': The PCK of hip + - 'Knee tPCK': The PCK of knee + - 'Ank tPCK': The PCK of ankle + - 'tPCK': The mean PCK over all keypoints + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + metrics = dict() + if 'bbox' in self.norm_item: + norm_size_bbox = np.concatenate( + [result['bbox_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"bbox_size"``)...') + + pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_bbox) + stats = { + 'Head PCK': pck_p[2], + 'Sho PCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], + 'Elb PCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], + 'Wri PCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], + 'Hip PCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], + 'Knee PCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], + 'Ank PCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], + 'PCK': pck + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + if 'torso' in self.norm_item: + norm_size_torso = np.concatenate( + [result['torso_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"torso_size"``)...') + + pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_torso) + + stats = { + 'Head tPCK': pck_p[2], + 'Sho tPCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], + 'Elb tPCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], + 'Wri tPCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], + 'Hip tPCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], + 'Knee tPCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], + 'Ank tPCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], + 'tPCK': pck + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + return metrics + + 
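All three PCK-style metrics above (``PCKAccuracy``, ``MpiiPCKAccuracy``, ``JhmdbPCKAccuracy``) reduce to the same per-keypoint test: the distance between a predicted keypoint and its ground truth, normalized by a reference size (bbox, head or torso), must fall below ``thr``. The sketch below is illustrative only (the helper name ``pck_sketch`` is made up here); the actual logic lives in ``keypoint_pck_accuracy`` from ``..functional``, which additionally reports per-keypoint accuracies and the count of valid joints.

import numpy as np

def pck_sketch(pred, gt, mask, thr, norm_size):
    # pred, gt: [N, K, 2] keypoint coordinates; mask: [N, K] visibility
    # norm_size: [N, 2] normalization factor (e.g. bbox/head/torso size)
    # normalize each coordinate error, then take the per-keypoint distance
    dist = np.linalg.norm((pred - gt) / norm_size[:, None, :], axis=-1)
    # a visible keypoint counts as correct if its normalized distance < thr
    correct = (dist < thr) & mask
    # average over visible keypoints only
    return correct.sum() / max(mask.sum(), 1)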
+@METRICS.register_module() +class AUC(BaseMetric): + """AUC evaluation metric. + + Calculate the Area Under Curve (AUC) of keypoint PCK accuracy. + + By altering the threshold percentage in the calculation of PCK accuracy, + AUC can be generated to further evaluate the pose estimation algorithms. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + norm_factor (float): AUC normalization factor, Default: 30 (pixels). + num_thrs (int): number of thresholds to calculate auc. Default: 20. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + def __init__(self, + norm_factor: float = 30, + num_thrs: int = 20, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.norm_factor = norm_factor + self.num_thrs = num_thrs + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_sample (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool) + if mask.ndim == 3: + mask = mask[:, :, 0] + mask = mask.reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + + auc = keypoint_auc(pred_coords, gt_coords, mask, self.norm_factor, + self.num_thrs) + + metrics = dict() + metrics['AUC'] = auc + + return metrics + + +@METRICS.register_module() +class EPE(BaseMetric): + """EPE evaluation metric. + + Calculate the end-point error (EPE) of keypoints. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. 
Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool) + if mask.ndim == 3: + mask = mask[:, :, 0] + mask = mask.reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + + epe = keypoint_epe(pred_coords, gt_coords, mask) + + metrics = dict() + metrics['EPE'] = epe + + return metrics + + +@METRICS.register_module() +class NME(BaseMetric): + """NME evaluation metric. + + Calculate the normalized mean error (NME) of keypoints. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + norm_mode (str): The normalization mode. There are two valid modes: + `'use_norm_item'` and `'keypoint_distance'`. + When set as `'use_norm_item'`, should specify the argument + `norm_item`, which represents the item in the datainfo that + will be used as the normalization factor. + When set as `'keypoint_distance'`, should specify the argument + `keypoint_indices` that are used to calculate the keypoint + distance as the normalization factor. + norm_item (str, optional): The item used as the normalization factor. + For example, `'bbox_size'` in `'AFLWDataset'`. Only valid when + ``norm_mode`` is ``use_norm_item``. + Default: ``None``. + keypoint_indices (Sequence[int], optional): The keypoint indices used + to calculate the keypoint distance as the normalization factor. + Only valid when ``norm_mode`` is ``keypoint_distance``. + If set as None, will use the default ``keypoint_indices`` in + `DEFAULT_KEYPOINT_INDICES` for specific datasets, else use the + given ``keypoint_indices`` of the dataset. Default: ``None``. 
+ collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + DEFAULT_KEYPOINT_INDICES = { + # horse10: corresponding to `nose` and `eye` keypoints + 'horse10': [0, 1], + # 300w: corresponding to `right-most` and `left-most` eye keypoints + '300w': [36, 45], + # coco_wholebody_face corresponding to `right-most` and `left-most` + # eye keypoints + 'coco_wholebody_face': [36, 45], + # cofw: corresponding to `right-most` and `left-most` eye keypoints + 'cofw': [8, 9], + # wflw: corresponding to `right-most` and `left-most` eye keypoints + 'wflw': [60, 72], + # lapa: corresponding to `right-most` and `left-most` eye keypoints + 'lapa': [66, 79], + } + + def __init__(self, + norm_mode: str, + norm_item: Optional[str] = None, + keypoint_indices: Optional[Sequence[int]] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + allowed_norm_modes = ['use_norm_item', 'keypoint_distance'] + if norm_mode not in allowed_norm_modes: + raise KeyError("`norm_mode` should be 'use_norm_item' or " + f"'keypoint_distance', but got {norm_mode}.") + + self.norm_mode = norm_mode + if self.norm_mode == 'use_norm_item': + if not norm_item: + raise KeyError('`norm_mode` is set to `"use_norm_item"`, ' + 'please specify the `norm_item` in the ' + 'datainfo used as the normalization factor.') + self.norm_item = norm_item + self.keypoint_indices = keypoint_indices + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool) + if mask.ndim == 3: + mask = mask[:, :, 0] + mask = mask.reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + if self.norm_item: + if self.norm_item == 'bbox_size': + assert 'bboxes' in gt, 'The ground truth data info do ' \ + 'not have the item ``bboxes`` for expected ' \ + 'normalized_item ``"bbox_size"``.' 
+ # ground truth bboxes, [1, 4] + bbox_size = np.max(gt['bboxes'][0][2:] - + gt['bboxes'][0][:2]) + result['bbox_size'] = np.array([bbox_size]).reshape(-1, 1) + else: + assert self.norm_item in gt, f'The ground truth data ' \ + f'info do not have the expected normalized factor ' \ + f'"{self.norm_item}"' + # ground truth norm_item + result[self.norm_item] = np.array( + gt[self.norm_item]).reshape([-1, 1]) + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + metrics = dict() + + if self.norm_mode == 'use_norm_item': + normalize_factor_ = np.concatenate( + [result[self.norm_item] for result in results]) + # normalize_factor: [N, 2] + normalize_factor = np.tile(normalize_factor_, [1, 2]) + nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) + metrics['NME'] = nme + + else: + if self.keypoint_indices is None: + # use default keypoint_indices in some datasets + dataset_name = self.dataset_meta['dataset_name'] + if dataset_name not in self.DEFAULT_KEYPOINT_INDICES: + raise KeyError( + '`norm_mode` is set to `keypoint_distance`, and the ' + 'keypoint_indices is set to None, can not find the ' + 'keypoint_indices in `DEFAULT_KEYPOINT_INDICES`, ' + 'please specify `keypoint_indices` appropriately.') + self.keypoint_indices = self.DEFAULT_KEYPOINT_INDICES[ + dataset_name] + else: + assert len(self.keypoint_indices) == 2, 'The keypoint '\ + 'indices used for normalization should be a pair.' + keypoint_id2name = self.dataset_meta['keypoint_id2name'] + dataset_name = self.dataset_meta['dataset_name'] + for idx in self.keypoint_indices: + assert idx in keypoint_id2name, f'The {dataset_name} '\ + f'dataset does not contain the required '\ + f'{idx}-th keypoint.' + # normalize_factor: [N, 2] + normalize_factor = self._get_normalize_factor(gt_coords=gt_coords) + nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) + metrics['NME'] = nme + + return metrics + + def _get_normalize_factor(self, gt_coords: np.ndarray) -> np.ndarray: + """Get the normalize factor. generally inter-ocular distance measured + as the Euclidean distance between the outer corners of the eyes is + used. + + Args: + gt_coords (np.ndarray[N, K, 2]): Groundtruth keypoint coordinates. + + Returns: + np.ndarray[N, 2]: normalized factor + """ + idx1, idx2 = self.keypoint_indices + + interocular = np.linalg.norm( + gt_coords[:, idx1, :] - gt_coords[:, idx2, :], + axis=1, + keepdims=True) + + return np.tile(interocular, [1, 2]) diff --git a/mmpose/evaluation/metrics/keypoint_3d_metrics.py b/mmpose/evaluation/metrics/keypoint_3d_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..fb3447bb3ff4a94f192a912c17062f048e838b98 --- /dev/null +++ b/mmpose/evaluation/metrics/keypoint_3d_metrics.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
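+# Typical usage note: this metric is normally built from an mmengine-style
+# evaluator config rather than instantiated directly, e.g. (illustrative):
+#     val_evaluator = [dict(type='MPJPE', mode='mpjpe'),
+#                      dict(type='MPJPE', mode='p-mpjpe')]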
+from collections import defaultdict
+from os import path as osp
+from typing import Dict, List, Optional, Sequence
+
+import numpy as np
+from mmengine.evaluator import BaseMetric
+from mmengine.logging import MMLogger
+
+from mmpose.registry import METRICS
+from ..functional import keypoint_mpjpe
+
+
+@METRICS.register_module()
+class MPJPE(BaseMetric):
+    """MPJPE evaluation metric.
+
+    Calculate the mean per-joint position error (MPJPE) of keypoints.
+
+    Note:
+        - length of dataset: N
+        - num_keypoints: K
+        - number of keypoint dimensions: D (typically D = 3)
+
+    Args:
+        mode (str): Method to align the prediction with the
+            ground truth. Supported options are:
+
+                - ``'mpjpe'``: no alignment will be applied
+                - ``'p-mpjpe'``: align in the least-square sense in
+                  scale, rotation, and translation (Procrustes analysis)
+                - ``'n-mpjpe'``: align in the least-square sense in scale
+
+        collect_device (str): Device name used for collecting results from
+            different ranks during distributed training. Must be ``'cpu'`` or
+            ``'gpu'``. Default: ``'cpu'``.
+        prefix (str, optional): The prefix that will be added in the metric
+            names to disambiguate homonymous metrics of different evaluators.
+            If prefix is not provided in the argument, ``self.default_prefix``
+            will be used instead. Default: ``None``.
+        skip_list (list, optional): The list of subject and action combinations
+            to be skipped. Default: [].
+    """
+
+    ALIGNMENT = {'mpjpe': 'none', 'p-mpjpe': 'procrustes', 'n-mpjpe': 'scale'}
+
+    def __init__(self,
+                 mode: str = 'mpjpe',
+                 collect_device: str = 'cpu',
+                 prefix: Optional[str] = None,
+                 skip_list: List[str] = []) -> None:
+        super().__init__(collect_device=collect_device, prefix=prefix)
+        allowed_modes = self.ALIGNMENT.keys()
+        if mode not in allowed_modes:
+            raise KeyError("`mode` should be 'mpjpe', 'p-mpjpe', or "
+                           f"'n-mpjpe', but got '{mode}'.")
+
+        self.mode = mode
+        self.skip_list = skip_list
+
+    def process(self, data_batch: Sequence[dict],
+                data_samples: Sequence[dict]) -> None:
+        """Process one batch of data samples and predictions. The processed
+        results should be stored in ``self.results``, which will be used to
+        compute the metrics when all batches have been processed.
+
+        Args:
+            data_batch (Sequence[dict]): A batch of data
+                from the dataloader.
+            data_samples (Sequence[dict]): A batch of outputs from
+                the model.
+        """
+        for data_sample in data_samples:
+            # predicted keypoints coordinates, [T, K, D]
+            pred_coords = data_sample['pred_instances']['keypoints']
+            if pred_coords.ndim == 4:
+                pred_coords = np.squeeze(pred_coords, axis=0)
+            # ground truth data_info
+            gt = data_sample['gt_instances']
+            # ground truth keypoints coordinates, [T, K, D]
+            gt_coords = gt['lifting_target']
+            # ground truth keypoints_visible, [T, K, 1]
+            mask = gt['lifting_target_visible'].astype(bool).reshape(
+                gt_coords.shape[0], -1)
+            # instance action
+            img_path = data_sample['target_img_path'][0]
+            _, rest = osp.basename(img_path).split('_', 1)
+            action, _ = rest.split('.', 1)
+            actions = np.array([action] * gt_coords.shape[0])
+
+            subj_act = osp.basename(img_path).split('.')[0]
+            if subj_act in self.skip_list:
+                continue
+
+            result = {
+                'pred_coords': pred_coords,
+                'gt_coords': gt_coords,
+                'mask': mask,
+                'actions': actions
+            }
+
+            self.results.append(result)
+
+    def compute_metrics(self, results: list) -> Dict[str, float]:
+        """Compute the metrics from processed results.
+
+        Args:
+            results (list): The processed results of each batch.
+
+        Returns:
+            Dict[str, float]: The computed metrics.
The keys are the names of + the metrics, and the values are the corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + # action_category_indices: Dict[List[int]] + action_category_indices = defaultdict(list) + actions = np.concatenate([result['actions'] for result in results]) + for idx, action in enumerate(actions): + action_category = action.split('_')[0] + action_category_indices[action_category].append(idx) + + error_name = self.mode.upper() + + logger.info(f'Evaluating {self.mode.upper()}...') + metrics = dict() + + metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask, + self.ALIGNMENT[self.mode]) + + for action_category, indices in action_category_indices.items(): + metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe( + pred_coords[indices], gt_coords[indices], mask[indices], + self.ALIGNMENT[self.mode]) + + return metrics diff --git a/mmpose/evaluation/metrics/keypoint_partition_metric.py b/mmpose/evaluation/metrics/keypoint_partition_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..fb30eca0d57f68e94cba93deec1f63bd333468aa --- /dev/null +++ b/mmpose/evaluation/metrics/keypoint_partition_metric.py @@ -0,0 +1,203 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from collections import OrderedDict +from copy import deepcopy +from typing import Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric + +from mmpose.registry import METRICS + + +@METRICS.register_module() +class KeypointPartitionMetric(BaseMetric): + """Wrapper metric for evaluating pose metric on user-defined body parts. + + Sometimes one may be interested in the performance of a pose model on + certain body parts rather than on all the keypoints. For example, + ``CocoWholeBodyMetric`` evaluates coco metric on body, foot, face, + lefthand and righthand. However, ``CocoWholeBodyMetric`` cannot be + applied to arbitrary custom datasets. This wrapper metric solves this + problem. + + Supported metrics: + ``CocoMetric`` Note 1: all keypoint ground truth should be stored in + `keypoints` not other data fields. Note 2: `ann_file` is not + supported, it will be ignored. Note 3: `score_mode` other than + 'bbox' may produce results different from the + ``CocoWholebodyMetric``. Note 4: `nms_mode` other than 'none' may + produce results different from the ``CocoWholebodyMetric``. + ``PCKAccuracy`` Note 1: data fields required by ``PCKAccuracy`` should + be provided, such as bbox, head_size, etc. Note 2: In terms of + 'torso', since it is specifically designed for ``JhmdbDataset``, it is + not recommended to use it for other datasets. + ``AUC`` supported without limitations. + ``EPE`` supported without limitations. + ``NME`` only `norm_mode` = 'use_norm_item' is supported, + 'keypoint_distance' is incompatible with ``KeypointPartitionMetric``. + + Incompatible metrics: + The following metrics are dataset specific metrics: + ``CocoWholeBodyMetric`` + ``MpiiPCKAccuracy`` + ``JhmdbPCKAccuracy`` + ``PoseTrack18Metric`` + Keypoint partitioning is included in these metrics. + + Args: + metric (dict): arguments to instantiate a metric, please refer to the + arguments required by the metric of your choice. 
+ partitions (dict): definition of body partitions. For example, if we + have 10 keypoints in total, the first 7 keypoints belong to body + and the last 3 keypoints belong to foot, this field can be like + this: + dict( + body=[0, 1, 2, 3, 4, 5, 6], + foot=[7, 8, 9], + all=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + ) + where the numbers are the indices of keypoints and they can be + discontinuous. + """ + + def __init__( + self, + metric: dict, + partitions: dict, + ) -> None: + super().__init__() + # check metric type + supported_metric_types = [ + 'CocoMetric', 'PCKAccuracy', 'AUC', 'EPE', 'NME' + ] + if metric['type'] not in supported_metric_types: + raise ValueError( + 'Metrics supported by KeypointPartitionMetric are CocoMetric, ' + 'PCKAccuracy, AUC, EPE and NME, ' + f"but got {metric['type']}") + + # check CocoMetric arguments + if metric['type'] == 'CocoMetric': + if 'ann_file' in metric: + warnings.warn( + 'KeypointPartitionMetric does not support the ann_file ' + 'argument of CocoMetric, this argument will be ignored.') + metric['ann_file'] = None + score_mode = metric.get('score_mode', 'bbox_keypoint') + if score_mode != 'bbox': + warnings.warn( + 'When using KeypointPartitionMetric with CocoMetric, ' + "if score_mode is not 'bbox', pose scores will be " + "calculated part by part rather than by 'wholebody'. " + 'Therefore, this may produce results different from the ' + 'CocoWholebodyMetric.') + nms_mode = metric.get('nms_mode', 'oks_nms') + if nms_mode != 'none': + warnings.warn( + 'When using KeypointPartitionMetric with CocoMetric, ' + 'oks_nms and soft_oks_nms will be calculated part by part ' + "rather than by 'wholebody'. Therefore, this may produce " + 'results different from the CocoWholebodyMetric.') + + # check PCKAccuracy arguments + if metric['type'] == 'PCKAccuracy': + norm_item = metric.get('norm_item', 'bbox') + if norm_item == 'torso' or 'torso' in norm_item: + warnings.warn( + 'norm_item torso is used in JhmdbDataset, it may not be ' + 'compatible with other datasets, use at your own risk.') + + # check NME arguments + if metric['type'] == 'NME': + assert 'norm_mode' in metric, \ + 'Missing norm_mode required by the NME metric.' + if metric['norm_mode'] != 'use_norm_item': + raise ValueError( + "NME norm_mode 'keypoint_distance' is incompatible with " + 'KeypointPartitionMetric.') + + # check partitions + assert len(partitions) > 0, 'There should be at least one partition.' + for partition_name, partition in partitions.items(): + assert isinstance(partition, Sequence), \ + 'Each partition should be a sequence.' + assert len(partition) > 0, \ + 'Each partition should have at least one element.' + self.partitions = partitions + + # instantiate metrics for each partition + self.metrics = {} + for partition_name in partitions.keys(): + _metric = deepcopy(metric) + if 'outfile_prefix' in _metric: + _metric['outfile_prefix'] = _metric[ + 'outfile_prefix'] + '.' 
+ partition_name + self.metrics[partition_name] = METRICS.build(_metric) + + @BaseMetric.dataset_meta.setter + def dataset_meta(self, dataset_meta: dict) -> None: + """Set the dataset meta info to the metric.""" + self._dataset_meta = dataset_meta + # sigmas required by coco metric have to be split as well + for partition_name, keypoint_ids in self.partitions.items(): + _dataset_meta = deepcopy(dataset_meta) + _dataset_meta['num_keypoints'] = len(keypoint_ids) + _dataset_meta['sigmas'] = _dataset_meta['sigmas'][keypoint_ids] + self.metrics[partition_name].dataset_meta = _dataset_meta + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Split data samples by partitions, then call metric.process part by + part.""" + parted_data_samples = { + partition_name: [] + for partition_name in self.partitions.keys() + } + for data_sample in data_samples: + for partition_name, keypoint_ids in self.partitions.items(): + _data_sample = deepcopy(data_sample) + if 'keypoint_scores' in _data_sample['pred_instances']: + _data_sample['pred_instances'][ + 'keypoint_scores'] = _data_sample['pred_instances'][ + 'keypoint_scores'][:, keypoint_ids] + _data_sample['pred_instances']['keypoints'] = _data_sample[ + 'pred_instances']['keypoints'][:, keypoint_ids] + _data_sample['gt_instances']['keypoints'] = _data_sample[ + 'gt_instances']['keypoints'][:, keypoint_ids] + _data_sample['gt_instances'][ + 'keypoints_visible'] = _data_sample['gt_instances'][ + 'keypoints_visible'][:, keypoint_ids] + + # for coco metric + if 'raw_ann_info' in _data_sample: + raw_ann_info = _data_sample['raw_ann_info'] + anns = raw_ann_info if isinstance( + raw_ann_info, list) else [raw_ann_info] + for ann in anns: + if 'keypoints' in ann: + keypoints = np.array(ann['keypoints']).reshape( + -1, 3) + keypoints = keypoints[keypoint_ids] + num_keypoints = np.sum(keypoints[:, 2] > 0) + ann['keypoints'] = keypoints.flatten().tolist() + ann['num_keypoints'] = num_keypoints + + parted_data_samples[partition_name].append(_data_sample) + + for partition_name, metric in self.metrics.items(): + metric.process(data_batch, parted_data_samples[partition_name]) + + def compute_metrics(self, results: list) -> dict: + pass + + def evaluate(self, size: int) -> dict: + """Run evaluation for each partition.""" + eval_results = OrderedDict() + for partition_name, metric in self.metrics.items(): + _eval_results = metric.evaluate(size) + for key in list(_eval_results.keys()): + new_key = partition_name + '/' + key + _eval_results[new_key] = _eval_results.pop(key) + eval_results.update(_eval_results) + return eval_results diff --git a/mmpose/evaluation/metrics/posetrack18_metric.py b/mmpose/evaluation/metrics/posetrack18_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..86f801455a62467aaf45722210a6018c95b0bdd4 --- /dev/null +++ b/mmpose/evaluation/metrics/posetrack18_metric.py @@ -0,0 +1,220 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +from typing import Dict, List, Optional + +import numpy as np +from mmengine.fileio import dump, load +from mmengine.logging import MMLogger + +from mmpose.registry import METRICS +from .coco_metric import CocoMetric + +try: + from poseval import eval_helpers + from poseval.evaluateAP import evaluateAP + has_poseval = True +except (ImportError, ModuleNotFoundError): + has_poseval = False + + +@METRICS.register_module() +class PoseTrack18Metric(CocoMetric): + """PoseTrack18 evaluation metric. 
+
+    Evaluate AP and mAP for keypoint detection tasks.
+    Support PoseTrack18 (video) dataset. Please refer to the official
+    PoseTrack18 benchmark and the ``poseval`` toolkit for more details.
+
+    Args:
+        ann_file (str, optional): Path to the coco format annotation file.
+            If not specified, ground truth annotations from the dataset will
+            be converted to coco format. Defaults to None
+        score_mode (str): The mode to score the prediction results which
+            should be one of the following options:
+
+                - ``'bbox'``: Take the score of bbox as the score of the
+                  prediction results.
+                - ``'bbox_keypoint'``: Use keypoint score to rescore the
+                  prediction results.
+
+            Defaults to ``'bbox_keypoint'``
+        keypoint_score_thr (float): The threshold of keypoint score. The
+            keypoints with score lower than it will not be included to
+            rescore the prediction results. Valid only when ``score_mode`` is
+            ``bbox_keypoint``. Defaults to ``0.2``
+        nms_mode (str): The mode to perform Non-Maximum Suppression (NMS),
+            which should be one of the following options:
+
+                - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to
+                  perform NMS.
+                - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS)
+                  to perform soft NMS.
+                - ``'none'``: Do not perform NMS. Typically for bottomup mode
+                  output.
+
+            Defaults to ``'oks_nms'``
+        nms_thr (float): The Object Keypoint Similarity (OKS) threshold
+            used in NMS when ``nms_mode`` is ``'oks_nms'`` or
+            ``'soft_oks_nms'``. Will retain the prediction results with OKS
+            lower than ``nms_thr``. Defaults to ``0.9``
+        format_only (bool): Whether only format the output results without
+            doing quantitative evaluation. This is designed for the need of
+            test submission when the ground truth annotations are absent. If
+            set to ``True``, ``outfile_prefix`` should specify the path to
+            store the output results. Defaults to ``False``
+        outfile_prefix (str | None): The prefix of json files. It includes
+            the file path and the prefix of filename, e.g., ``'a/b/prefix'``.
+            If not specified, a temp file will be created. Defaults to ``None``
+        **kwargs: Keyword parameters passed to
+            :class:`mmengine.evaluator.BaseMetric`
+    """
+    default_prefix: Optional[str] = 'posetrack18'
+
+    def __init__(self,
+                 ann_file: Optional[str] = None,
+                 score_mode: str = 'bbox_keypoint',
+                 keypoint_score_thr: float = 0.2,
+                 nms_mode: str = 'oks_nms',
+                 nms_thr: float = 0.9,
+                 format_only: bool = False,
+                 outfile_prefix: Optional[str] = None,
+                 collect_device: str = 'cpu',
+                 prefix: Optional[str] = None) -> None:
+        # raise an error to avoid long time running without getting results
+        if not has_poseval:
+            raise ImportError('Please install ``poseval`` package for '
+                              'evaluation on PoseTrack dataset '
+                              '(see `requirements/optional.txt`)')
+        super().__init__(
+            ann_file=ann_file,
+            score_mode=score_mode,
+            keypoint_score_thr=keypoint_score_thr,
+            nms_mode=nms_mode,
+            nms_thr=nms_thr,
+            format_only=format_only,
+            outfile_prefix=outfile_prefix,
+            collect_device=collect_device,
+            prefix=prefix)
+
+    def results2json(self, keypoints: Dict[int, list],
+                     outfile_prefix: str) -> str:
+        """Dump the keypoint detection results into a json file.
+
+        Args:
+            keypoints (Dict[int, list]): Keypoint detection results
+                of the dataset.
+            outfile_prefix (str): The filename prefix of the json files.
+                If the prefix is "somepath/xxx", the json files will be named
+                "somepath/xxx.keypoints.json".
+
+        Returns:
+            str: The json file name of keypoint results.
+ """ + categories = [] + + cat = {} + cat['supercategory'] = 'person' + cat['id'] = 1 + cat['name'] = 'person' + cat['keypoints'] = [ + 'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear', + 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', + 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', + 'right_knee', 'left_ankle', 'right_ankle' + ] + cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], + [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], + [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], + [4, 6], [5, 7]] + categories.append(cat) + + # path of directory for official gt files + gt_folder = osp.join( + osp.dirname(self.ann_file), + osp.splitext(self.ann_file.split('_')[-1])[0]) + # the json file for each video sequence + json_files = [ + pos for pos in os.listdir(gt_folder) if pos.endswith('.json') + ] + + for json_file in json_files: + gt = load(osp.join(gt_folder, json_file)) + annotations = [] + images = [] + + for image in gt['images']: + img = {} + img['id'] = image['id'] + img['file_name'] = image['file_name'] + images.append(img) + + img_kpts = keypoints[img['id']] + + for track_id, img_kpt in enumerate(img_kpts): + ann = {} + ann['image_id'] = img_kpt['img_id'] + ann['keypoints'] = np.array( + img_kpt['keypoints']).reshape(-1).tolist() + ann['scores'] = np.array(ann['keypoints']).reshape( + [-1, 3])[:, 2].tolist() + ann['score'] = float(img_kpt['score']) + ann['track_id'] = track_id + annotations.append(ann) + + pred_file = osp.join(osp.dirname(outfile_prefix), json_file) + info = {} + info['images'] = images + info['categories'] = categories + info['annotations'] = annotations + + dump(info, pred_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> List[tuple]: + """Do keypoint evaluation using `poseval` package. + + Args: + outfile_prefix (str): The filename prefix of the json files. + If the prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json". + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. 
+        """
+        logger: MMLogger = MMLogger.get_current_instance()
+
+        # path of directory for official gt files
+        # 'xxx/posetrack18_train.json' -> 'xxx/train/'
+        gt_folder = osp.join(
+            osp.dirname(self.ann_file),
+            osp.splitext(self.ann_file.split('_')[-1])[0])
+        pred_folder = osp.dirname(outfile_prefix)
+
+        argv = ['', gt_folder + '/', pred_folder + '/']
+
+        logger.info('Loading data')
+        gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)
+
+        logger.info(f'# gt frames : {len(gtFramesAll)}')
+        logger.info(f'# pred frames: {len(prFramesAll)}')
+
+        # evaluate per-frame multi-person pose estimation (AP)
+        # compute AP
+        logger.info('Evaluation of per-frame multi-person pose estimation')
+        apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False)
+
+        # print AP
+        logger.info('Average Precision (AP) metric:')
+        eval_helpers.printTable(apAll)
+
+        stats = eval_helpers.getCum(apAll)
+
+        stats_names = [
+            'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP',
+            'Ankl AP', 'AP'
+        ]
+
+        info_str = list(zip(stats_names, stats))
+
+        return info_str
diff --git a/mmpose/evaluation/metrics/simple_keypoint_3d_metrics.py b/mmpose/evaluation/metrics/simple_keypoint_3d_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc0065d5b9596c8cafa60abc7fc61a09d7313aac
--- /dev/null
+++ b/mmpose/evaluation/metrics/simple_keypoint_3d_metrics.py
@@ -0,0 +1,119 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict, List, Optional, Sequence
+
+import numpy as np
+from mmengine.evaluator import BaseMetric
+from mmengine.logging import MMLogger
+
+from mmpose.registry import METRICS
+from ..functional import keypoint_mpjpe
+
+
+@METRICS.register_module()
+class SimpleMPJPE(BaseMetric):
+    """MPJPE evaluation metric.
+
+    Calculate the mean per-joint position error (MPJPE) of keypoints.
+
+    Note:
+        - length of dataset: N
+        - num_keypoints: K
+        - number of keypoint dimensions: D (typically D = 3)
+
+    Args:
+        mode (str): Method to align the prediction with the
+            ground truth. Supported options are:
+
+                - ``'mpjpe'``: no alignment will be applied
+                - ``'p-mpjpe'``: align in the least-square sense in
+                  scale, rotation, and translation (Procrustes analysis)
+                - ``'n-mpjpe'``: align in the least-square sense in scale
+
+        collect_device (str): Device name used for collecting results from
+            different ranks during distributed training. Must be ``'cpu'`` or
+            ``'gpu'``. Default: ``'cpu'``.
+        prefix (str, optional): The prefix that will be added in the metric
+            names to disambiguate homonymous metrics of different evaluators.
+            If prefix is not provided in the argument, ``self.default_prefix``
+            will be used instead. Default: ``None``.
+        skip_list (list, optional): The list of subject and action combinations
+            to be skipped. Default: [].
+    """
+
+    ALIGNMENT = {'mpjpe': 'none', 'p-mpjpe': 'procrustes', 'n-mpjpe': 'scale'}
+
+    def __init__(self,
+                 mode: str = 'mpjpe',
+                 collect_device: str = 'cpu',
+                 prefix: Optional[str] = None,
+                 skip_list: List[str] = []) -> None:
+        super().__init__(collect_device=collect_device, prefix=prefix)
+        allowed_modes = self.ALIGNMENT.keys()
+        if mode not in allowed_modes:
+            raise KeyError("`mode` should be 'mpjpe', 'p-mpjpe', or "
+                           f"'n-mpjpe', but got '{mode}'.")
+
+        self.mode = mode
+        self.skip_list = skip_list
+
+    def process(self, data_batch: Sequence[dict],
+                data_samples: Sequence[dict]) -> None:
+        """Process one batch of data samples and predictions.
The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [T, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + if pred_coords.ndim == 4: + pred_coords = np.squeeze(pred_coords, axis=0) + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [T, K, D] + gt_coords = gt['lifting_target'] + # ground truth keypoints_visible, [T, K, 1] + mask = gt['lifting_target_visible'].astype(bool).reshape( + gt_coords.shape[0], -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are the corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + error_name = self.mode.upper() + + logger.info(f'Evaluating {self.mode.upper()}...') + return { + error_name: + keypoint_mpjpe(pred_coords, gt_coords, mask, + self.ALIGNMENT[self.mode]) + } diff --git a/mmpose/models/__init__.py b/mmpose/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e7b386b92dc4f6900efdc88ee690e6b4d86a43e --- /dev/null +++ b/mmpose/models/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa +from .builder import (BACKBONES, HEADS, LOSSES, NECKS, build_backbone, + build_head, build_loss, build_neck, build_pose_estimator, + build_posenet) +from .data_preprocessors import * # noqa +from .distillers import * # noqa +from .heads import * # noqa +from .losses import * # noqa +from .necks import * # noqa +from .pose_estimators import * # noqa + +__all__ = [ + 'BACKBONES', + 'HEADS', + 'NECKS', + 'LOSSES', + 'build_backbone', + 'build_head', + 'build_loss', + 'build_posenet', + 'build_neck', + 'build_pose_estimator', +] diff --git a/mmpose/models/backbones/__init__.py b/mmpose/models/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1559b6288b846248cdabe3e47cdb4620a87f8087 --- /dev/null +++ b/mmpose/models/backbones/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
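+# Importing the backbone modules below triggers their
+# ``@MODELS.register_module()`` registration, so configs can refer to each
+# backbone by its class name.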
+from .alexnet import AlexNet +from .cpm import CPM +from .csp_darknet import CSPDarknet +from .cspnext import CSPNeXt +from .dstformer import DSTFormer +from .hourglass import HourglassNet +from .hourglass_ae import HourglassAENet +from .hrformer import HRFormer +from .hrnet import HRNet +from .litehrnet import LiteHRNet +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .mspn import MSPN +from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 +from .regnet import RegNet +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1d +from .resnext import ResNeXt +from .rsn import RSN +from .scnet import SCNet +from .seresnet import SEResNet +from .seresnext import SEResNeXt +from .shufflenet_v1 import ShuffleNetV1 +from .shufflenet_v2 import ShuffleNetV2 +from .swin import SwinTransformer +from .tcn import TCN +from .v2v_net import V2VNet +from .vgg import VGG +from .vipnas_mbv3 import ViPNAS_MobileNetV3 +from .vipnas_resnet import ViPNAS_ResNet + +__all__ = [ + 'AlexNet', 'HourglassNet', 'HourglassAENet', 'HRNet', 'MobileNetV2', + 'MobileNetV3', 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SCNet', + 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', 'ShuffleNetV2', 'CPM', 'RSN', + 'MSPN', 'ResNeSt', 'VGG', 'TCN', 'ViPNAS_ResNet', 'ViPNAS_MobileNetV3', + 'LiteHRNet', 'V2VNet', 'HRFormer', 'PyramidVisionTransformer', + 'PyramidVisionTransformerV2', 'SwinTransformer', 'DSTFormer', 'CSPDarknet', + 'CSPNeXt' +] diff --git a/mmpose/models/backbones/alexnet.py b/mmpose/models/backbones/alexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..2262658f4718a079b2effc276282be4d39fbe6ad --- /dev/null +++ b/mmpose/models/backbones/alexnet.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `__ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, num_classes=-1, init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return (x, ) diff --git a/mmpose/models/backbones/base_backbone.py b/mmpose/models/backbones/base_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..6094b4e831f992b052e4db206022f489a7f729b3 --- /dev/null +++ b/mmpose/models/backbones/base_backbone.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmengine.model import BaseModule + + +class BaseBackbone(BaseModule, metaclass=ABCMeta): + """Base backbone. + + This class defines the basic functions of a backbone. Any backbone that + inherits this class should at least define its own `forward` function. + """ + + @abstractmethod + def forward(self, x): + """Forward function. + + Args: + x (Tensor | tuple[Tensor]): x could be a torch.Tensor or a tuple of + torch.Tensor, containing input data for forward computation. + """ + + def train(self, mode=True): + """Set module status before forward computation. + + Args: + mode (bool): Whether it is train_mode or test_mode + """ + super(BaseBackbone, self).train(mode) diff --git a/mmpose/models/backbones/cpm.py b/mmpose/models/backbones/cpm.py new file mode 100644 index 0000000000000000000000000000000000000000..256769c43a4d7b9d0cdd40fb6de19a90727012e8 --- /dev/null +++ b/mmpose/models/backbones/cpm.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class CpmBlock(BaseModule): + """CpmBlock for Convolutional Pose Machine. + + Args: + in_channels (int): Input channels of this block. + channels (list): Output channels of each conv module. + kernels (list): Kernel sizes of each conv module. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + channels=(128, 128, 128), + kernels=(11, 11, 11), + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + assert len(channels) == len(kernels) + layers = [] + for i in range(len(channels)): + if i == 0: + input_channels = in_channels + else: + input_channels = channels[i - 1] + layers.append( + ConvModule( + input_channels, + channels[i], + kernels[i], + padding=(kernels[i] - 1) // 2, + norm_cfg=norm_cfg)) + self.model = nn.Sequential(*layers) + + def forward(self, x): + """Model forward function.""" + out = self.model(x) + return out + + +@MODELS.register_module() +class CPM(BaseBackbone): + """CPM backbone. + + Convolutional Pose Machines. + More details can be found in the `paper + `__ . + + Args: + in_channels (int): The input channels of the CPM. + out_channels (int): The output channels of the CPM. + feat_channels (int): Feature channel of each CPM stage. + middle_channels (int): Feature channel of conv after the middle stage. + num_stages (int): Number of stages. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import CPM + >>> import torch + >>> self = CPM(3, 17) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 368, 368) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... print(tuple(level_output.shape)) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + """ + + def __init__( + self, + in_channels, + out_channels, + feat_channels=128, + middle_channels=32, + num_stages=6, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + assert in_channels == 3 + + self.num_stages = num_stages + assert self.num_stages >= 1 + + self.stem = nn.Sequential( + ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 32, 5, padding=2, norm_cfg=norm_cfg), + ConvModule(32, 512, 9, padding=4, norm_cfg=norm_cfg), + ConvModule(512, 512, 1, padding=0, norm_cfg=norm_cfg), + ConvModule(512, out_channels, 1, padding=0, act_cfg=None)) + + self.middle = nn.Sequential( + ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + + self.cpm_stages = nn.ModuleList([ + CpmBlock( + middle_channels + out_channels, + channels=[feat_channels, feat_channels, feat_channels], + kernels=[11, 11, 11], + norm_cfg=norm_cfg) for _ in range(num_stages - 1) + ]) + + self.middle_conv = nn.ModuleList([ + nn.Sequential( + ConvModule( + 128, middle_channels, 5, padding=2, 
norm_cfg=norm_cfg)) + for _ in range(num_stages - 1) + ]) + + self.out_convs = nn.ModuleList([ + nn.Sequential( + ConvModule( + feat_channels, + feat_channels, + 1, + padding=0, + norm_cfg=norm_cfg), + ConvModule(feat_channels, out_channels, 1, act_cfg=None)) + for _ in range(num_stages - 1) + ]) + + def forward(self, x): + """Model forward function.""" + stage1_out = self.stem(x) + middle_out = self.middle(x) + out_feats = [] + + out_feats.append(stage1_out) + + for ind in range(self.num_stages - 1): + single_stage = self.cpm_stages[ind] + out_conv = self.out_convs[ind] + + inp_feat = torch.cat( + [out_feats[-1], self.middle_conv[ind](middle_out)], 1) + cpm_feat = single_stage(inp_feat) + out_feat = out_conv(cpm_feat) + out_feats.append(out_feat) + + return out_feats diff --git a/mmpose/models/backbones/csp_darknet.py b/mmpose/models/backbones/csp_darknet.py new file mode 100644 index 0000000000000000000000000000000000000000..dbaba0cfd93d9765713c92f6854c91ad014e3a9d --- /dev/null +++ b/mmpose/models/backbones/csp_darknet.py @@ -0,0 +1,286 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from ..utils import CSPLayer + + +class Focus(nn.Module): + """Focus width and height information into channel space. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + kernel_size (int): The kernel size of the convolution. Default: 1 + stride (int): The stride of the convolution. Default: 1 + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish'). + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')): + super().__init__() + self.conv = ConvModule( + in_channels * 4, + out_channels, + kernel_size, + stride, + padding=(kernel_size - 1) // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) + patch_top_left = x[..., ::2, ::2] + patch_top_right = x[..., ::2, 1::2] + patch_bot_left = x[..., 1::2, ::2] + patch_bot_right = x[..., 1::2, 1::2] + x = torch.cat( + ( + patch_top_left, + patch_bot_left, + patch_top_right, + patch_bot_right, + ), + dim=1, + ) + return self.conv(x) + + +class SPPBottleneck(BaseModule): + """Spatial pyramid pooling layer used in YOLOv3-SPP. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling + layers. Default: (5, 9, 13). + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
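+
+    Example (illustrative; shapes follow the default ``kernel_sizes``):
+        >>> import torch
+        >>> self = SPPBottleneck(64, 128)
+        >>> x = torch.rand(1, 64, 32, 32)
+        >>> tuple(self.forward(x).shape)
+        (1, 128, 32, 32)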
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_sizes=(5, 9, 13), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=None): + super().__init__(init_cfg) + mid_channels = in_channels // 2 + self.conv1 = ConvModule( + in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.poolings = nn.ModuleList([ + nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) + for ks in kernel_sizes + ]) + conv2_channels = mid_channels * (len(kernel_sizes) + 1) + self.conv2 = ConvModule( + conv2_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.conv1(x) + with torch.cuda.amp.autocast(enabled=False): + x = torch.cat( + [x] + [pooling(x) for pooling in self.poolings], dim=1) + x = self.conv2(x) + return x + + +@MODELS.register_module() +class CSPDarknet(BaseModule): + """CSP-Darknet backbone used in YOLOv5 and YOLOX. + + Args: + arch (str): Architecture of CSP-Darknet, from {P5, P6}. + Default: P5. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Default: 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + use_depthwise (bool): Whether to use depthwise separable convolution. + Default: False. + arch_ovewrite(list): Overwrite default arch settings. Default: None. + spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP + layers. Default: (5, 9, 13). + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmpose.models import CSPDarknet + >>> import torch + >>> self = CSPDarknet(depth=53) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... 
+ (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], + [256, 512, 9, True, False], [512, 1024, 3, False, True]], + 'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False], + [256, 512, 9, True, False], [512, 768, 3, True, False], + [768, 1024, 3, False, True]] + } + + def __init__(self, + arch='P5', + deepen_factor=1.0, + widen_factor=1.0, + out_indices=(2, 3, 4), + frozen_stages=-1, + use_depthwise=False, + arch_ovewrite=None, + spp_kernal_sizes=(5, 9, 13), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + norm_eval=False, + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + super().__init__(init_cfg) + arch_setting = self.arch_settings[arch] + if arch_ovewrite: + arch_setting = arch_ovewrite + assert set(out_indices).issubset( + i for i in range(len(arch_setting) + 1)) + if frozen_stages not in range(-1, len(arch_setting) + 1): + raise ValueError('frozen_stages must be in range(-1, ' + 'len(arch_setting) + 1). But received ' + f'{frozen_stages}') + + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.use_depthwise = use_depthwise + self.norm_eval = norm_eval + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + self.stem = Focus( + 3, + int(arch_setting[0][0] * widen_factor), + kernel_size=3, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.layers = ['stem'] + + for i, (in_channels, out_channels, num_blocks, add_identity, + use_spp) in enumerate(arch_setting): + in_channels = int(in_channels * widen_factor) + out_channels = int(out_channels * widen_factor) + num_blocks = max(round(num_blocks * deepen_factor), 1) + stage = [] + conv_layer = conv( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(conv_layer) + if use_spp: + spp = SPPBottleneck( + out_channels, + out_channels, + kernel_sizes=spp_kernal_sizes, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(spp) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(csp_layer) + self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) + self.layers.append(f'stage{i + 1}') + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for i in range(self.frozen_stages + 1): + m = getattr(self, self.layers[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(CSPDarknet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmpose/models/backbones/cspnext.py b/mmpose/models/backbones/cspnext.py new file mode 100644 index 0000000000000000000000000000000000000000..5275bb255a5bf2c610c90544c7d7e8227b3111c2 --- /dev/null +++ b/mmpose/models/backbones/cspnext.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
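+# CSPNeXt backbone (used in RTMDet); it reuses SPPBottleneck from
+# ``csp_darknet`` and CSPLayer from ``mmpose.models.utils``.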
+import math +from typing import Optional, Sequence, Tuple + +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from mmpose.utils.typing import ConfigType +from ..utils import CSPLayer +from .csp_darknet import SPPBottleneck + + +@MODELS.register_module() +class CSPNeXt(BaseModule): + """CSPNeXt backbone used in RTMDet. + + Args: + arch (str): Architecture of CSPNeXt, from {P5, P6}. + Defaults to P5. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Defaults to 0.5. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + arch_ovewrite (list): Overwrite default arch settings. + Defaults to None. + spp_kernel_sizes: (tuple[int]): Sequential of kernel sizes of SPP + layers. Defaults to (5, 9, 13). + channel_attention (bool): Whether to add channel attention in each + stage. Defaults to True. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and + config norm layer. Defaults to dict(type='BN', requires_grad=True). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='SiLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`]): Initialization config dict. 
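+
+    Example (illustrative; shapes assume the default ``P5`` arch setting):
+        >>> from mmpose.models import CSPNeXt
+        >>> import torch
+        >>> self = CSPNeXt(deepen_factor=0.33, widen_factor=0.5)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 256, 192)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 128, 32, 24)
+        (1, 256, 16, 12)
+        (1, 512, 8, 6)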
+ """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 6, True, False], [512, 1024, 3, False, True]], + 'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 6, True, False], [512, 768, 3, True, False], + [768, 1024, 3, False, True]] + } + + def __init__( + self, + arch: str = 'P5', + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + out_indices: Sequence[int] = (2, 3, 4), + frozen_stages: int = -1, + use_depthwise: bool = False, + expand_ratio: float = 0.5, + arch_ovewrite: dict = None, + spp_kernel_sizes: Sequence[int] = (5, 9, 13), + channel_attention: bool = True, + conv_cfg: Optional[ConfigType] = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU'), + norm_eval: bool = False, + init_cfg: Optional[ConfigType] = dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu') + ) -> None: + super().__init__(init_cfg=init_cfg) + arch_setting = self.arch_settings[arch] + if arch_ovewrite: + arch_setting = arch_ovewrite + assert set(out_indices).issubset( + i for i in range(len(arch_setting) + 1)) + if frozen_stages not in range(-1, len(arch_setting) + 1): + raise ValueError('frozen_stages must be in range(-1, ' + 'len(arch_setting) + 1). But received ' + f'{frozen_stages}') + + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.use_depthwise = use_depthwise + self.norm_eval = norm_eval + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + self.stem = nn.Sequential( + ConvModule( + 3, + int(arch_setting[0][0] * widen_factor // 2), + 3, + padding=1, + stride=2, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + int(arch_setting[0][0] * widen_factor // 2), + int(arch_setting[0][0] * widen_factor // 2), + 3, + padding=1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + int(arch_setting[0][0] * widen_factor // 2), + int(arch_setting[0][0] * widen_factor), + 3, + padding=1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.layers = ['stem'] + + for i, (in_channels, out_channels, num_blocks, add_identity, + use_spp) in enumerate(arch_setting): + in_channels = int(in_channels * widen_factor) + out_channels = int(out_channels * widen_factor) + num_blocks = max(round(num_blocks * deepen_factor), 1) + stage = [] + conv_layer = conv( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(conv_layer) + if use_spp: + spp = SPPBottleneck( + out_channels, + out_channels, + kernel_sizes=spp_kernel_sizes, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(spp) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + use_depthwise=use_depthwise, + use_cspnext_block=True, + expand_ratio=expand_ratio, + channel_attention=channel_attention, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(csp_layer) + self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) + self.layers.append(f'stage{i + 1}') + + def _freeze_stages(self) -> None: + if self.frozen_stages >= 0: + for i in range(self.frozen_stages + 1): + m = getattr(self, self.layers[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True) -> None: + 
super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]: + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmpose/models/backbones/dstformer.py b/mmpose/models/backbones/dstformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2ef13bdb02fffe0ce19cd478c12abf5c9e45f499 --- /dev/null +++ b/mmpose/models/backbones/dstformer.py @@ -0,0 +1,304 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, constant_init +from mmengine.model.weight_init import trunc_normal_ + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class Attention(BaseModule): + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + mode='spatial'): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.mode = mode + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.attn_count_s = None + self.attn_count_t = None + + def forward(self, x, seq_len=1): + B, N, C = x.shape + + if self.mode == 'temporal': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // + self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_temporal(q, k, v, seq_len=seq_len) + elif self.mode == 'spatial': + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // + self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + x = self.forward_spatial(q, k, v) + else: + raise NotImplementedError(self.mode) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def forward_spatial(self, q, k, v): + B, _, N, C = q.shape + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = attn @ v + x = x.transpose(1, 2).reshape(B, N, C * self.num_heads) + return x + + def forward_temporal(self, q, k, v, seq_len=8): + B, _, N, C = q.shape + qt = q.reshape(-1, seq_len, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) # (B, H, N, T, C) + kt = k.reshape(-1, seq_len, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) # (B, H, N, T, C) + vt = v.reshape(-1, seq_len, self.num_heads, N, + C).permute(0, 2, 3, 1, 4) # (B, H, N, T, C) + + attn = (qt @ kt.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = attn @ vt # (B, H, N, T, C) + x = x.permute(0, 3, 2, 1, 4).reshape(B, N, C * self.num_heads) + return x + + +class AttentionBlock(BaseModule): + + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + mlp_out_ratio=1., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + st_mode='st'): + super().__init__() + + self.st_mode = st_mode + self.norm1_s = nn.LayerNorm(dim, eps=1e-06) + self.norm1_t = nn.LayerNorm(dim, eps=1e-06) + + self.attn_s = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + mode='spatial') + 
self.attn_t = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + mode='temporal') + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2_s = nn.LayerNorm(dim, eps=1e-06) + self.norm2_t = nn.LayerNorm(dim, eps=1e-06) + + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_out_dim = int(dim * mlp_out_ratio) + self.mlp_s = nn.Sequential( + nn.Linear(dim, mlp_hidden_dim), nn.GELU(), + nn.Linear(mlp_hidden_dim, mlp_out_dim), nn.Dropout(drop)) + self.mlp_t = nn.Sequential( + nn.Linear(dim, mlp_hidden_dim), nn.GELU(), + nn.Linear(mlp_hidden_dim, mlp_out_dim), nn.Dropout(drop)) + + def forward(self, x, seq_len=1): + if self.st_mode == 'st': + x = x + self.drop_path(self.attn_s(self.norm1_s(x), seq_len)) + x = x + self.drop_path(self.mlp_s(self.norm2_s(x))) + x = x + self.drop_path(self.attn_t(self.norm1_t(x), seq_len)) + x = x + self.drop_path(self.mlp_t(self.norm2_t(x))) + elif self.st_mode == 'ts': + x = x + self.drop_path(self.attn_t(self.norm1_t(x), seq_len)) + x = x + self.drop_path(self.mlp_t(self.norm2_t(x))) + x = x + self.drop_path(self.attn_s(self.norm1_s(x), seq_len)) + x = x + self.drop_path(self.mlp_s(self.norm2_s(x))) + else: + raise NotImplementedError(self.st_mode) + return x + + +@MODELS.register_module() +class DSTFormer(BaseBackbone): + """Dual-stream Spatio-temporal Transformer Module. + + Args: + in_channels (int): Number of input channels. + feat_size: Number of feature channels. Default: 256. + depth: The network depth. Default: 5. + num_heads: Number of heads in multi-Head self-attention blocks. + Default: 8. + mlp_ratio (int, optional): The expansion ratio of FFN. Default: 4. + num_keypoints: num_keypoints (int): Number of keypoints. Default: 17. + seq_len: The sequence length. Default: 243. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout ratio of input. Default: 0. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + att_fuse: Whether to fuse the results of attention blocks. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + + Example: + >>> from mmpose.models import DSTFormer + >>> import torch + >>> self = DSTFormer(in_channels=3) + >>> self.eval() + >>> inputs = torch.rand(1, 2, 17, 3) + >>> level_outputs = self.forward(inputs) + >>> print(tuple(level_outputs.shape)) + (1, 2, 17, 512) + """ + + def __init__(self, + in_channels, + feat_size=256, + depth=5, + num_heads=8, + mlp_ratio=4, + num_keypoints=17, + seq_len=243, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + att_fuse=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.feat_size = feat_size + + self.joints_embed = nn.Linear(in_channels, feat_size) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + + self.blocks_st = nn.ModuleList([ + AttentionBlock( + dim=feat_size, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + st_mode='st') for i in range(depth) + ]) + self.blocks_ts = nn.ModuleList([ + AttentionBlock( + dim=feat_size, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + st_mode='ts') for i in range(depth) + ]) + + self.norm = nn.LayerNorm(feat_size, eps=1e-06) + + self.temp_embed = nn.Parameter(torch.zeros(1, seq_len, 1, feat_size)) + self.spat_embed = nn.Parameter( + torch.zeros(1, num_keypoints, feat_size)) + + trunc_normal_(self.temp_embed, std=.02) + trunc_normal_(self.spat_embed, std=.02) + + self.att_fuse = att_fuse + if self.att_fuse: + self.attn_regress = nn.ModuleList( + [nn.Linear(feat_size * 2, 2) for i in range(depth)]) + for i in range(depth): + self.attn_regress[i].weight.data.fill_(0) + self.attn_regress[i].bias.data.fill_(0.5) + + def forward(self, x): + if len(x.shape) == 3: + x = x[None, :] + assert len(x.shape) == 4 + + B, F, K, C = x.shape + x = x.reshape(-1, K, C) + BF = x.shape[0] + x = self.joints_embed(x) # (BF, K, feat_size) + x = x + self.spat_embed + _, K, C = x.shape + x = x.reshape(-1, F, K, C) + self.temp_embed[:, :F, :, :] + x = x.reshape(BF, K, C) # (BF, K, feat_size) + x = self.pos_drop(x) + + for idx, (blk_st, + blk_ts) in enumerate(zip(self.blocks_st, self.blocks_ts)): + x_st = blk_st(x, F) + x_ts = blk_ts(x, F) + if self.att_fuse: + att = self.attn_regress[idx] + alpha = torch.cat([x_st, x_ts], dim=-1) + BF, K = alpha.shape[:2] + alpha = att(alpha) + alpha = alpha.softmax(dim=-1) + x = x_st * alpha[:, :, 0:1] + x_ts * alpha[:, :, 1:2] + else: + x = (x_st + x_ts) * 0.5 + x = self.norm(x) # (BF, K, feat_size) + x = x.reshape(B, F, K, -1) + return x + + def init_weights(self): + """Initialize the weights in backbone.""" + super(DSTFormer, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + return + + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + constant_init(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + constant_init(m.bias, 0) + constant_init(m.weight, 1.0) diff --git a/mmpose/models/backbones/hourglass.py b/mmpose/models/backbones/hourglass.py new file mode 100644 index 0000000000000000000000000000000000000000..cfc8d6d328da5b63094015351cc10084cda46da0 --- /dev/null +++ b/mmpose/models/backbones/hourglass.py @@ -0,0 +1,209 @@ +# Copyright 
(c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, ResLayer + + +class HourglassModule(BaseModule): + """Hourglass Module for HourglassNet backbone. + + Generate module recursively and use BasicBlock as the base unit. + + Args: + depth (int): Depth of current HourglassModule. + stage_channels (list[int]): Feature channels of sub-modules in current + and follow-up HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in current and + follow-up HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + depth, + stage_channels, + stage_blocks, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.depth = depth + + cur_block = stage_blocks[0] + next_block = stage_blocks[1] + + cur_channel = stage_channels[0] + next_channel = stage_channels[1] + + self.up1 = ResLayer( + BasicBlock, cur_block, cur_channel, cur_channel, norm_cfg=norm_cfg) + + self.low1 = ResLayer( + BasicBlock, + cur_block, + cur_channel, + next_channel, + stride=2, + norm_cfg=norm_cfg) + + if self.depth > 1: + self.low2 = HourglassModule(depth - 1, stage_channels[1:], + stage_blocks[1:]) + else: + self.low2 = ResLayer( + BasicBlock, + next_block, + next_channel, + next_channel, + norm_cfg=norm_cfg) + + self.low3 = ResLayer( + BasicBlock, + cur_block, + next_channel, + cur_channel, + norm_cfg=norm_cfg, + downsample_first=False) + + self.up2 = nn.Upsample(scale_factor=2) + + def forward(self, x): + """Model forward function.""" + up1 = self.up1(x) + low1 = self.low1(x) + low2 = self.low2(low1) + low3 = self.low3(low2) + up2 = self.up2(low3) + return up1 + up2 + + +@MODELS.register_module() +class HourglassNet(BaseBackbone): + """HourglassNet backbone. + + Stacked Hourglass Networks for Human Pose Estimation. + More details can be found in the `paper + `__ . + + Args: + downsample_times (int): Downsample times in a HourglassModule. + num_stacks (int): Number of HourglassModule modules stacked, + 1 for Hourglass-52, 2 for Hourglass-104. + stage_channels (list[int]): Feature channel of each sub-module in a + HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in a + HourglassModule. + feat_channel (int): Feature channel of conv after a HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HourglassNet + >>> import torch + >>> self = HourglassNet() + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... 
print(tuple(level_output.shape)) + (1, 256, 128, 128) + (1, 256, 128, 128) + """ + + def __init__( + self, + downsample_times=5, + num_stacks=2, + stage_channels=(256, 256, 384, 384, 384, 512), + stage_blocks=(2, 2, 2, 2, 2, 4), + feat_channel=256, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.num_stacks = num_stacks + assert self.num_stacks >= 1 + assert len(stage_channels) == len(stage_blocks) + assert len(stage_channels) > downsample_times + + cur_channel = stage_channels[0] + + self.stem = nn.Sequential( + ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg), + ResLayer(BasicBlock, 1, 128, 256, stride=2, norm_cfg=norm_cfg)) + + self.hourglass_modules = nn.ModuleList([ + HourglassModule(downsample_times, stage_channels, stage_blocks) + for _ in range(num_stacks) + ]) + + self.inters = ResLayer( + BasicBlock, + num_stacks - 1, + cur_channel, + cur_channel, + norm_cfg=norm_cfg) + + self.conv1x1s = nn.ModuleList([ + ConvModule( + cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.out_convs = nn.ModuleList([ + ConvModule( + cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) + for _ in range(num_stacks) + ]) + + self.remap_convs = nn.ModuleList([ + ConvModule( + feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Model forward function.""" + inter_feat = self.stem(x) + out_feats = [] + + for ind in range(self.num_stacks): + single_hourglass = self.hourglass_modules[ind] + out_conv = self.out_convs[ind] + + hourglass_feat = single_hourglass(inter_feat) + out_feat = out_conv(hourglass_feat) + out_feats.append(out_feat) + + if ind < self.num_stacks - 1: + inter_feat = self.conv1x1s[ind]( + inter_feat) + self.remap_convs[ind]( + out_feat) + inter_feat = self.inters[ind](self.relu(inter_feat)) + + return out_feats diff --git a/mmpose/models/backbones/hourglass_ae.py b/mmpose/models/backbones/hourglass_ae.py new file mode 100644 index 0000000000000000000000000000000000000000..93e62dd4067c3489de00c5cd1f7875489725de2e --- /dev/null +++ b/mmpose/models/backbones/hourglass_ae.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class HourglassAEModule(BaseModule): + """Modified Hourglass Module for HourglassNet_AE backbone. + + Generate module recursively and use BasicBlock as the base unit. + + Args: + depth (int): Depth of current HourglassModule. + stage_channels (list[int]): Feature channels of sub-modules in current + and follow-up HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + depth, + stage_channels, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.depth = depth + + cur_channel = stage_channels[0] + next_channel = stage_channels[1] + + self.up1 = ConvModule( + cur_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.pool1 = MaxPool2d(2, 2) + + self.low1 = ConvModule( + cur_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) + + if self.depth > 1: + self.low2 = HourglassAEModule(depth - 1, stage_channels[1:]) + else: + self.low2 = ConvModule( + next_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.low3 = ConvModule( + next_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.up2 = nn.UpsamplingNearest2d(scale_factor=2) + + def forward(self, x): + """Model forward function.""" + up1 = self.up1(x) + pool1 = self.pool1(x) + low1 = self.low1(pool1) + low2 = self.low2(low1) + low3 = self.low3(low2) + up2 = self.up2(low3) + return up1 + up2 + + +@MODELS.register_module() +class HourglassAENet(BaseBackbone): + """Hourglass-AE Network proposed by Newell et al. + + Associative Embedding: End-to-End Learning for Joint + Detection and Grouping. + + More details can be found in the `paper + `__ . + + Args: + downsample_times (int): Downsample times in a HourglassModule. + num_stacks (int): Number of HourglassModule modules stacked, + 1 for Hourglass-52, 2 for Hourglass-104. + stage_channels (list[int]): Feature channel of each sub-module in a + HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in a + HourglassModule. + feat_channels (int): Feature channel of conv after a HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HourglassAENet + >>> import torch + >>> self = HourglassAENet() + >>> self.eval() + >>> inputs = torch.rand(1, 3, 512, 512) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... 
print(tuple(level_output.shape)) + (1, 34, 128, 128) + """ + + def __init__( + self, + downsample_times=4, + num_stacks=1, + out_channels=34, + stage_channels=(256, 384, 512, 640, 768), + feat_channels=256, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.num_stacks = num_stacks + assert self.num_stacks >= 1 + assert len(stage_channels) > downsample_times + + cur_channels = stage_channels[0] + + self.stem = nn.Sequential( + ConvModule(3, 64, 7, padding=3, stride=2, norm_cfg=norm_cfg), + ConvModule(64, 128, 3, padding=1, norm_cfg=norm_cfg), + MaxPool2d(2, 2), + ConvModule(128, 128, 3, padding=1, norm_cfg=norm_cfg), + ConvModule(128, feat_channels, 3, padding=1, norm_cfg=norm_cfg), + ) + + self.hourglass_modules = nn.ModuleList([ + nn.Sequential( + HourglassAEModule( + downsample_times, stage_channels, norm_cfg=norm_cfg), + ConvModule( + feat_channels, + feat_channels, + 3, + padding=1, + norm_cfg=norm_cfg), + ConvModule( + feat_channels, + feat_channels, + 3, + padding=1, + norm_cfg=norm_cfg)) for _ in range(num_stacks) + ]) + + self.out_convs = nn.ModuleList([ + ConvModule( + cur_channels, + out_channels, + 1, + padding=0, + norm_cfg=None, + act_cfg=None) for _ in range(num_stacks) + ]) + + self.remap_out_convs = nn.ModuleList([ + ConvModule( + out_channels, + feat_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None) for _ in range(num_stacks - 1) + ]) + + self.remap_feature_convs = nn.ModuleList([ + ConvModule( + feat_channels, + feat_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None) for _ in range(num_stacks - 1) + ]) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Model forward function.""" + inter_feat = self.stem(x) + out_feats = [] + + for ind in range(self.num_stacks): + single_hourglass = self.hourglass_modules[ind] + out_conv = self.out_convs[ind] + + hourglass_feat = single_hourglass(inter_feat) + out_feat = out_conv(hourglass_feat) + out_feats.append(out_feat) + + if ind < self.num_stacks - 1: + inter_feat = inter_feat + self.remap_out_convs[ind]( + out_feat) + self.remap_feature_convs[ind]( + hourglass_feat) + + return out_feats diff --git a/mmpose/models/backbones/hrformer.py b/mmpose/models/backbones/hrformer.py new file mode 100644 index 0000000000000000000000000000000000000000..0b86617f14e3104c84e3d5af5dd82bcf8cbd7879 --- /dev/null +++ b/mmpose/models/backbones/hrformer.py @@ -0,0 +1,758 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import math + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import build_dropout +from mmengine.model import BaseModule, trunc_normal_init +from torch.nn.functional import pad + +from mmpose.registry import MODELS +from .hrnet import Bottleneck, HRModule, HRNet + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. 
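+
+    Example (illustrative):
+        >>> import torch
+        >>> x = torch.rand(2, 16 * 12, 64)
+        >>> nlc_to_nchw(x, (16, 12)).shape
+        torch.Size([2, 64, 16, 12])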
+ """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + return x.transpose(1, 2).reshape(B, C, H, W) + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +def build_drop_path(drop_path_rate): + """Build drop path layer.""" + return build_dropout(dict(type='DropPath', drop_prob=drop_path_rate)) + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + with_rpe (bool, optional): If True, use relative position bias. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + with_rpe=True, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + self.with_rpe = with_rpe + if self.with_rpe: + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros( + (2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_init(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (B*num_windows, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
+ """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.with_rpe: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class LocalWindowSelfAttention(BaseModule): + r""" Local-window Self Attention (LSA) module with relative position bias. + + This module is the short-range self-attention module in the + Interlaced Sparse Self-Attention `_. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int] | int): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + with_rpe (bool, optional): If True, use relative position bias. + Default: True. + with_pad_mask (bool, optional): If True, mask out the padded tokens in + the attention process. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + with_rpe=True, + with_pad_mask=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(window_size, int): + window_size = (window_size, window_size) + self.window_size = window_size + self.with_pad_mask = with_pad_mask + self.attn = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate, + with_rpe=with_rpe, + init_cfg=init_cfg) + + def forward(self, x, H, W, **kwargs): + """Forward function.""" + B, N, C = x.shape + x = x.view(B, H, W, C) + Wh, Ww = self.window_size + + # center-pad the feature on H and W axes + pad_h = math.ceil(H / Wh) * Wh - H + pad_w = math.ceil(W / Ww) * Ww - W + x = pad(x, (0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2)) + + # permute + x = x.view(B, math.ceil(H / Wh), Wh, math.ceil(W / Ww), Ww, C) + x = x.permute(0, 1, 3, 2, 4, 5) + x = x.reshape(-1, Wh * Ww, C) # (B*num_window, Wh*Ww, C) + + # attention + if self.with_pad_mask and pad_h > 0 and pad_w > 0: + pad_mask = x.new_zeros(1, H, W, 1) + pad_mask = pad( + pad_mask, [ + 0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ], + value=-float('inf')) + pad_mask = pad_mask.view(1, math.ceil(H / Wh), Wh, + math.ceil(W / Ww), Ww, 1) + pad_mask = pad_mask.permute(1, 3, 0, 2, 4, 5) + pad_mask = pad_mask.reshape(-1, Wh * Ww) + pad_mask = pad_mask[:, None, :].expand([-1, Wh * Ww, -1]) + out = self.attn(x, pad_mask, **kwargs) + else: + out = self.attn(x, **kwargs) + + # reverse permutation + out = out.reshape(B, math.ceil(H / Wh), math.ceil(W / Ww), Wh, Ww, C) + out = out.permute(0, 1, 3, 2, 4, 5) + out = out.reshape(B, H + pad_h, W + pad_w, C) + + # de-pad + out = out[:, pad_h // 2:H + pad_h // 2, pad_w // 2:W + pad_w // 2] + return out.reshape(B, N, C) + + +class CrossFFN(BaseModule): + r"""FFN with Depthwise Conv of HRFormer. + + Args: + in_features (int): The feature dimension. + hidden_features (int, optional): The hidden dimension of FFNs. + Defaults: The same as in_features. + act_cfg (dict, optional): Config of activation layer. + Default: dict(type='GELU'). + dw_act_cfg (dict, optional): Config of activation layer appended + right after DW Conv. Default: dict(type='GELU'). + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='SyncBN'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + dw_act_cfg=dict(type='GELU'), + norm_cfg=dict(type='SyncBN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1) + self.act1 = build_activation_layer(act_cfg) + self.norm1 = build_norm_layer(norm_cfg, hidden_features)[1] + self.dw3x3 = nn.Conv2d( + hidden_features, + hidden_features, + kernel_size=3, + stride=1, + groups=hidden_features, + padding=1) + self.act2 = build_activation_layer(dw_act_cfg) + self.norm2 = build_norm_layer(norm_cfg, hidden_features)[1] + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1) + self.act3 = build_activation_layer(act_cfg) + self.norm3 = build_norm_layer(norm_cfg, out_features)[1] + + def forward(self, x, H, W): + """Forward function.""" + x = nlc_to_nchw(x, (H, W)) + x = self.act1(self.norm1(self.fc1(x))) + x = self.act2(self.norm2(self.dw3x3(x))) + x = self.act3(self.norm3(self.fc2(x))) + x = nchw_to_nlc(x) + return x + + +class HRFormerBlock(BaseModule): + """High-Resolution Block for HRFormer. + + Args: + in_features (int): The input dimension. + out_features (int): The output dimension. + num_heads (int): The number of head within each LSA. + window_size (int, optional): The window size for the LSA. + Default: 7 + mlp_ratio (int, optional): The expansion ration of FFN. + Default: 4 + act_cfg (dict, optional): Config of activation layer. + Default: dict(type='GELU'). + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='SyncBN'). + transformer_norm_cfg (dict, optional): Config of transformer norm + layer. Default: dict(type='LN', eps=1e-6). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + expansion = 1 + + def __init__(self, + in_features, + out_features, + num_heads, + window_size=7, + mlp_ratio=4.0, + drop_path=0.0, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='SyncBN'), + transformer_norm_cfg=dict(type='LN', eps=1e-6), + init_cfg=None, + **kwargs): + super(HRFormerBlock, self).__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + self.norm1 = build_norm_layer(transformer_norm_cfg, in_features)[1] + self.attn = LocalWindowSelfAttention( + in_features, + num_heads=num_heads, + window_size=window_size, + init_cfg=None, + **kwargs) + + self.norm2 = build_norm_layer(transformer_norm_cfg, out_features)[1] + self.ffn = CrossFFN( + in_features=in_features, + hidden_features=int(in_features * mlp_ratio), + out_features=out_features, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dw_act_cfg=act_cfg, + init_cfg=None) + + self.drop_path = build_drop_path( + drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + """Forward function.""" + B, C, H, W = x.size() + # Attention + x = x.view(B, C, -1).permute(0, 2, 1) + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + # FFN + x = x + self.drop_path(self.ffn(self.norm2(x), H, W)) + x = x.permute(0, 2, 1).view(B, C, H, W) + return x + + def extra_repr(self): + """(Optional) Set the extra information about this module.""" + return 'num_heads={}, window_size={}, mlp_ratio={}'.format( + self.num_heads, self.window_size, self.mlp_ratio) + + +class HRFomerModule(HRModule): + """High-Resolution Module for HRFormer. 
+
+    Args:
+        num_branches (int): The number of branches in the HRFormerModule.
+        block (nn.Module): The building block of HRFormer.
+            The block should be the HRFormerBlock.
+        num_blocks (tuple): The number of blocks in each branch.
+            The length must be equal to num_branches.
+        num_inchannels (tuple): The number of input channels in each branch.
+            The length must be equal to num_branches.
+        num_channels (tuple): The number of channels in each branch.
+            The length must be equal to num_branches.
+        num_heads (tuple): The number of heads within the LSAs.
+        num_window_sizes (tuple): The window size for the LSAs.
+        num_mlp_ratios (tuple): The expansion ratio for the FFNs.
+        drop_paths (float | tuple[float], optional): The drop path rate(s)
+            of the HRFormer blocks. Default: 0.0
+        with_rpe (bool, optional): Whether to use relative position bias
+            in the local window self-attention. Default: True.
+        with_pad_mask (bool, optional): Whether to mask out padded positions
+            in the local window self-attention. Default: False.
+        multiscale_output (bool, optional): Whether to output multi-level
+            features produced by multiple branches. If False, only the first
+            level feature will be output. Default: True.
+        conv_cfg (dict, optional): Config of the conv layers.
+            Default: None.
+        norm_cfg (dict, optional): Config of the norm layers appended
+            right after conv. Default: dict(type='SyncBN', requires_grad=True)
+        transformer_norm_cfg (dict, optional): Config of the transformer norm
+            layers. Default: dict(type='LN', eps=1e-6)
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False
+        upsample_cfg (dict, optional): The config of upsample layers in fuse
+            layers. Default: dict(mode='bilinear', align_corners=False)
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+            Default: None.
+    """
+
+    def __init__(self,
+                 num_branches,
+                 block,
+                 num_blocks,
+                 num_inchannels,
+                 num_channels,
+                 num_heads,
+                 num_window_sizes,
+                 num_mlp_ratios,
+                 multiscale_output=True,
+                 drop_paths=0.0,
+                 with_rpe=True,
+                 with_pad_mask=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='SyncBN', requires_grad=True),
+                 transformer_norm_cfg=dict(type='LN', eps=1e-6),
+                 with_cp=False,
+                 upsample_cfg=dict(mode='bilinear', align_corners=False),
+                 **kwargs):
+
+        self.transformer_norm_cfg = transformer_norm_cfg
+        self.drop_paths = drop_paths
+        self.num_heads = num_heads
+        self.num_window_sizes = num_window_sizes
+        self.num_mlp_ratios = num_mlp_ratios
+        self.with_rpe = with_rpe
+        self.with_pad_mask = with_pad_mask
+
+        super().__init__(num_branches, block, num_blocks, num_inchannels,
+                         num_channels, multiscale_output, with_cp, conv_cfg,
+                         norm_cfg, upsample_cfg, **kwargs)
+
+    def _make_one_branch(self,
+                         branch_index,
+                         block,
+                         num_blocks,
+                         num_channels,
+                         stride=1):
+        """Build one branch."""
+        # HRFormerBlock does not support down sample layer yet.
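+        # As a consequence, the stride must stay at 1 and the branch input
+        # channels must already match the block channels: there is no
+        # downsample/projection shortcut to reconcile a mismatch, which the
+        # assertion below enforces.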
+ assert stride == 1 and self.in_channels[branch_index] == num_channels[ + branch_index] + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + num_heads=self.num_heads[branch_index], + window_size=self.num_window_sizes[branch_index], + mlp_ratio=self.num_mlp_ratios[branch_index], + drop_path=self.drop_paths[0], + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + init_cfg=None, + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask)) + + self.in_channels[ + branch_index] = self.in_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + num_heads=self.num_heads[branch_index], + window_size=self.num_window_sizes[branch_index], + mlp_ratio=self.num_mlp_ratios[branch_index], + drop_path=self.drop_paths[i], + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + init_cfg=None, + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask)) + return nn.Sequential(*layers) + + def _make_fuse_layers(self): + """Build fuse layers.""" + if self.num_branches == 1: + return None + num_branches = self.num_branches + num_inchannels = self.in_channels + fuse_layers = [] + for i in range(num_branches if self.multiscale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_inchannels[i], + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_inchannels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), + mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + with_out_act = False + else: + num_outchannels_conv3x3 = num_inchannels[j] + with_out_act = True + sub_modules = [ + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_inchannels[j], + kernel_size=3, + stride=2, + padding=1, + groups=num_inchannels[j], + bias=False, + ), + build_norm_layer(self.norm_cfg, + num_inchannels[j])[1], + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_outchannels_conv3x3, + kernel_size=1, + stride=1, + bias=False, + ), + build_norm_layer(self.norm_cfg, + num_outchannels_conv3x3)[1] + ] + if with_out_act: + sub_modules.append(nn.ReLU(False)) + conv3x3s.append(nn.Sequential(*sub_modules)) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + """Return the number of input channels.""" + return self.in_channels + + +@MODELS.register_module() +class HRFormer(HRNet): + """HRFormer backbone. + + This backbone is the implementation of `HRFormer: High-Resolution + Transformer for Dense Prediction `_. + + Args: + extra (dict): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of block. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of channels in each branch. + The length must be equal to num_branches. 
+ in_channels (int): Number of input image channels. Normally 3. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Config of norm layer. + Use `SyncBN` by default. + transformer_norm_cfg (dict): Config of transformer norm layer. + Use `LN` by default. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRFormer + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(2, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='HRFORMER', + >>> window_sizes=(7, 7), + >>> num_heads=(1, 2), + >>> mlp_ratios=(4, 4), + >>> num_blocks=(2, 2), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='HRFORMER', + >>> window_sizes=(7, 7, 7), + >>> num_heads=(1, 2, 4), + >>> mlp_ratios=(4, 4, 4), + >>> num_blocks=(2, 2, 2), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=2, + >>> num_branches=4, + >>> block='HRFORMER', + >>> window_sizes=(7, 7, 7, 7), + >>> num_heads=(1, 2, 4, 8), + >>> mlp_ratios=(4, 4, 4, 4), + >>> num_blocks=(2, 2, 2, 2), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRFormer(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BOTTLENECK': Bottleneck, 'HRFORMERBLOCK': HRFormerBlock} + + def __init__( + self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + transformer_norm_cfg=dict(type='LN', eps=1e-6), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + + # stochastic depth + depths = [ + extra[stage]['num_blocks'][0] * extra[stage]['num_modules'] + for stage in ['stage2', 'stage3', 'stage4'] + ] + depth_s2, depth_s3, _ = depths + drop_path_rate = extra['drop_path_rate'] + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] + extra['stage2']['drop_path_rates'] = dpr[0:depth_s2] + extra['stage3']['drop_path_rates'] = dpr[depth_s2:depth_s2 + depth_s3] + extra['stage4']['drop_path_rates'] = dpr[depth_s2 + depth_s3:] + + # HRFormer use bilinear upsample as default + upsample_cfg = extra.get('upsample', { + 'mode': 'bilinear', + 'align_corners': False + }) + extra['upsample'] = upsample_cfg + self.transformer_norm_cfg = transformer_norm_cfg + self.with_rpe = extra.get('with_rpe', True) + self.with_pad_mask = extra.get('with_pad_mask', False) + + super().__init__(extra, in_channels, conv_cfg, norm_cfg, norm_eval, + with_cp, zero_init_residual, frozen_stages, init_cfg) + + def _make_stage(self, + layer_config, + num_inchannels, + multiscale_output=True): + """Make each stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + num_heads = layer_config['num_heads'] + num_window_sizes = layer_config['window_sizes'] + num_mlp_ratios = layer_config['mlp_ratios'] + drop_path_rates = layer_config['drop_path_rates'] + + modules = [] + for i in range(num_modules): + # multiscale_output is only used at the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + modules.append( + HRFomerModule( + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + num_heads, + num_window_sizes, + num_mlp_ratios, + reset_multiscale_output, + drop_paths=drop_path_rates[num_blocks[0] * + i:num_blocks[0] * (i + 1)], + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + with_cp=self.with_cp, + upsample_cfg=self.upsample_cfg)) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels diff --git a/mmpose/models/backbones/hrnet.py b/mmpose/models/backbones/hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..381b22d60ec886ecb6d8c52fc9e7ccab52c05e99 --- /dev/null +++ b/mmpose/models/backbones/hrnet.py @@ -0,0 +1,610 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
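+#
+# Illustrative usage sketch (assumes the 4-stage config from the HRNet
+# docstring below and the default ``multiscale_output=False`` in stage4,
+# so only the highest-resolution branch is returned):
+#
+#     model = HRNet(extra)
+#     model.eval()
+#     feats = model(torch.rand(1, 3, 256, 256))
+#     # feats[0].shape == (1, 32, 64, 64): the stem downsamples by 4x and the
+#     # 1/4-resolution branch carries 32 channels.
+#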
+import copy + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, Bottleneck, get_expansion + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=False, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(mode='nearest', align_corners=None), + init_cfg=None): + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.upsample_cfg = upsample_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def _check_branches(num_branches, num_blocks, in_channels, num_channels): + """Check input to avoid ValueError.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Make one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * get_expansion(block): + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer( + self.norm_cfg, + num_channels[branch_index] * get_expansion(block))[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * get_expansion(block) + for _ in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + 
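+        # Fuse layer (i, j) maps branch j to branch i's resolution and width:
+        # for j > i (lower resolution) a 1x1 conv adjusts the channels and the
+        # result is upsampled by 2**(j - i); for j == i the features pass
+        # through unchanged; for j < i (higher resolution) a stack of stride-2
+        # 3x3 convs downsamples, changing the channels only in the last conv.
+        # The aligned branches are summed and passed through ReLU in forward().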
in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), + mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@MODELS.register_module() +class HRNet(BaseBackbone): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `__ + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 32, 8, 8) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__( + self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + self.frozen_stages = frozen_stages + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + self.upsample_cfg = self.extra.get('upsample', { + 'mode': 'nearest', + 'align_corners': None + }) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * get_expansion(block) + self.layer1 = self._make_layer(block, 64, stage1_out_channels, + num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition2 = 
self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, + num_channels, + multiscale_output=self.stage4_cfg.get('multiscale_output', False)) + + self._freeze_stages() + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): + """Make layer.""" + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1]) + + layers = [] + layers.append( + block( + in_channels, + out_channels, + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + for _ in range(1, blocks): + layers.append( + block( + out_channels, + out_channels, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + 
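+            # Intermediate modules must keep every branch alive so that the
+            # following module (or the next stage's transition layer) can fuse
+            # them; only the last module in the stage is allowed to collapse
+            # the output to the highest-resolution branch.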
hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + upsample_cfg=self.upsample_cfg)) + + in_channels = hr_modules[-1].in_channels + + return nn.Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + self.norm1.eval() + self.norm2.eval() + + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, 'layer1') + else: + m = getattr(self, f'stage{i}') + + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i < 4: + m = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(HRNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return tuple(y_list) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/litehrnet.py b/mmpose/models/backbones/litehrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1ad5f63014553129a02ca3dc4bfda4c181fcd6a6 --- /dev/null +++ b/mmpose/models/backbones/litehrnet.py @@ -0,0 +1,999 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/HRNet/Lite-HRNet +# Original licence: Apache License 2.0. +# ------------------------------------------------------------------------------ + +import mmengine +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, + build_conv_layer, build_norm_layer) +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import channel_shuffle + + +class SpatialWeighting(BaseModule): + """Spatial weighting module. + + Args: + channels (int): The channels of the module. 
+ ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + act_cfg (dict): Config dict for activation layer. + Default: (dict(type='ReLU'), dict(type='Sigmoid')). + The last ConvModule uses Sigmoid by default. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + norm_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out + + +class CrossResolutionWeighting(BaseModule): + """Cross-resolution channel weighting module. + + Args: + channels (int): The channels of the module. + ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + act_cfg (dict): Config dict for activation layer. + Default: (dict(type='ReLU'), dict(type='Sigmoid')). + The last ConvModule uses Sigmoid by default. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + norm_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.channels = channels + total_channel = sum(channels) + self.conv1 = ConvModule( + in_channels=total_channel, + out_channels=int(total_channel / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(total_channel / ratio), + out_channels=total_channel, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + mini_size = x[-1].size()[-2:] + out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]] + out = torch.cat(out, dim=1) + out = self.conv1(out) + out = self.conv2(out) + out = torch.split(out, self.channels, dim=1) + out = [ + s * F.interpolate(a, size=s.size()[-2:], mode='nearest') + for s, a in zip(x, out) + ] + return out + + +class ConditionalChannelWeighting(BaseModule): + """Conditional channel weighting block. + + Args: + in_channels (int): The input channels of the block. + stride (int): Stride of the 3x3 convolution layer. + reduce_ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + stride, + reduce_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.with_cp = with_cp + self.stride = stride + assert stride in [1, 2] + + branch_channels = [channel // 2 for channel in in_channels] + + self.cross_resolution_weighting = CrossResolutionWeighting( + branch_channels, + ratio=reduce_ratio, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + self.depthwise_convs = nn.ModuleList([ + ConvModule( + channel, + channel, + kernel_size=3, + stride=self.stride, + padding=1, + groups=channel, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) for channel in branch_channels + ]) + + self.spatial_weighting = nn.ModuleList([ + SpatialWeighting(channels=channel, ratio=4) + for channel in branch_channels + ]) + + def forward(self, x): + + def _inner_forward(x): + x = [s.chunk(2, dim=1) for s in x] + x1 = [s[0] for s in x] + x2 = [s[1] for s in x] + + x2 = self.cross_resolution_weighting(x2) + x2 = [dw(s) for s, dw in zip(x2, self.depthwise_convs)] + x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)] + + out = [torch.cat([s1, s2], dim=1) for s1, s2 in zip(x1, x2)] + out = [channel_shuffle(s, 2) for s in out] + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class Stem(BaseModule): + """Stem network block. + + Args: + in_channels (int): The input channels of the block. + stem_channels (int): Output channels of the stem layer. + out_channels (int): The output channels of the block. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + stem_channels, + out_channels, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=stem_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU')) + + mid_channels = int(round(stem_channels * expand_ratio)) + branch_channels = stem_channels // 2 + if stem_channels == self.out_channels: + inc_channels = self.out_channels - branch_channels + else: + inc_channels = self.out_channels - stem_channels + + self.branch1 = nn.Sequential( + ConvModule( + branch_channels, + branch_channels, + kernel_size=3, + stride=2, + padding=1, + groups=branch_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_channels, + inc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')), + ) + + self.expand_conv = ConvModule( + branch_channels, + mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + self.depthwise_conv = ConvModule( + mid_channels, + mid_channels, + kernel_size=3, + stride=2, + padding=1, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + self.linear_conv = ConvModule( + mid_channels, + branch_channels + if stem_channels == self.out_channels else stem_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + + def forward(self, x): + + def _inner_forward(x): + x = self.conv1(x) + x1, x2 = x.chunk(2, dim=1) + + x2 = self.expand_conv(x2) + x2 = self.depthwise_conv(x2) + x2 = self.linear_conv(x2) + + out = torch.cat((self.branch1(x1), x2), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class IterativeHead(BaseModule): + """Extra iterative head for feature learning. + + Args: + in_channels (int): The input channels of the block. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, in_channels, norm_cfg=dict(type='BN'), init_cfg=None): + super().__init__(init_cfg=init_cfg) + projects = [] + num_branchs = len(in_channels) + self.in_channels = in_channels[::-1] + + for i in range(num_branchs): + if i != num_branchs - 1: + projects.append( + DepthwiseSeparableConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i + 1], + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + dw_act_cfg=None, + pw_act_cfg=dict(type='ReLU'))) + else: + projects.append( + DepthwiseSeparableConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i], + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + dw_act_cfg=None, + pw_act_cfg=dict(type='ReLU'))) + self.projects = nn.ModuleList(projects) + + def forward(self, x): + x = x[::-1] + + y = [] + last_x = None + for i, s in enumerate(x): + if last_x is not None: + last_x = F.interpolate( + last_x, + size=s.size()[-2:], + mode='bilinear', + align_corners=True) + s = s + last_x + s = self.projects[i](s) + y.append(s) + last_x = s + + return y[::-1] + + +class ShuffleUnit(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class LiteHRModule(BaseModule): + """High-Resolution Module for LiteHRNet. + + It contains conditional channel weighting blocks and + shuffle blocks. + + + Args: + num_branches (int): Number of branches in the module. + num_blocks (int): Number of blocks in the module. + in_channels (list(int)): Number of input image channels. + reduce_ratio (int): Channel reduction ratio. + module_type (str): 'LITE' or 'NAIVE' + multiscale_output (bool): Whether to output multi-scale features. + with_fuse (bool): Whether to use fuse layers. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + num_branches, + num_blocks, + in_channels, + reduce_ratio, + module_type, + multiscale_output=False, + with_fuse=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, in_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.module_type = module_type + self.multiscale_output = multiscale_output + self.with_fuse = with_fuse + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + + if self.module_type.upper() == 'LITE': + self.layers = self._make_weighting_blocks(num_blocks, reduce_ratio) + elif self.module_type.upper() == 'NAIVE': + self.layers = self._make_naive_branches(num_branches, num_blocks) + else: + raise ValueError("module_type should be either 'LITE' or 'NAIVE'.") + if self.with_fuse: + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU() + + def _check_branches(self, num_branches, in_channels): + """Check input to avoid ValueError.""" + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1): + """Make channel weighting blocks.""" + layers = [] + for i in range(num_blocks): + layers.append( + ConditionalChannelWeighting( + self.in_channels, + stride=stride, + reduce_ratio=reduce_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp)) + + return nn.Sequential(*layers) + + def _make_one_branch(self, branch_index, num_blocks, stride=1): + """Make one branch.""" + layers = [] + layers.append( + ShuffleUnit( + self.in_channels[branch_index], + self.in_channels[branch_index], + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU'), + with_cp=self.with_cp)) + for i in range(1, num_blocks): + layers.append( + ShuffleUnit( + self.in_channels[branch_index], + self.in_channels[branch_index], + stride=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU'), + with_cp=self.with_cp)) + + return nn.Sequential(*layers) + + def _make_naive_branches(self, num_branches, num_blocks): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append(self._make_one_branch(i, num_blocks)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + groups=in_channels[j], + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + 
kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + groups=in_channels[j], + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.layers[0](x[0])] + + if self.module_type.upper() == 'LITE': + out = self.layers(x) + elif self.module_type.upper() == 'NAIVE': + for i in range(self.num_branches): + x[i] = self.layers[i](x[i]) + out = x + + if self.with_fuse: + out_fuse = [] + for i in range(len(self.fuse_layers)): + # `y = 0` will lead to decreased accuracy (0.5~1 mAP) + y = out[0] if i == 0 else self.fuse_layers[i][0](out[0]) + for j in range(self.num_branches): + if i == j: + y += out[j] + else: + y += self.fuse_layers[i][j](out[j]) + out_fuse.append(self.relu(y)) + out = out_fuse + if not self.multiscale_output: + out = [out[0]] + return out + + +@MODELS.register_module() +class LiteHRNet(BaseBackbone): + """Lite-HRNet backbone. + + `Lite-HRNet: A Lightweight High-Resolution Network + `_. + + Code adapted from 'https://github.com/HRNet/Lite-HRNet'. + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import LiteHRNet + >>> import torch + >>> extra=dict( + >>> stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + >>> num_stages=3, + >>> stages_spec=dict( + >>> num_modules=(2, 4, 2), + >>> num_branches=(2, 3, 4), + >>> num_blocks=(2, 2, 2), + >>> module_type=('LITE', 'LITE', 'LITE'), + >>> with_fuse=(True, True, True), + >>> reduce_ratios=(8, 8, 8), + >>> num_channels=( + >>> (40, 80), + >>> (40, 80, 160), + >>> (40, 80, 160, 320), + >>> )), + >>> with_head=False) + >>> self = LiteHRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 40, 8, 8) + """ + + def __init__(self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super().__init__(init_cfg=init_cfg) + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.stem = Stem( + in_channels, + stem_channels=self.extra['stem']['stem_channels'], + out_channels=self.extra['stem']['out_channels'], + expand_ratio=self.extra['stem']['expand_ratio'], + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + self.num_stages = self.extra['num_stages'] + self.stages_spec = self.extra['stages_spec'] + + num_channels_last = [ + self.stem.out_channels, + ] + for i in range(self.num_stages): + num_channels = self.stages_spec['num_channels'][i] + num_channels = [num_channels[i] for i in range(len(num_channels))] + setattr( + self, f'transition{i}', + self._make_transition_layer(num_channels_last, num_channels)) + + stage, num_channels_last = self._make_stage( + self.stages_spec, i, num_channels, multiscale_output=True) + setattr(self, f'stage{i}', stage) + + self.with_head = self.extra['with_head'] + if self.with_head: + self.head_layer = IterativeHead( + in_channels=num_channels_last, + norm_cfg=self.norm_cfg, + ) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_pre_layer[i], + kernel_size=3, + stride=1, + padding=1, + groups=num_channels_pre_layer[i], + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_pre_layer[i])[1], + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU())) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=1, + groups=in_channels, + bias=False), + build_norm_layer(self.norm_cfg, in_channels)[1], + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU())) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, + stages_spec, + stage_index, + in_channels, + multiscale_output=True): + num_modules = stages_spec['num_modules'][stage_index] + num_branches = stages_spec['num_branches'][stage_index] + num_blocks = stages_spec['num_blocks'][stage_index] + reduce_ratio = stages_spec['reduce_ratios'][stage_index] + with_fuse = stages_spec['with_fuse'][stage_index] + module_type = 
stages_spec['module_type'][stage_index] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + modules.append( + LiteHRModule( + num_branches, + num_blocks, + in_channels, + reduce_ratio, + module_type, + multiscale_output=reset_multiscale_output, + with_fuse=with_fuse, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp)) + in_channels = modules[-1].in_channels + + return nn.Sequential(*modules), in_channels + + def forward(self, x): + """Forward function.""" + x = self.stem(x) + + y_list = [x] + for i in range(self.num_stages): + x_list = [] + transition = getattr(self, f'transition{i}') + for j in range(self.stages_spec['num_branches'][i]): + if transition[j]: + if j >= len(y_list): + x_list.append(transition[j](y_list[-1])) + else: + x_list.append(transition[j](y_list[j])) + else: + x_list.append(y_list[j]) + y_list = getattr(self, f'stage{i}')(x_list) + + x = y_list + if self.with_head: + x = self.head_layer(x) + + return (x[0], ) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mobilenet_v2.py b/mmpose/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..b64c0d73d41d3763018a8e46621c6ab695be6856 --- /dev/null +++ b/mmpose/models/backbones/mobilenet_v2.py @@ -0,0 +1,279 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import make_divisible + + +class InvertedResidual(BaseModule): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' 
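+        # The identity (residual) shortcut is only used when the block keeps
+        # both the spatial size (stride == 1) and the channel width unchanged;
+        # otherwise the expand 1x1 -> depthwise 3x3 -> linear 1x1 stack is
+        # applied without a skip connection.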
+ self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). 
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. + """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mobilenet_v3.py b/mmpose/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..03ecf90dd22d42a3650a4eac00c070ec556c7912 --- /dev/null +++ b/mmpose/models/backbones/mobilenet_v3.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import InvertedResidual + + +@MODELS.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. + + Args: + arch (str): Architecture of mobilnetv3, from {small, big}. + Default: small. 
+ conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: (-1, ), which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm']) + ]`` + """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'big': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN'), + out_indices=(-1, ), + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert arch in self.arch_settings + for index in out_indices: + if index not in range(-len(self.arch_settings[arch]), + len(self.arch_settings[arch])): + raise ValueError('the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch])}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch])): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch])}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = 16 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='HSwish')) + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][2] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=1.0, divisor=2.0))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + with_expand_conv=True, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + self.in_channels = out_channels + layer_name = f'layer{i + 1}' + self.add_module(layer_name, layer) + layers.append(layer_name) + return layers + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices or \ + i - len(self.layers) in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mspn.py b/mmpose/models/backbones/mspn.py new file mode 100644 index 0000000000000000000000000000000000000000..bcb636b1a3fdc0357fa7dc7c3751738914d58980 --- /dev/null +++ b/mmpose/models/backbones/mspn.py @@ -0,0 +1,541 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule +from mmengine.runner import load_state_dict + +from mmpose.registry import MODELS +from mmpose.utils import get_root_logger +from .base_backbone import BaseBackbone +from .resnet import Bottleneck as _Bottleneck +from .utils import get_state_dict + + +class Bottleneck(_Bottleneck): + expansion = 4 + """Bottleneck block for MSPN. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + stride (int): stride of the block. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, in_channels, out_channels, **kwargs): + super().__init__(in_channels, out_channels * 4, **kwargs) + + +class DownsampleModule(BaseModule): + """Downsample module for MSPN. + + Args: + block (nn.Module): Downsample block. + num_blocks (list): Number of blocks in each downsample unit. + num_units (int): Numbers of downsample units. Default: 4 + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the input feature to + downsample module. Default: 64 + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + block, + num_blocks, + num_units=4, + has_skip=False, + norm_cfg=dict(type='BN'), + in_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.has_skip = has_skip + self.in_channels = in_channels + assert len(num_blocks) == num_units + self.num_blocks = num_blocks + self.num_units = num_units + self.norm_cfg = norm_cfg + self.layer1 = self._make_layer(block, in_channels, num_blocks[0]) + for i in range(1, num_units): + module_name = f'layer{i + 1}' + self.add_module( + module_name, + self._make_layer( + block, in_channels * pow(2, i), num_blocks[i], stride=2)) + + def _make_layer(self, block, out_channels, blocks, stride=1): + downsample = None + if stride != 1 or self.in_channels != out_channels * block.expansion: + downsample = ConvModule( + self.in_channels, + out_channels * block.expansion, + kernel_size=1, + stride=stride, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + units = list() + units.append( + block( + self.in_channels, + out_channels, + stride=stride, + downsample=downsample, + norm_cfg=self.norm_cfg)) + self.in_channels = out_channels * block.expansion + for _ in range(1, blocks): + units.append(block(self.in_channels, out_channels)) + + return nn.Sequential(*units) + + def forward(self, x, skip1, skip2): + out = list() + for i in range(self.num_units): + module_name = f'layer{i + 1}' + module_i = getattr(self, module_name) + x = module_i(x) + if self.has_skip: + x = x + skip1[i] + skip2[i] + out.append(x) + out.reverse() + + return tuple(out) + + +class UpsampleUnit(BaseModule): + """Upsample unit for upsample module. + + Args: + ind (int): Indicates whether to interpolate (>0) and whether to + generate feature map for the next hourglass-like module. + num_units (int): Number of units that form a upsample module. Along + with ind and gen_cross_conv, nm_units is used to decide whether + to generate feature map for the next hourglass-like module. + in_channels (int): Channel number of the skip-in feature maps from + the corresponding downsample unit. + unit_channels (int): Channel number in this unit. Default:256. + gen_skip: (bool): Whether or not to generate skips for the posterior + downsample module. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + ind, + num_units, + in_channels, + unit_channels=256, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.num_units = num_units + self.norm_cfg = norm_cfg + self.in_skip = ConvModule( + in_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + self.relu = nn.ReLU(inplace=True) + + self.ind = ind + if self.ind > 0: + self.up_conv = ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + self.gen_skip = gen_skip + if self.gen_skip: + self.out_skip1 = ConvModule( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.out_skip2 = ConvModule( + unit_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.gen_cross_conv = gen_cross_conv + if self.ind == num_units - 1 and self.gen_cross_conv: + self.cross_conv = ConvModule( + unit_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + def forward(self, x, up_x): + out = self.in_skip(x) + + if self.ind > 0: + up_x = F.interpolate( + up_x, + size=(x.size(2), x.size(3)), + mode='bilinear', + align_corners=True) + up_x = self.up_conv(up_x) + out = out + up_x + out = self.relu(out) + + skip1 = None + skip2 = None + if self.gen_skip: + skip1 = self.out_skip1(x) + skip2 = self.out_skip2(out) + + cross_conv = None + if self.ind == self.num_units - 1 and self.gen_cross_conv: + cross_conv = self.cross_conv(out) + + return out, skip1, skip2, cross_conv + + +class UpsampleModule(BaseModule): + """Upsample module for MSPN. + + Args: + unit_channels (int): Channel number in the upsample units. + Default:256. + num_units (int): Numbers of upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + unit_channels=256, + num_units=4, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = list() + for i in range(num_units): + self.in_channels.append(Bottleneck.expansion * out_channels * + pow(2, i)) + self.in_channels.reverse() + self.num_units = num_units + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.norm_cfg = norm_cfg + for i in range(num_units): + module_name = f'up{i + 1}' + self.add_module( + module_name, + UpsampleUnit( + i, + self.num_units, + self.in_channels[i], + unit_channels, + self.gen_skip, + self.gen_cross_conv, + norm_cfg=self.norm_cfg, + out_channels=64)) + + def forward(self, x): + out = list() + skip1 = list() + skip2 = list() + cross_conv = None + for i in range(self.num_units): + module_i = getattr(self, f'up{i + 1}') + if i == 0: + outi, skip1_i, skip2_i, _ = module_i(x[i], None) + elif i == self.num_units - 1: + outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) + else: + outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) + out.append(outi) + skip1.append(skip1_i) + skip2.append(skip2_i) + skip1.reverse() + skip2.reverse() + + return out, skip1, skip2, cross_conv + + +class SingleStageNetwork(BaseModule): + """Single_stage Network. + + Args: + unit_channels (int): Channel number in the upsample units. Default:256. + num_units (int): Numbers of downsample/upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_blocks (list): Number of blocks in each downsample unit. + Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the feature from ResNetTop. + Default: 64. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + has_skip=False, + gen_skip=False, + gen_cross_conv=False, + unit_channels=256, + num_units=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + in_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + assert len(num_blocks) == num_units + self.has_skip = has_skip + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.num_units = num_units + self.unit_channels = unit_channels + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + self.downsample = DownsampleModule(Bottleneck, num_blocks, num_units, + has_skip, norm_cfg, in_channels) + self.upsample = UpsampleModule(unit_channels, num_units, gen_skip, + gen_cross_conv, norm_cfg, in_channels) + + def forward(self, x, skip1, skip2): + mid = self.downsample(x, skip1, skip2) + out, skip1, skip2, cross_conv = self.upsample(mid) + + return out, skip1, skip2, cross_conv + + +class ResNetTop(BaseModule): + """ResNet top for MSPN. + + Args: + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + channels (int): Number of channels of the feature output by ResNetTop. 
+ init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.top = nn.Sequential( + ConvModule( + 3, + channels, + kernel_size=7, + stride=2, + padding=3, + norm_cfg=norm_cfg, + inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) + + def forward(self, img): + return self.top(img) + + +@MODELS.register_module() +class MSPN(BaseBackbone): + """MSPN backbone. Paper ref: Li et al. "Rethinking on Multi-Stage Networks + for Human Pose Estimation" (CVPR 2020). + + Args: + unit_channels (int): Number of Channels in an upsample unit. + Default: 256 + num_stages (int): Number of stages in a multi-stage MSPN. Default: 4 + num_units (int): Number of downsample/upsample units in a single-stage + network. Default: 4 + Note: Make sure num_units == len(self.num_blocks) + num_blocks (list): Number of bottlenecks in each + downsample unit. Default: [2, 2, 2, 2] + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + res_top_channels (int): Number of channels of feature from ResNetTop. + Default: 64. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + + Example: + >>> from mmpose.models import MSPN + >>> import torch + >>> self = MSPN(num_stages=2,num_units=2,num_blocks=[2,2]) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... for feature in level_output: + ... print(tuple(feature.shape)) + ... 
+ (1, 256, 64, 64) + (1, 256, 128, 128) + (1, 256, 64, 64) + (1, 256, 128, 128) + """ + + def __init__(self, + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + res_top_channels=64, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + self.unit_channels = unit_channels + self.num_stages = num_stages + self.num_units = num_units + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + assert self.num_stages > 0 + assert self.num_units > 1 + assert self.num_units == len(self.num_blocks) + self.top = ResNetTop(norm_cfg=norm_cfg) + self.multi_stage_mspn = nn.ModuleList([]) + for i in range(self.num_stages): + if i == 0: + has_skip = False + else: + has_skip = True + if i != self.num_stages - 1: + gen_skip = True + gen_cross_conv = True + else: + gen_skip = False + gen_cross_conv = False + self.multi_stage_mspn.append( + SingleStageNetwork(has_skip, gen_skip, gen_cross_conv, + unit_channels, num_units, num_blocks, + norm_cfg, res_top_channels)) + + def forward(self, x): + """Model forward function.""" + out_feats = [] + skip1 = None + skip2 = None + x = self.top(x) + for i in range(self.num_stages): + out, skip1, skip2, x = self.multi_stage_mspn[i](x, skip1, skip2) + out_feats.append(out) + + return out_feats + + def init_weights(self): + """Initialize model weights.""" + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + logger = get_root_logger() + state_dict_tmp = get_state_dict(self.init_cfg['checkpoint']) + state_dict = OrderedDict() + state_dict['top'] = OrderedDict() + state_dict['bottlenecks'] = OrderedDict() + for k, v in state_dict_tmp.items(): + if k.startswith('layer'): + if 'downsample.0' in k: + state_dict['bottlenecks'][k.replace( + 'downsample.0', 'downsample.conv')] = v + elif 'downsample.1' in k: + state_dict['bottlenecks'][k.replace( + 'downsample.1', 'downsample.bn')] = v + else: + state_dict['bottlenecks'][k] = v + elif k.startswith('conv1'): + state_dict['top'][k.replace('conv1', 'top.0.conv')] = v + elif k.startswith('bn1'): + state_dict['top'][k.replace('bn1', 'top.0.bn')] = v + + load_state_dict( + self.top, state_dict['top'], strict=False, logger=logger) + for i in range(self.num_stages): + load_state_dict( + self.multi_stage_mspn[i].downsample, + state_dict['bottlenecks'], + strict=False, + logger=logger) + else: + super(MSPN, self).init_weights() diff --git a/mmpose/models/backbones/pvt.py b/mmpose/models/backbones/pvt.py new file mode 100644 index 0000000000000000000000000000000000000000..3f2b6495482b4feadd86f51fa11b64ee10878fef --- /dev/null +++ b/mmpose/models/backbones/pvt.py @@ -0,0 +1,569 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
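+# This module implements the Pyramid Vision Transformer (PVT) backbone and its
+# building blocks: MixFFN, SpatialReductionAttention, PVTEncoderLayer,
+# AbsolutePositionEmbedding, PyramidVisionTransformer and the
+# PyramidVisionTransformerV2 variant.
+#
+# Minimal usage sketch (assuming the standard MMEngine registry workflow; the
+# config values below are illustrative only):
+#
+#   import torch
+#   from mmpose.registry import MODELS
+#   backbone = MODELS.build(
+#       dict(type='PyramidVisionTransformerV2', out_indices=(0, 1, 2, 3)))
+#   feats = backbone(torch.rand(1, 3, 224, 224))  # one feature map per index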
+import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmengine.model import BaseModule, ModuleList, Sequential +from mmengine.model.weight_init import trunc_normal_ +from mmengine.runner import load_state_dict +from mmengine.utils import to_2tuple + +from mmpose.registry import MODELS +from ...utils import get_root_logger +from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert +from .utils import get_state_dict + + +class MixFFN(BaseModule): + """An implementation of MixFFN of PVT. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Depth-wise Conv to encode positional information. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. + feedforward_channels (int): The hidden dimension of FFNs. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + Default: None. + use_conv (bool): If True, add 3x3 DWConv between two Linear layers. + Defaults: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + dropout_layer=None, + use_conv=False, + init_cfg=None): + super(MixFFN, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + activate = build_activation_layer(act_cfg) + + in_channels = embed_dims + fc1 = Conv2d( + in_channels=in_channels, + out_channels=feedforward_channels, + kernel_size=1, + stride=1, + bias=True) + if use_conv: + # 3x3 depth wise conv to provide positional encode information + dw_conv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=(3 - 1) // 2, + bias=True, + groups=feedforward_channels) + fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=in_channels, + kernel_size=1, + stride=1, + bias=True) + drop = nn.Dropout(ffn_drop) + layers = [fc1, activate, drop, fc2, drop] + if use_conv: + layers.insert(1, dw_conv) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + + def forward(self, x, hw_shape, identity=None): + out = nlc_to_nchw(x, hw_shape) + out = self.layers(out) + out = nchw_to_nlc(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class SpatialReductionAttention(MultiheadAttention): + """An implementation of Spatial Reduction Attention of PVT. + + This module is modified from MultiheadAttention which is a module from + mmcv.cnn.bricks.transformer. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. 
+ batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default: True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + batch_first=True, + qkv_bias=True, + norm_cfg=dict(type='LN'), + sr_ratio=1, + init_cfg=None): + super().__init__( + embed_dims, + num_heads, + attn_drop, + proj_drop, + batch_first=batch_first, + dropout_layer=dropout_layer, + bias=qkv_bias, + init_cfg=init_cfg) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa + from mmpose import digit_version, mmcv_version + if mmcv_version < digit_version('1.3.17'): + warnings.warn('The legacy version of forward function in' + 'SpatialReductionAttention is deprecated in' + 'mmcv>=1.3.17 and will no longer support in the' + 'future. Please upgrade your mmcv.') + self.forward = self.legacy_forward + + def forward(self, x, hw_shape, identity=None): + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + x_q = x_q.transpose(0, 1) + x_kv = x_kv.transpose(0, 1) + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + def legacy_forward(self, x, hw_shape, identity=None): + """multi head attention forward in mmcv version < 1.3.17.""" + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class PVTEncoderLayer(BaseModule): + """Implements one encoder layer in PVT. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): stochastic depth rate. Default: 0.0. + qkv_bias (bool): enable bias for qkv if True. + Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). 
+ sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1, + use_conv_ffn=False, + init_cfg=None): + super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) + + # The ret[0] of build_norm_layer is norm name. + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = SpatialReductionAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + # The ret[0] of build_norm_layer is norm name. + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.ffn = MixFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + use_conv=use_conv_ffn, + act_cfg=act_cfg) + + def forward(self, x, hw_shape): + x = self.attn(self.norm1(x), hw_shape, identity=x) + x = self.ffn(self.norm2(x), hw_shape, identity=x) + + return x + + +class AbsolutePositionEmbedding(BaseModule): + """An implementation of the absolute position embedding in PVT. + + Args: + pos_shape (int): The shape of the absolute position embedding. + pos_dim (int): The dimension of the absolute position embedding. + drop_rate (float): Probability of an element to be zeroed. + Default: 0.0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(pos_shape, int): + pos_shape = to_2tuple(pos_shape) + elif isinstance(pos_shape, tuple): + if len(pos_shape) == 1: + pos_shape = to_2tuple(pos_shape[0]) + assert len(pos_shape) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pos_shape)}' + self.pos_shape = pos_shape + self.pos_dim = pos_dim + + self.pos_embed = nn.Parameter( + torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) + self.drop = nn.Dropout(p=drop_rate) + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + + def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): + """Resize pos_embed weights. + + Resize pos_embed using bilinear interpolate method. + + Args: + pos_embed (torch.Tensor): Position embedding weights. + input_shape (tuple): Tuple for (downsampled input image height, + downsampled input image width). + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'bilinear'``. + + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C]. 
+ """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + pos_h, pos_w = self.pos_shape + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() + pos_embed_weight = F.interpolate( + pos_embed_weight, size=input_shape, mode=mode) + pos_embed_weight = torch.flatten(pos_embed_weight, + 2).transpose(1, 2).contiguous() + pos_embed = pos_embed_weight + + return pos_embed + + def forward(self, x, hw_shape, mode='bilinear'): + pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) + return self.drop(x + pos_embed) + + +@MODELS.register_module() +class PyramidVisionTransformer(BaseModule): + """Pyramid Vision Transformer (PVT) + + Implementation of `Pyramid Vision Transformer: A Versatile Backbone for + Dense Prediction without Convolutions + `_. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 64. + num_stags (int): The num of stages. Default: 4. + num_layers (Sequence[int]): The layer number of each transformer encode + layer. Default: [3, 4, 6, 3]. + num_heads (Sequence[int]): The attention heads of each transformer + encode layer. Default: [1, 2, 5, 8]. + patch_sizes (Sequence[int]): The patch_size of each patch embedding. + Default: [4, 2, 2, 2]. + strides (Sequence[int]): The stride of each patch embedding. + Default: [4, 2, 2, 2]. + paddings (Sequence[int]): The padding of each patch embedding. + Default: [0, 0, 0, 0]. + sr_ratios (Sequence[int]): The spatial reduction rate of each + transformer encode layer. Default: [8, 4, 2, 1]. + out_indices (Sequence[int] | int): Output from which stages. + Default: (0, 1, 2, 3). + mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the + embedding dim of each transformer encode layer. + Default: [8, 8, 4, 4]. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: True. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + pretrained (str, optional): model pretrained path. Default: None. + convert_weights (bool): The flag indicates whether the + pre-trained model is from the original repo. We may need + to convert some keys to make it compatible. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='TruncNormal', std=.02, layer=['Linear']), + dict(type='Constant', val=1, layer=['LayerNorm']), + dict(type='Normal', std=0.01, layer=['Conv2d']) + ]`` + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=64, + num_stages=4, + num_layers=[3, 4, 6, 3], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + paddings=[0, 0, 0, 0], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=True, + norm_after_stage=False, + use_conv_ffn=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + convert_weights=True, + init_cfg=[ + dict(type='TruncNormal', std=.02, layer=['Linear']), + dict(type='Constant', val=1, layer=['LayerNorm']), + dict(type='Kaiming', layer=['Conv2d']) + ]): + super().__init__(init_cfg=init_cfg) + + self.convert_weights = convert_weights + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + self.embed_dims = embed_dims + + self.num_stages = num_stages + self.num_layers = num_layers + self.num_heads = num_heads + self.patch_sizes = patch_sizes + self.strides = strides + self.sr_ratios = sr_ratios + assert num_stages == len(num_layers) == len(num_heads) \ + == len(patch_sizes) == len(strides) == len(sr_ratios) + + self.out_indices = out_indices + assert max(out_indices) < self.num_stages + + # transformer encoder + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(num_layers)) + ] # stochastic num_layer decay rule + + cur = 0 + self.layers = ModuleList() + for i, num_layer in enumerate(num_layers): + embed_dims_i = embed_dims * num_heads[i] + patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims_i, + kernel_size=patch_sizes[i], + stride=strides[i], + padding=paddings[i], + bias=True, + norm_cfg=norm_cfg) + + layers = ModuleList() + if use_abs_pos_embed: + pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) + pos_embed = AbsolutePositionEmbedding( + pos_shape=pos_shape, + pos_dim=embed_dims_i, + drop_rate=drop_rate) + layers.append(pos_embed) + layers.extend([ + PVTEncoderLayer( + embed_dims=embed_dims_i, + num_heads=num_heads[i], + feedforward_channels=mlp_ratios[i] * embed_dims_i, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[cur + idx], + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + sr_ratio=sr_ratios[i], + use_conv_ffn=use_conv_ffn) for idx in range(num_layer) + ]) + in_channels = embed_dims_i + # The ret[0] of build_norm_layer is norm name. 
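+            # Each stage is stored as ModuleList([patch_embed, encoder layers,
+            # norm]); `norm_after_stage` decides whether the stage ends with a
+            # norm layer built from norm_cfg or a no-op nn.Identity().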
+ if norm_after_stage: + norm = build_norm_layer(norm_cfg, embed_dims_i)[1] + else: + norm = nn.Identity() + self.layers.append(ModuleList([patch_embed, layers, norm])) + cur += num_layer + + def init_weights(self): + """Initialize the weights in backbone.""" + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + logger = get_root_logger() + state_dict = get_state_dict( + self.init_cfg['checkpoint'], map_location='cpu') + logger.warn(f'Load pre-trained model for ' + f'{self.__class__.__name__} from original repo') + + if self.convert_weights: + # Because pvt backbones are not supported by mmcls, + # so we need to convert pre-trained weights to match this + # implementation. + state_dict = pvt_convert(state_dict) + load_state_dict(self, state_dict, strict=False, logger=logger) + + else: + super(PyramidVisionTransformer, self).init_weights() + + def forward(self, x): + outs = [] + + for i, layer in enumerate(self.layers): + x, hw_shape = layer[0](x) + + for block in layer[1]: + x = block(x, hw_shape) + x = layer[2](x) + x = nlc_to_nchw(x, hw_shape) + if i in self.out_indices: + outs.append(x) + + return outs + + +@MODELS.register_module() +class PyramidVisionTransformerV2(PyramidVisionTransformer): + """Implementation of `PVTv2: Improved Baselines with Pyramid Vision + Transformer `_.""" + + def __init__(self, **kwargs): + super(PyramidVisionTransformerV2, self).__init__( + patch_sizes=[7, 3, 3, 3], + paddings=[3, 1, 1, 1], + use_abs_pos_embed=False, + norm_after_stage=True, + use_conv_ffn=True, + **kwargs) diff --git a/mmpose/models/backbones/regnet.py b/mmpose/models/backbones/regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..120523e658ecb2b3134eba45508ac47457a87f1d --- /dev/null +++ b/mmpose/models/backbones/regnet.py @@ -0,0 +1,331 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import ResNet +from .resnext import Bottleneck + + +@MODELS.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `__ . + + Args: + arch (dict): The parameter of RegNets. + - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: "pytorch". + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
+ zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import RegNet + >>> import torch + >>> self = RegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0), + out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__(self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super(ResNet, self).__init__(init_cfg=init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the' \ + ' arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + if self.deep_stem: + 
raise NotImplementedError( + 'deep_stem has not been implemented for RegNet') + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + _in_channels = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + res_layer = self.make_res_layer( + block=Bottleneck, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=self.stage_widths[i], + expansion=1, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + base_channels=self.stage_widths[i], + groups=stage_groups, + width_per_group=group_width) + _in_channels = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def generate_regnet(initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int, optional): The divisor of channels. Defaults to 8. + + Returns: + list, int: return a list of widths of each stage and the number of + stages + """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divior. + + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. + """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. 
+ """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. + + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmpose/models/backbones/resnest.py b/mmpose/models/backbones/resnest.py new file mode 100644 index 0000000000000000000000000000000000000000..b5eea8ad7e50c2ab997e2df17316943fcaf3a5fe --- /dev/null +++ b/mmpose/models/backbones/resnest.py @@ -0,0 +1,353 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(BaseModule): + """Split-Attention Conv2d. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + return getattr(self, self.norm0_name) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + width_per_group=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = SplitAttentionConv2d( + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Please refer to the `paper `__ + for details. + + Args: + depth (int): Network depth, from {50, 101, 152, 200}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)), + 269: (Bottleneck, (3, 30, 48, 8)) + } + + def __init__(self, + depth, + groups=1, + width_per_group=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.width_per_group = width_per_group + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super().__init__(depth=depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/mmpose/models/backbones/resnet.py b/mmpose/models/backbones/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..a04853f60d179ee2450ca199b0a8c28ae893941f --- /dev/null +++ b/mmpose/models/backbones/resnet.py @@ -0,0 +1,715 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class BasicBlock(BaseModule): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: the normalization layer named "norm3" """ + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. 
+ """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + else: # downsample_first=False is for HourglassModule + for i in range(0, num_blocks - 1): + layers.append( + block( + in_channels=in_channels, + out_channels=in_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + super().__init__(*layers) + + +@MODELS.register_module() +class ResNet(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. 
+ in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import ResNet + >>> import torch + >>> self = ResNet(depth=18, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + _in_channels = _out_channels + _out_channels *= 2 + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + """Make a ResLayer.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + """Make stem layer.""" + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, 
+ kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(ResNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@MODELS.register_module() +class ResNetV1d(ResNet): + r"""ResNetV1d variant described in `Bag of Tricks + `__. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super().__init__(deep_stem=True, avg_down=True, **kwargs) diff --git a/mmpose/models/backbones/resnext.py b/mmpose/models/backbones/resnext.py new file mode 100644 index 0000000000000000000000000000000000000000..241f83a11449d3e816d4dbb16bd5715cf9ba6e3f --- /dev/null +++ b/mmpose/models/backbones/resnext.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. 
+ conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import ResNeXt + >>> import torch + >>> self = ResNeXt(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super().__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpose/models/backbones/rsn.py b/mmpose/models/backbones/rsn.py new file mode 100644 index 0000000000000000000000000000000000000000..8267d23d952f9639dff524cfea8e8d111ce19584 --- /dev/null +++ b/mmpose/models/backbones/rsn.py @@ -0,0 +1,640 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class RSB(BaseModule): + """Residual Steps block for RSN. Paper ref: Cai et al. "Learning Delicate + Local Representations for Multi-Person Pose Estimation" (ECCV 2020). + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + num_steps (int): Numbers of steps in RSB + stride (int): stride of the block. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + expand_times (int): Times by which the in_channels are expanded. + Default:26. + res_top_channels (int): Number of channels of feature output by + ResNet_top. Default:64. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + expansion = 1 + + def __init__(self, + in_channels, + out_channels, + num_steps=4, + stride=1, + downsample=None, + with_cp=False, + norm_cfg=dict(type='BN'), + expand_times=26, + res_top_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert num_steps > 1 + self.in_channels = in_channels + self.branch_channels = self.in_channels * expand_times + self.branch_channels //= res_top_channels + self.out_channels = out_channels + self.stride = stride + self.downsample = downsample + self.with_cp = with_cp + self.norm_cfg = norm_cfg + self.num_steps = num_steps + self.conv_bn_relu1 = ConvModule( + self.in_channels, + self.num_steps * self.branch_channels, + kernel_size=1, + stride=self.stride, + padding=0, + norm_cfg=self.norm_cfg, + inplace=False) + for i in range(self.num_steps): + for j in range(i + 1): + module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' + self.add_module( + module_name, + ConvModule( + self.branch_channels, + self.branch_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + inplace=False)) + self.conv_bn3 = ConvModule( + self.num_steps * self.branch_channels, + self.out_channels * self.expansion, + kernel_size=1, + stride=1, + padding=0, + act_cfg=None, + norm_cfg=self.norm_cfg, + inplace=False) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + """Forward function.""" + + identity = x + x = self.conv_bn_relu1(x) + spx = torch.split(x, self.branch_channels, 1) + outputs = list() + outs = list() + for i in range(self.num_steps): + outputs_i = list() + outputs.append(outputs_i) + for j in range(i + 1): + if j == 0: + inputs = spx[i] + else: + inputs = outputs[i][j - 1] + if i > j: + inputs = inputs + outputs[i - 1][j] + module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' + module_i_j = getattr(self, module_name) + outputs[i].append(module_i_j(inputs)) + + outs.append(outputs[i][i]) + out = torch.cat(tuple(outs), 1) + out = self.conv_bn3(out) + + if self.downsample is not None: + identity = self.downsample(identity) + out = out + identity + + out = self.relu(out) + + return out + + +class Downsample_module(BaseModule): + """Downsample module for RSN. + + Args: + block (nn.Module): Downsample block. + num_blocks (list): Number of blocks in each downsample unit. + num_units (int): Numbers of downsample units. Default: 4 + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_steps (int): Number of steps in a block. Default:4 + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the input feature to + downsample module. Default: 64 + expand_times (int): Times by which the in_channels are expanded. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + block, + num_blocks, + num_steps=4, + num_units=4, + has_skip=False, + norm_cfg=dict(type='BN'), + in_channels=64, + expand_times=26, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.has_skip = has_skip + self.in_channels = in_channels + assert len(num_blocks) == num_units + self.num_blocks = num_blocks + self.num_units = num_units + self.num_steps = num_steps + self.norm_cfg = norm_cfg + self.layer1 = self._make_layer( + block, + in_channels, + num_blocks[0], + expand_times=expand_times, + res_top_channels=in_channels) + for i in range(1, num_units): + module_name = f'layer{i + 1}' + self.add_module( + module_name, + self._make_layer( + block, + in_channels * pow(2, i), + num_blocks[i], + stride=2, + expand_times=expand_times, + res_top_channels=in_channels)) + + def _make_layer(self, + block, + out_channels, + blocks, + stride=1, + expand_times=26, + res_top_channels=64): + downsample = None + if stride != 1 or self.in_channels != out_channels * block.expansion: + downsample = ConvModule( + self.in_channels, + out_channels * block.expansion, + kernel_size=1, + stride=stride, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + units = list() + units.append( + block( + self.in_channels, + out_channels, + num_steps=self.num_steps, + stride=stride, + downsample=downsample, + norm_cfg=self.norm_cfg, + expand_times=expand_times, + res_top_channels=res_top_channels)) + self.in_channels = out_channels * block.expansion + for _ in range(1, blocks): + units.append( + block( + self.in_channels, + out_channels, + num_steps=self.num_steps, + expand_times=expand_times, + res_top_channels=res_top_channels)) + + return nn.Sequential(*units) + + def forward(self, x, skip1, skip2): + out = list() + for i in range(self.num_units): + module_name = f'layer{i + 1}' + module_i = getattr(self, module_name) + x = module_i(x) + if self.has_skip: + x = x + skip1[i] + skip2[i] + out.append(x) + out.reverse() + + return tuple(out) + + +class Upsample_unit(BaseModule): + """Upsample unit for upsample module. + + Args: + ind (int): Indicates whether to interpolate (>0) and whether to + generate feature map for the next hourglass-like module. + num_units (int): Number of units that form a upsample module. Along + with ind and gen_cross_conv, nm_units is used to decide whether + to generate feature map for the next hourglass-like module. + in_channels (int): Channel number of the skip-in feature maps from + the corresponding downsample unit. + unit_channels (int): Channel number in this unit. Default:256. + gen_skip: (bool): Whether or not to generate skips for the posterior + downsample module. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (in): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + ind, + num_units, + in_channels, + unit_channels=256, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.num_units = num_units + self.norm_cfg = norm_cfg + self.in_skip = ConvModule( + in_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + self.relu = nn.ReLU(inplace=True) + + self.ind = ind + if self.ind > 0: + self.up_conv = ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + self.gen_skip = gen_skip + if self.gen_skip: + self.out_skip1 = ConvModule( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.out_skip2 = ConvModule( + unit_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.gen_cross_conv = gen_cross_conv + if self.ind == num_units - 1 and self.gen_cross_conv: + self.cross_conv = ConvModule( + unit_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + def forward(self, x, up_x): + out = self.in_skip(x) + + if self.ind > 0: + up_x = F.interpolate( + up_x, + size=(x.size(2), x.size(3)), + mode='bilinear', + align_corners=True) + up_x = self.up_conv(up_x) + out = out + up_x + out = self.relu(out) + + skip1 = None + skip2 = None + if self.gen_skip: + skip1 = self.out_skip1(x) + skip2 = self.out_skip2(out) + + cross_conv = None + if self.ind == self.num_units - 1 and self.gen_cross_conv: + cross_conv = self.cross_conv(out) + + return out, skip1, skip2, cross_conv + + +class Upsample_module(BaseModule): + """Upsample module for RSN. + + Args: + unit_channels (int): Channel number in the upsample units. + Default:256. + num_units (int): Numbers of upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + unit_channels=256, + num_units=4, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = list() + for i in range(num_units): + self.in_channels.append(RSB.expansion * out_channels * pow(2, i)) + self.in_channels.reverse() + self.num_units = num_units + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.norm_cfg = norm_cfg + for i in range(num_units): + module_name = f'up{i + 1}' + self.add_module( + module_name, + Upsample_unit( + i, + self.num_units, + self.in_channels[i], + unit_channels, + self.gen_skip, + self.gen_cross_conv, + norm_cfg=self.norm_cfg, + out_channels=64)) + + def forward(self, x): + out = list() + skip1 = list() + skip2 = list() + cross_conv = None + for i in range(self.num_units): + module_i = getattr(self, f'up{i + 1}') + if i == 0: + outi, skip1_i, skip2_i, _ = module_i(x[i], None) + elif i == self.num_units - 1: + outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) + else: + outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) + out.append(outi) + skip1.append(skip1_i) + skip2.append(skip2_i) + skip1.reverse() + skip2.reverse() + + return out, skip1, skip2, cross_conv + + +class Single_stage_RSN(BaseModule): + """Single_stage Residual Steps Network. + + Args: + unit_channels (int): Channel number in the upsample units. Default:256. + num_units (int): Numbers of downsample/upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_steps (int): Number of steps in RSB. Default: 4 + num_blocks (list): Number of blocks in each downsample unit. + Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the feature from ResNet_Top. + Default: 64. + expand_times (int): Times by which the in_channels are expanded in RSB. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + has_skip=False, + gen_skip=False, + gen_cross_conv=False, + unit_channels=256, + num_units=4, + num_steps=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + in_channels=64, + expand_times=26, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + assert len(num_blocks) == num_units + self.has_skip = has_skip + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.num_units = num_units + self.num_steps = num_steps + self.unit_channels = unit_channels + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + self.downsample = Downsample_module(RSB, num_blocks, num_steps, + num_units, has_skip, norm_cfg, + in_channels, expand_times) + self.upsample = Upsample_module(unit_channels, num_units, gen_skip, + gen_cross_conv, norm_cfg, in_channels) + + def forward(self, x, skip1, skip2): + mid = self.downsample(x, skip1, skip2) + out, skip1, skip2, cross_conv = self.upsample(mid) + + return out, skip1, skip2, cross_conv + + +class ResNet_top(BaseModule): + """ResNet top for RSN. + + Args: + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + channels (int): Number of channels of the feature output by ResNet_top. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.top = nn.Sequential( + ConvModule( + 3, + channels, + kernel_size=7, + stride=2, + padding=3, + norm_cfg=norm_cfg, + inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) + + def forward(self, img): + return self.top(img) + + +@MODELS.register_module() +class RSN(BaseBackbone): + """Residual Steps Network backbone. Paper ref: Cai et al. "Learning + Delicate Local Representations for Multi-Person Pose Estimation" (ECCV + 2020). + + Args: + unit_channels (int): Number of Channels in an upsample unit. + Default: 256 + num_stages (int): Number of stages in a multi-stage RSN. Default: 4 + num_units (int): NUmber of downsample/upsample units in a single-stage + RSN. Default: 4 Note: Make sure num_units == len(self.num_blocks) + num_blocks (list): Number of RSBs (Residual Steps Block) in each + downsample unit. Default: [2, 2, 2, 2] + num_steps (int): Number of steps in a RSB. Default:4 + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + res_top_channels (int): Number of channels of feature from ResNet_top. + Default: 64. + expand_times (int): Times by which the in_channels are expanded in RSB. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + Example: + >>> from mmpose.models import RSN + >>> import torch + >>> self = RSN(num_stages=2,num_units=2,num_blocks=[2,2]) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... for feature in level_output: + ... print(tuple(feature.shape)) + ... 
+ (1, 256, 64, 64) + (1, 256, 128, 128) + (1, 256, 64, 64) + (1, 256, 128, 128) + """ + + def __init__(self, + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[2, 2, 2, 2], + num_steps=4, + norm_cfg=dict(type='BN'), + res_top_channels=64, + expand_times=26, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + self.unit_channels = unit_channels + self.num_stages = num_stages + self.num_units = num_units + self.num_blocks = num_blocks + self.num_steps = num_steps + self.norm_cfg = norm_cfg + + assert self.num_stages > 0 + assert self.num_steps > 1 + assert self.num_units > 1 + assert self.num_units == len(self.num_blocks) + self.top = ResNet_top(norm_cfg=norm_cfg) + self.multi_stage_rsn = nn.ModuleList([]) + for i in range(self.num_stages): + if i == 0: + has_skip = False + else: + has_skip = True + if i != self.num_stages - 1: + gen_skip = True + gen_cross_conv = True + else: + gen_skip = False + gen_cross_conv = False + self.multi_stage_rsn.append( + Single_stage_RSN(has_skip, gen_skip, gen_cross_conv, + unit_channels, num_units, num_steps, + num_blocks, norm_cfg, res_top_channels, + expand_times)) + + def forward(self, x): + """Model forward function.""" + out_feats = [] + skip1 = None + skip2 = None + x = self.top(x) + for i in range(self.num_stages): + out, skip1, skip2, x = self.multi_stage_rsn[i](x, skip1, skip2) + out_feats.append(out) + + return out_feats diff --git a/mmpose/models/backbones/scnet.py b/mmpose/models/backbones/scnet.py new file mode 100644 index 0000000000000000000000000000000000000000..5c802d256e711aa70c955ac5bb91d2f7ff724604 --- /dev/null +++ b/mmpose/models/backbones/scnet.py @@ -0,0 +1,252 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .resnet import Bottleneck, ResNet + + +class SCConv(BaseModule): + """SCConv (Self-calibrated Convolution) + + Args: + in_channels (int): The input channels of the SCConv. + out_channels (int): The output channel of the SCConv. + stride (int): stride of SCConv. + pooling_r (int): size of pooling for scconv. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride, + pooling_r, + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.1), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + assert in_channels == out_channels + + self.k2 = nn.Sequential( + nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r), + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(norm_cfg, in_channels)[1], + ) + self.k3 = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(norm_cfg, in_channels)[1], + ) + self.k4 = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + nn.ReLU(inplace=True), + ) + + def forward(self, x): + """Forward function.""" + identity = x + + out = torch.sigmoid( + torch.add(identity, F.interpolate(self.k2(x), + identity.size()[2:]))) + out = torch.mul(self.k3(x), out) + out = self.k4(out) + + return out + + +class SCBottleneck(Bottleneck): + """SC(Self-calibrated) Bottleneck. + + Args: + in_channels (int): The input channels of the SCBottleneck block. + out_channels (int): The output channel of the SCBottleneck block. + """ + + pooling_r = 4 + + def __init__(self, in_channels, out_channels, **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.mid_channels = out_channels // self.expansion // 2 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm1_name, norm1) + + self.k1 = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.stride, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, self.mid_channels)[1], + nn.ReLU(inplace=True)) + + self.conv2 = build_conv_layer( + self.conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.scconv = SCConv(self.mid_channels, self.mid_channels, self.stride, + self.pooling_r, self.conv_cfg, self.norm_cfg) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels * 2, + out_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out_a = self.conv1(x) + out_a = self.norm1(out_a) + out_a = self.relu(out_a) + + out_a = self.k1(out_a) + + out_b = self.conv2(x) + out_b = self.norm2(out_b) + out_b = self.relu(out_b) + + out_b = self.scconv(out_b) + + out = self.conv3(torch.cat([out_a, out_b], dim=1)) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class SCNet(ResNet): + """SCNet 
backbone. + + Improving Convolutional Networks with Self-Calibrated Convolutions, + Jiang-Jiang Liu, Qibin Hou, Ming-Ming Cheng, Changhu Wang, Jiashi Feng, + IEEE CVPR, 2020. + http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf + + Args: + depth (int): Depth of scnet, from {50, 101}. + in_channels (int): Number of input image channels. Normally 3. + base_channels (int): Number of base channels of hidden layer. + num_stages (int): SCNet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + + Example: + >>> from mmpose.models import SCNet + >>> import torch + >>> self = SCNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + + arch_settings = { + 50: (SCBottleneck, [3, 4, 6, 3]), + 101: (SCBottleneck, [3, 4, 23, 3]) + } + + def __init__(self, depth, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SCNet') + super().__init__(depth, **kwargs) diff --git a/mmpose/models/backbones/seresnet.py b/mmpose/models/backbones/seresnet.py new file mode 100644 index 0000000000000000000000000000000000000000..617a1b72bee737ef0f3fb305e83ce33d8c8a7ea1 --- /dev/null +++ b/mmpose/models/backbones/seresnet.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.utils.checkpoint as cp + +from mmpose.registry import MODELS +from .resnet import Bottleneck, ResLayer, ResNet +from .utils.se_layer import SELayer + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. + out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. 
Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class SEResNet(ResNet): + """SEResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import SEResNet + >>> import torch + >>> self = SEResNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, se_ratio=16, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SEResNet') + self.se_ratio = se_ratio + super().__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer(se_ratio=self.se_ratio, **kwargs) diff --git a/mmpose/models/backbones/seresnext.py b/mmpose/models/backbones/seresnext.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f5a6c8f3fe6b602aceb331781cd119958518b7 --- /dev/null +++ b/mmpose/models/backbones/seresnext.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import ResLayer +from .seresnet import SEBottleneck as _SEBottleneck +from .seresnet import SEResNet + + +class SEBottleneck(_SEBottleneck): + """SEBottleneck block for SEResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + base_channels (int): Middle channels of the first stage. Default: 64. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + se_ratio=16, + **kwargs): + super().__init__(in_channels, out_channels, se_ratio, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # We follow the same rational of ResNext to compute mid_channels. + # For SEResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for SEResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
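+        # For example, with the default SEResNeXt-50 32x4d setting
+        # (groups=32, width_per_group=4, base_channels=64), the first stage
+        # starts from mid_channels=64 and is widened to
+        # 32 * 4 * 64 // 64 = 128 channels for conv2.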
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class SEResNeXt(SEResNet): + """SEResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+            Default:
+            ``[
+                dict(type='Kaiming', layer=['Conv2d']),
+                dict(
+                    type='Constant',
+                    val=1,
+                    layer=['_BatchNorm', 'GroupNorm'])
+            ]``
+
+    Example:
+        >>> from mmpose.models import SEResNeXt
+        >>> import torch
+        >>> self = SEResNeXt(depth=50, out_indices=(0, 1, 2, 3))
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 56, 56)
+        (1, 512, 28, 28)
+        (1, 1024, 14, 14)
+        (1, 2048, 7, 7)
+    """
+
+    arch_settings = {
+        50: (SEBottleneck, (3, 4, 6, 3)),
+        101: (SEBottleneck, (3, 4, 23, 3)),
+        152: (SEBottleneck, (3, 8, 36, 3))
+    }
+
+    def __init__(self, depth, groups=32, width_per_group=4, **kwargs):
+        self.groups = groups
+        self.width_per_group = width_per_group
+        super().__init__(depth, **kwargs)
+
+    def make_res_layer(self, **kwargs):
+        return ResLayer(
+            groups=self.groups,
+            width_per_group=self.width_per_group,
+            base_channels=self.base_channels,
+            **kwargs)
diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..17491910e9c1c2ec4eea04ca715dc91293f00cd4
--- /dev/null
+++ b/mmpose/models/backbones/shufflenet_v1.py
@@ -0,0 +1,338 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import ConvModule, build_activation_layer
+from mmengine.model import BaseModule
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmpose.registry import MODELS
+from .base_backbone import BaseBackbone
+from .utils import channel_shuffle, make_divisible
+
+
+class ShuffleUnit(BaseModule):
+    """ShuffleUnit block.
+
+    ShuffleNet unit with pointwise group convolution (GConv) and channel
+    shuffle.
+
+    Args:
+        in_channels (int): The input channels of the ShuffleUnit.
+        out_channels (int): The output channels of the ShuffleUnit.
+        groups (int, optional): The number of groups to be used in grouped 1x1
+            convolutions in each ShuffleUnit. Default: 3
+        first_block (bool, optional): Whether it is the first ShuffleUnit of a
+            sequential ShuffleUnits. Default: True, which means not using the
+            grouped 1x1 convolution.
+        combine (str, optional): The ways to combine the input and output
+            branches. Default: 'add'.
+        conv_cfg (dict): Config dict for convolution layer. Default: None,
+            which means using conv2d.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='BN').
+        act_cfg (dict): Config dict for activation layer.
+            Default: dict(type='ReLU').
+        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
+            will save some memory while slowing down the training speed.
+            Default: False.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+            Default: None
+
+    Returns:
+        Tensor: The output tensor.
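+
+    Example:
+        >>> # Illustrative sketch only: the values below assume the
+        >>> # ``groups=3`` configuration of ShuffleNetV1's first stage.
+        >>> import torch
+        >>> from mmpose.models.backbones.shufflenet_v1 import ShuffleUnit
+        >>> unit = ShuffleUnit(24, 240, groups=3, first_block=True,
+        ...                    combine='concat')
+        >>> unit.eval()
+        >>> x = torch.rand(1, 24, 56, 56)
+        >>> tuple(unit(x).shape)
+        (1, 240, 28, 28)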
+ """ + + def __init__(self, + in_channels, + out_channels, + groups=3, + first_block=True, + combine='add', + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.first_block = first_block + self.combine = combine + self.groups = groups + self.bottleneck_channels = self.out_channels // 4 + self.with_cp = with_cp + + if self.combine == 'add': + self.depthwise_stride = 1 + self._combine_func = self._add + assert in_channels == out_channels, ( + 'in_channels must be equal to out_channels when combine ' + 'is add') + elif self.combine == 'concat': + self.depthwise_stride = 2 + self._combine_func = self._concat + self.out_channels -= self.in_channels + self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + raise ValueError(f'Cannot combine tensors with {self.combine}. ' + 'Only "add" and "concat" are supported') + + self.first_1x1_groups = 1 if first_block else self.groups + self.g_conv_1x1_compress = ConvModule( + in_channels=self.in_channels, + out_channels=self.bottleneck_channels, + kernel_size=1, + groups=self.first_1x1_groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.depthwise_conv3x3_bn = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.bottleneck_channels, + kernel_size=3, + stride=self.depthwise_stride, + padding=1, + groups=self.bottleneck_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.g_conv_1x1_expand = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.out_channels, + kernel_size=1, + groups=self.groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + @staticmethod + def _add(x, out): + # residual connection + return x + out + + @staticmethod + def _concat(x, out): + # concatenate along channel axis + return torch.cat((x, out), 1) + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.g_conv_1x1_compress(x) + out = self.depthwise_conv3x3_bn(out) + + if self.groups > 1: + out = channel_shuffle(out, self.groups) + + out = self.g_conv_1x1_expand(out) + + if self.combine == 'concat': + residual = self.avgpool(residual) + out = self.act(out) + out = self._combine_func(residual, out) + else: + out = self._combine_func(residual, out) + out = self.act(out) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV1(BaseBackbone): + """ShuffleNetV1 backbone. + + Args: + groups (int, optional): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3. + widen_factor (float, optional): Width multiplier - adjusts the number + of channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, ) + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). 
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only. Default: False.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+            Default:
+            ``[
+                dict(type='Normal', std=0.01, layer=['Conv2d']),
+                dict(
+                    type='Constant',
+                    val=1,
+                    bias=0.0001,
+                    layer=['_BatchNorm', 'GroupNorm'])
+            ]``
+    """
+
+    def __init__(self,
+                 groups=3,
+                 widen_factor=1.0,
+                 out_indices=(2, ),
+                 frozen_stages=-1,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 act_cfg=dict(type='ReLU'),
+                 norm_eval=False,
+                 with_cp=False,
+                 init_cfg=[
+                     dict(type='Normal', std=0.01, layer=['Conv2d']),
+                     dict(
+                         type='Constant',
+                         val=1,
+                         bias=0.0001,
+                         layer=['_BatchNorm', 'GroupNorm'])
+                 ]):
+        # Protect mutable default arguments
+        norm_cfg = copy.deepcopy(norm_cfg)
+        act_cfg = copy.deepcopy(act_cfg)
+        super().__init__(init_cfg=init_cfg)
+        self.stage_blocks = [4, 8, 4]
+        self.groups = groups
+
+        for index in out_indices:
+            if index not in range(0, 3):
+                raise ValueError('the item in out_indices must in '
+                                 f'range(0, 3). But received {index}')
+
+        if frozen_stages not in range(-1, 3):
+            raise ValueError('frozen_stages must be in range(-1, 3). '
+                             f'But received {frozen_stages}')
+        self.out_indices = out_indices
+        self.frozen_stages = frozen_stages
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.act_cfg = act_cfg
+        self.norm_eval = norm_eval
+        self.with_cp = with_cp
+
+        if groups == 1:
+            channels = (144, 288, 576)
+        elif groups == 2:
+            channels = (200, 400, 800)
+        elif groups == 3:
+            channels = (240, 480, 960)
+        elif groups == 4:
+            channels = (272, 544, 1088)
+        elif groups == 8:
+            channels = (384, 768, 1536)
+        else:
+            raise ValueError(f'{groups} groups is not supported for 1x1 '
+                             'Grouped Convolutions')
+
+        channels = [make_divisible(ch * widen_factor, 8) for ch in channels]
+
+        self.in_channels = int(24 * widen_factor)
+
+        self.conv1 = ConvModule(
+            in_channels=3,
+            out_channels=self.in_channels,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            conv_cfg=conv_cfg,
+            norm_cfg=norm_cfg,
+            act_cfg=act_cfg)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        self.layers = nn.ModuleList()
+        for i, num_blocks in enumerate(self.stage_blocks):
+            first_block = (i == 0)
+            layer = self.make_layer(channels[i], num_blocks, first_block)
+            self.layers.append(layer)
+
+    def _freeze_stages(self):
+        if self.frozen_stages >= 0:
+            for param in self.conv1.parameters():
+                param.requires_grad = False
+            for i in range(self.frozen_stages):
+                layer = self.layers[i]
+                layer.eval()
+                for param in layer.parameters():
+                    param.requires_grad = False
+
+    def init_weights(self, pretrained=None):
+        super(ShuffleNetV1, self).init_weights()
+
+        if (isinstance(self.init_cfg, dict)
+                and self.init_cfg['type'] == 'Pretrained'):
+            return
+
+        for name, m in self.named_modules():
+            if isinstance(m, nn.Conv2d) and 'conv1' not in name:
+                nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1])
+
+    def make_layer(self, out_channels, num_blocks, first_block=False):
+        """Stack ShuffleUnit blocks to make a layer.
+
+        Args:
+            out_channels (int): out_channels of the block.
+            num_blocks (int): Number of blocks.
+            first_block (bool, optional): Whether it is the first ShuffleUnit
+                of a sequential ShuffleUnits. Default: False, which means
+                using the grouped 1x1 convolution.
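+
+        Returns:
+            nn.Sequential: The stacked stage of ``ShuffleUnit`` blocks.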
+ """ + layers = [] + for i in range(num_blocks): + first_block = first_block if i == 0 else False + combine_mode = 'concat' if i == 0 else 'add' + layers.append( + ShuffleUnit( + self.in_channels, + out_channels, + groups=self.groups, + first_block=first_block, + combine=combine_mode, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/shufflenet_v2.py b/mmpose/models/backbones/shufflenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..9757841e73bf547fde77cf847a917c46acfb0b00 --- /dev/null +++ b/mmpose/models/backbones/shufflenet_v2.py @@ -0,0 +1,311 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import channel_shuffle + + +class InvertedResidual(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV2(BaseBackbone): + """ShuffleNetV2 backbone. + + Args: + widen_factor (float): Width multiplier - adjusts the number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+            Default:
+            ``[
+                dict(type='Normal', std=0.01, layer=['Conv2d']),
+                dict(
+                    type='Constant',
+                    val=1,
+                    bias=0.0001,
+                    layer=['_BatchNorm', 'GroupNorm'])
+            ]``
+    """
+
+    def __init__(self,
+                 widen_factor=1.0,
+                 out_indices=(3, ),
+                 frozen_stages=-1,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 act_cfg=dict(type='ReLU'),
+                 norm_eval=False,
+                 with_cp=False,
+                 init_cfg=[
+                     dict(type='Normal', std=0.01, layer=['Conv2d']),
+                     dict(
+                         type='Constant',
+                         val=1,
+                         bias=0.0001,
+                         layer=['_BatchNorm', 'GroupNorm'])
+                 ]):
+        # Protect mutable default arguments
+        norm_cfg = copy.deepcopy(norm_cfg)
+        act_cfg = copy.deepcopy(act_cfg)
+        super().__init__(init_cfg=init_cfg)
+        self.stage_blocks = [4, 8, 4]
+        for index in out_indices:
+            if index not in range(0, 4):
+                raise ValueError('the item in out_indices must in '
+                                 f'range(0, 4). But received {index}')
+
+        if frozen_stages not in range(-1, 4):
+            raise ValueError('frozen_stages must be in range(-1, 4). '
+                             f'But received {frozen_stages}')
+        self.out_indices = out_indices
+        self.frozen_stages = frozen_stages
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.act_cfg = act_cfg
+        self.norm_eval = norm_eval
+        self.with_cp = with_cp
+
+        if widen_factor == 0.5:
+            channels = [48, 96, 192, 1024]
+        elif widen_factor == 1.0:
+            channels = [116, 232, 464, 1024]
+        elif widen_factor == 1.5:
+            channels = [176, 352, 704, 1024]
+        elif widen_factor == 2.0:
+            channels = [244, 488, 976, 2048]
+        else:
+            raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. '
+                             f'But received {widen_factor}')
+
+        self.in_channels = 24
+        self.conv1 = ConvModule(
+            in_channels=3,
+            out_channels=self.in_channels,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            conv_cfg=conv_cfg,
+            norm_cfg=norm_cfg,
+            act_cfg=act_cfg)
+
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        self.layers = nn.ModuleList()
+        for i, num_blocks in enumerate(self.stage_blocks):
+            layer = self._make_layer(channels[i], num_blocks)
+            self.layers.append(layer)
+
+        output_channels = channels[-1]
+        self.layers.append(
+            ConvModule(
+                in_channels=self.in_channels,
+                out_channels=output_channels,
+                kernel_size=1,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                act_cfg=act_cfg))
+
+    def _make_layer(self, out_channels, num_blocks):
+        """Stack blocks to make a layer.
+
+        Args:
+            out_channels (int): out_channels of the block.
+            num_blocks (int): number of blocks.
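+
+        Returns:
+            nn.Sequential: The stacked stage of ``InvertedResidual`` blocks.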
+ """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d) and 'conv1' not in name: + nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1]) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmpose/models/backbones/swin.py b/mmpose/models/backbones/swin.py new file mode 100644 index 0000000000000000000000000000000000000000..a8f7c972787c19f64eb398615966722c5bdcd533 --- /dev/null +++ b/mmpose/models/backbones/swin.py @@ -0,0 +1,739 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, build_dropout +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ +from mmengine.runner import load_state_dict +from mmengine.utils import to_2tuple + +from mmpose.registry import MODELS +from mmpose.utils import get_root_logger +from ..utils.transformer import PatchEmbed, PatchMerging +from .base_backbone import BaseBackbone +from .utils import get_state_dict +from .utils.ckpt_convert import swin_converter + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
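+
+    Example:
+        >>> # Illustrative sketch only: 49 tokens correspond to the assumed
+        >>> # 7x7 window, and the first axis packs num_windows * B.
+        >>> import torch
+        >>> msa = WindowMSA(embed_dims=96, num_heads=3, window_size=(7, 7))
+        >>> x = torch.rand(4, 49, 96)
+        >>> msa(x).shape
+        torch.Size([4, 49, 96])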
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. + """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(BaseModule): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. 
+ Defaults: dict(type='DropPath', drop_prob=0.). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate) + + self.drop = build_dropout(dropout_layer) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + 
Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + window_size = self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(BaseModule): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg=init_cfg) + + self.with_cp = with_cp + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate)) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True, + init_cfg=None) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. 
+ attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. + downsample (nn.Module | None, optional): The downsample operation + module. Default: None. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = nn.ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +@MODELS.register_module() +class SwinTransformer(BaseBackbone): + """ Swin Transformer + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/abs/2103.14030 + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + patch_norm (bool): If add a norm layer for patch embed and patch + merging. Default: True. + drop_rate (float): Dropout rate. Defaults: 0. + attn_drop_rate (float): Attention dropout rate. Default: 0. 
+        drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.
+        use_abs_pos_embed (bool): If True, add absolute position embedding to
+            the patch embedding. Defaults: False.
+        act_cfg (dict): Config dict for activation layer.
+            Default: dict(type='GELU').
+        norm_cfg (dict): Config dict for normalization layer at
+            output of backbone. Defaults: dict(type='LN').
+        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
+            will save some memory while slowing down the training speed.
+            Default: False.
+        pretrained (str, optional): model pretrained path. Default: None.
+        convert_weights (bool): The flag indicates whether the
+            pre-trained model is from the original repo. We may need
+            to convert some keys to make it compatible.
+            Default: False.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            Default: -1 (-1 means not freezing any parameters).
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+            Default: ``[
+                dict(type='TruncNormal', std=.02, layer=['Linear']),
+                dict(type='Constant', val=1, layer=['LayerNorm']),
+            ]``
+    """
+
+    def __init__(self,
+                 pretrain_img_size=224,
+                 in_channels=3,
+                 embed_dims=96,
+                 patch_size=4,
+                 window_size=7,
+                 mlp_ratio=4,
+                 depths=(2, 2, 6, 2),
+                 num_heads=(3, 6, 12, 24),
+                 strides=(4, 2, 2, 2),
+                 out_indices=(0, 1, 2, 3),
+                 qkv_bias=True,
+                 qk_scale=None,
+                 patch_norm=True,
+                 drop_rate=0.,
+                 attn_drop_rate=0.,
+                 drop_path_rate=0.1,
+                 use_abs_pos_embed=False,
+                 act_cfg=dict(type='GELU'),
+                 norm_cfg=dict(type='LN'),
+                 with_cp=False,
+                 convert_weights=False,
+                 frozen_stages=-1,
+                 init_cfg=[
+                     dict(type='TruncNormal', std=.02, layer=['Linear']),
+                     dict(type='Constant', val=1, layer=['LayerNorm']),
+                 ]):
+        self.convert_weights = convert_weights
+        self.frozen_stages = frozen_stages
+        if isinstance(pretrain_img_size, int):
+            pretrain_img_size = to_2tuple(pretrain_img_size)
+        elif isinstance(pretrain_img_size, tuple):
+            if len(pretrain_img_size) == 1:
+                pretrain_img_size = to_2tuple(pretrain_img_size[0])
+            assert len(pretrain_img_size) == 2, \
+                f'The size of image should have length 1 or 2, ' \
+                f'but got {len(pretrain_img_size)}'
+
+        super(SwinTransformer, self).__init__(init_cfg=init_cfg)
+
+        num_layers = len(depths)
+        self.out_indices = out_indices
+        self.use_abs_pos_embed = use_abs_pos_embed
+
+        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
+ + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=strides[0], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + if self.use_abs_pos_embed: + patch_row = pretrain_img_size[0] // patch_size + patch_col = pretrain_img_size[1] // patch_size + num_patches = patch_row * patch_col + self.absolute_pos_embed = nn.Parameter( + torch.zeros((1, num_patches, embed_dims))) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # set stochastic depth decay rule + total_depth = sum(depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + self.stages = nn.ModuleList() + in_channels = embed_dims + for i in range(num_layers): + if i < num_layers - 1: + downsample = PatchMerging( + in_channels=in_channels, + out_channels=2 * in_channels, + stride=strides[i + 1], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + else: + downsample = None + + stage = SwinBlockSequence( + embed_dims=in_channels, + num_heads=num_heads[i], + feedforward_channels=mlp_ratio * in_channels, + depth=depths[i], + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], + downsample=downsample, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp) + self.stages.append(stage) + if downsample: + in_channels = downsample.out_channels + + self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] + # Add a norm layer for each output + for i in out_indices: + layer = build_norm_layer(norm_cfg, self.num_features[i])[1] + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + if self.use_abs_pos_embed: + self.absolute_pos_embed.requires_grad = False + self.drop_after_pos.eval() + + for i in range(1, self.frozen_stages + 1): + + if (i - 1) in self.out_indices: + norm_layer = getattr(self, f'norm{i-1}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + m = self.stages[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. + """ + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. 
+ logger = get_root_logger() + state_dict = get_state_dict( + self.init_cfg['checkpoint'], map_location='cpu') + if self.convert_weights: + # supported loading weight from original repo + state_dict = swin_converter(state_dict) + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + logger.warning('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + logger.warning(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + load_state_dict(self, state_dict, strict=False, logger=logger) + + else: + super(SwinTransformer, self).init_weights() + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) diff --git a/mmpose/models/backbones/tcn.py b/mmpose/models/backbones/tcn.py new file mode 100644 index 0000000000000000000000000000000000000000..ef49a1ff075288cc7a23f51f47c5b1bcdd383894 --- /dev/null +++ b/mmpose/models/backbones/tcn.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule, build_conv_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from ..utils.regularizations import WeightNormClipHook +from .base_backbone import BaseBackbone + + +class BasicTemporalBlock(BaseModule): + """Basic block for VideoPose3D. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + mid_channels (int): The output channels of conv1. Default: 1024. + kernel_size (int): Size of the convolving kernel. Default: 3. + dilation (int): Spacing between kernel elements. Default: 3. + dropout (float): Dropout rate. Default: 0.25. + causal (bool): Use causal convolutions instead of symmetric + convolutions (for real-time applications). Default: False. + residual (bool): Use residual connection. Default: True. 
+ use_stride_conv (bool): Use optimized TCN that designed + specifically for single-frame batching, i.e. where batches have + input length = receptive field, and output length = 1. This + implementation replaces dilated convolutions with strided + convolutions to avoid generating unused intermediate results. + Default: False. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: dict(type='Conv1d'). + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN1d'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + mid_channels=1024, + kernel_size=3, + dilation=3, + dropout=0.25, + causal=False, + residual=True, + use_stride_conv=False, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + init_cfg=None): + # Protect mutable default arguments + conv_cfg = copy.deepcopy(conv_cfg) + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.mid_channels = mid_channels + self.kernel_size = kernel_size + self.dilation = dilation + self.dropout = dropout + self.causal = causal + self.residual = residual + self.use_stride_conv = use_stride_conv + + self.pad = (kernel_size - 1) * dilation // 2 + if use_stride_conv: + self.stride = kernel_size + self.causal_shift = kernel_size // 2 if causal else 0 + self.dilation = 1 + else: + self.stride = 1 + self.causal_shift = kernel_size // 2 * dilation if causal else 0 + + self.conv1 = nn.Sequential( + ConvModule( + in_channels, + mid_channels, + kernel_size=kernel_size, + stride=self.stride, + dilation=self.dilation, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + self.conv2 = nn.Sequential( + ConvModule( + mid_channels, + out_channels, + kernel_size=1, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + + if residual and in_channels != out_channels: + self.short_cut = build_conv_layer(conv_cfg, in_channels, + out_channels, 1) + else: + self.short_cut = None + + self.dropout = nn.Dropout(dropout) if dropout > 0 else None + + def forward(self, x): + """Forward function.""" + if self.use_stride_conv: + assert self.causal_shift + self.kernel_size // 2 < x.shape[2] + else: + assert 0 <= self.pad + self.causal_shift < x.shape[2] - \ + self.pad + self.causal_shift <= x.shape[2] + + out = self.conv1(x) + if self.dropout is not None: + out = self.dropout(out) + + out = self.conv2(out) + if self.dropout is not None: + out = self.dropout(out) + + if self.residual: + if self.use_stride_conv: + res = x[:, :, self.causal_shift + + self.kernel_size // 2::self.kernel_size] + else: + res = x[:, :, + (self.pad + self.causal_shift):(x.shape[2] - self.pad + + self.causal_shift)] + + if self.short_cut is not None: + res = self.short_cut(res) + out = out + res + + return out + + +@MODELS.register_module() +class TCN(BaseBackbone): + """TCN backbone. + + Temporal Convolutional Networks. + More details can be found in the + `paper `__ . + + Args: + in_channels (int): Number of input channels, which equals to + num_keypoints * num_features. + stem_channels (int): Number of feature channels. Default: 1024. + num_blocks (int): NUmber of basic temporal convolutional blocks. + Default: 2. + kernel_sizes (Sequence[int]): Sizes of the convolving kernel of + each basic block. Default: ``(3, 3, 3)``. + dropout (float): Dropout rate. Default: 0.25. 
+ causal (bool): Use causal convolutions instead of symmetric + convolutions (for real-time applications). + Default: False. + residual (bool): Use residual connection. Default: True. + use_stride_conv (bool): Use TCN backbone optimized for + single-frame batching, i.e. where batches have input length = + receptive field, and output length = 1. This implementation + replaces dilated convolutions with strided convolutions to avoid + generating unused intermediate results. The weights are + interchangeable with the reference implementation. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: dict(type='Conv1d'). + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN1d'). + max_norm (float|None): if not None, the weight of convolution layers + will be clipped to have a maximum norm of max_norm. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict( + type='Kaiming', + mode='fan_in', + nonlinearity='relu', + layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import TCN + >>> import torch + >>> self = TCN(in_channels=34) + >>> self.eval() + >>> inputs = torch.rand(1, 34, 243) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 1024, 235) + (1, 1024, 217) + """ + + def __init__(self, + in_channels, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + causal=False, + residual=True, + use_stride_conv=False, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + max_norm=None, + init_cfg=[ + dict( + type='Kaiming', + mode='fan_in', + nonlinearity='relu', + layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + conv_cfg = copy.deepcopy(conv_cfg) + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__() + self.in_channels = in_channels + self.stem_channels = stem_channels + self.num_blocks = num_blocks + self.kernel_sizes = kernel_sizes + self.dropout = dropout + self.causal = causal + self.residual = residual + self.use_stride_conv = use_stride_conv + self.max_norm = max_norm + + assert num_blocks == len(kernel_sizes) - 1 + for ks in kernel_sizes: + assert ks % 2 == 1, 'Only odd filter widths are supported.' 
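+
+        # Note: the temporal receptive field equals the product of
+        # kernel_sizes, e.g. (3, 3, 3) -> 27 frames, which is why the
+        # 243-frame input in the docstring example shrinks to
+        # 243 - 27 + 1 = 217 frames after the last block.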
+ + self.expand_conv = ConvModule( + in_channels, + stem_channels, + kernel_size=kernel_sizes[0], + stride=kernel_sizes[0] if use_stride_conv else 1, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + dilation = kernel_sizes[0] + self.tcn_blocks = nn.ModuleList() + for i in range(1, num_blocks + 1): + self.tcn_blocks.append( + BasicTemporalBlock( + in_channels=stem_channels, + out_channels=stem_channels, + mid_channels=stem_channels, + kernel_size=kernel_sizes[i], + dilation=dilation, + dropout=dropout, + causal=causal, + residual=residual, + use_stride_conv=use_stride_conv, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + dilation *= kernel_sizes[i] + + if self.max_norm is not None: + # Apply weight norm clip to conv layers + weight_clip = WeightNormClipHook(self.max_norm) + for module in self.modules(): + if isinstance(module, nn.modules.conv._ConvNd): + weight_clip.register(module) + + self.dropout = nn.Dropout(dropout) if dropout > 0 else None + + def forward(self, x): + """Forward function.""" + x = self.expand_conv(x) + + if self.dropout is not None: + x = self.dropout(x) + + outs = [] + for i in range(self.num_blocks): + x = self.tcn_blocks[i](x) + outs.append(x) + + return tuple(outs) diff --git a/mmpose/models/backbones/utils/__init__.py b/mmpose/models/backbones/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..07e42f89126c9e5663123794f92987b4f9b347f1 --- /dev/null +++ b/mmpose/models/backbones/utils/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .channel_shuffle import channel_shuffle +from .inverted_residual import InvertedResidual +from .make_divisible import make_divisible +from .se_layer import SELayer +from .utils import get_state_dict, load_checkpoint + +__all__ = [ + 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer', + 'load_checkpoint', 'get_state_dict' +] diff --git a/mmpose/models/backbones/utils/channel_shuffle.py b/mmpose/models/backbones/utils/channel_shuffle.py new file mode 100644 index 0000000000000000000000000000000000000000..aedd826bee690d42d92ed8a7f538b221e5b069e2 --- /dev/null +++ b/mmpose/models/backbones/utils/channel_shuffle.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def channel_shuffle(x, groups): + """Channel Shuffle operation. + + This function enables cross-group information flow for multiple groups + convolution layers. + + Args: + x (Tensor): The input tensor. + groups (int): The number of groups to divide the input tensor + in the channel dimension. + + Returns: + Tensor: The output tensor after channel shuffle operation. + """ + + batch_size, num_channels, height, width = x.size() + assert (num_channels % groups == 0), ('num_channels should be ' + 'divisible by groups') + channels_per_group = num_channels // groups + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(batch_size, groups * channels_per_group, height, width) + + return x diff --git a/mmpose/models/backbones/utils/ckpt_convert.py b/mmpose/models/backbones/utils/ckpt_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..14a43892c6630be31e915ed1f8b9164ba250e8bd --- /dev/null +++ b/mmpose/models/backbones/utils/ckpt_convert.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +# This script consists of several convert functions which +# can modify the weights of model in original repo to be +# pre-trained weights. 
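+#
+# Illustrative usage sketch (the checkpoint path is hypothetical and the
+# 'model' key is an assumption about the original checkpoints, so check it
+# before relying on this):
+#
+#     ckpt = torch.load('path/to/official_swin.pth', map_location='cpu')
+#     state_dict = swin_converter(ckpt.get('model', ckpt))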
+ +from collections import OrderedDict + + +def swin_converter(ckpt): + + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt['backbone.' + new_k] = new_v + + return new_ckpt diff --git a/mmpose/models/backbones/utils/inverted_residual.py b/mmpose/models/backbones/utils/inverted_residual.py new file mode 100644 index 0000000000000000000000000000000000000000..dff762c570550e4a738ae1833a4c82c18777115d --- /dev/null +++ b/mmpose/models/backbones/utils/inverted_residual.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule + +from .se_layer import SELayer + + +class InvertedResidual(nn.Module): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + groups (None or int): The group number of the depthwise convolution. + Default: None, which means group number = mid_channels. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. + Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. 
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + groups=None, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__() + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if groups is None: + groups = mid_channels + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if self.with_se: + self.se = SELayer(**se_cfg) + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/mmpose/models/backbones/utils/make_divisible.py b/mmpose/models/backbones/utils/make_divisible.py new file mode 100644 index 0000000000000000000000000000000000000000..b7666be65939d5c76057e73927c230029cb1871d --- /dev/null +++ b/mmpose/models/backbones/utils/make_divisible.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number down to the nearest value that can + be divisible by the divisor. + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int, optional): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float, optional): The minimum ratio of the rounded channel + number to the original channel number. Default: 0.9. + Returns: + int: The modified output channel number + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/mmpose/models/backbones/utils/se_layer.py b/mmpose/models/backbones/utils/se_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6d7aeaa9a990dbaf437b4ff4f4ba685e008245 --- /dev/null +++ b/mmpose/models/backbones/utils/se_layer.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import mmengine +import torch.nn as nn +from mmcv.cnn import ConvModule + + +class SELayer(nn.Module): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))): + super().__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/mmpose/models/backbones/utils/utils.py b/mmpose/models/backbones/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc4fe40cd481391edf73872e2d4f6eb35592779 --- /dev/null +++ b/mmpose/models/backbones/utils/utils.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +from mmengine.runner import CheckpointLoader, load_state_dict + + +def load_checkpoint(model, + filename, + map_location='cpu', + strict=False, + logger=None): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict_tmp = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict_tmp = checkpoint['model'] + else: + state_dict_tmp = checkpoint + + state_dict = OrderedDict() + # strip prefix of state_dict + for k, v in state_dict_tmp.items(): + if k.startswith('module.backbone.'): + state_dict[k[16:]] = v + elif k.startswith('module.'): + state_dict[k[7:]] = v + elif k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + # load state_dict + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def get_state_dict(filename, map_location='cpu'): + """Get state_dict from a file or URI. 
+ + Args: + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. + map_location (str): Same as :func:`torch.load`. + + Returns: + OrderedDict: The state_dict. + """ + checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict_tmp = checkpoint['state_dict'] + else: + state_dict_tmp = checkpoint + + state_dict = OrderedDict() + # strip prefix of state_dict + for k, v in state_dict_tmp.items(): + if k.startswith('module.backbone.'): + state_dict[k[16:]] = v + elif k.startswith('module.'): + state_dict[k[7:]] = v + elif k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + + return state_dict diff --git a/mmpose/models/backbones/v2v_net.py b/mmpose/models/backbones/v2v_net.py new file mode 100644 index 0000000000000000000000000000000000000000..2cd1ab93b105b345aabc0ace2c7e776cd99e36a9 --- /dev/null +++ b/mmpose/models/backbones/v2v_net.py @@ -0,0 +1,275 @@ +# ------------------------------------------------------------------------------ +# Copyright and License Information +# Adapted from +# https://github.com/microsoft/voxelpose-pytorch/blob/main/lib/models/v2v_net.py +# Original Licence: MIT License +# ------------------------------------------------------------------------------ + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class Basic3DBlock(BaseModule): + """A basic 3D convolutional block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the convolution operation + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: dict(type='Conv3d') + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN3d') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + init_cfg=None): + super(Basic3DBlock, self).__init__(init_cfg=init_cfg) + self.block = ConvModule( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True) + + def forward(self, x): + """Forward function.""" + return self.block(x) + + +class Res3DBlock(BaseModule): + """A residual 3D convolutional block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the convolution operation + Default: 3 + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: dict(type='Conv3d') + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN3d') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + init_cfg=None): + super(Res3DBlock, self).__init__(init_cfg=init_cfg) + self.res_branch = nn.Sequential( + ConvModule( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True), + ConvModule( + out_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True)) + + if in_channels == out_channels: + self.skip_con = nn.Sequential() + else: + self.skip_con = ConvModule( + in_channels, + out_channels, + 1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True) + + def forward(self, x): + """Forward function.""" + res = self.res_branch(x) + skip = self.skip_con(x) + return F.relu(res + skip, True) + + +class Pool3DBlock(BaseModule): + """A 3D max-pool block. + + Args: + pool_size (int): Pool size of the 3D max-pool layer + """ + + def __init__(self, pool_size): + super(Pool3DBlock, self).__init__() + self.pool_size = pool_size + + def forward(self, x): + """Forward function.""" + return F.max_pool3d( + x, kernel_size=self.pool_size, stride=self.pool_size) + + +class Upsample3DBlock(BaseModule): + """A 3D upsample block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the transposed convolution operation. + Default: 2 + stride (int): Kernel size of the transposed convolution operation. + Default: 2 + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=2, + init_cfg=None): + super(Upsample3DBlock, self).__init__(init_cfg=init_cfg) + assert kernel_size == 2 + assert stride == 2 + self.block = nn.Sequential( + nn.ConvTranspose3d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=0, + output_padding=0), nn.BatchNorm3d(out_channels), nn.ReLU(True)) + + def forward(self, x): + """Forward function.""" + return self.block(x) + + +class EncoderDecorder(BaseModule): + """An encoder-decoder block. + + Args: + in_channels (int): Input channels of this block + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, in_channels=32, init_cfg=None): + super(EncoderDecorder, self).__init__(init_cfg=init_cfg) + + self.encoder_pool1 = Pool3DBlock(2) + self.encoder_res1 = Res3DBlock(in_channels, in_channels * 2) + self.encoder_pool2 = Pool3DBlock(2) + self.encoder_res2 = Res3DBlock(in_channels * 2, in_channels * 4) + + self.mid_res = Res3DBlock(in_channels * 4, in_channels * 4) + + self.decoder_res2 = Res3DBlock(in_channels * 4, in_channels * 4) + self.decoder_upsample2 = Upsample3DBlock(in_channels * 4, + in_channels * 2, 2, 2) + self.decoder_res1 = Res3DBlock(in_channels * 2, in_channels * 2) + self.decoder_upsample1 = Upsample3DBlock(in_channels * 2, in_channels, + 2, 2) + + self.skip_res1 = Res3DBlock(in_channels, in_channels) + self.skip_res2 = Res3DBlock(in_channels * 2, in_channels * 2) + + def forward(self, x): + """Forward function.""" + skip_x1 = self.skip_res1(x) + x = self.encoder_pool1(x) + x = self.encoder_res1(x) + + skip_x2 = self.skip_res2(x) + x = self.encoder_pool2(x) + x = self.encoder_res2(x) + + x = self.mid_res(x) + + x = self.decoder_res2(x) + x = self.decoder_upsample2(x) + x = x + skip_x2 + + x = self.decoder_res1(x) + x = self.decoder_upsample1(x) + x = x + skip_x1 + + return x + + +@MODELS.register_module() +class V2VNet(BaseBackbone): + """V2VNet. + + Please refer to the `paper ` + for details. + + Args: + input_channels (int): + Number of channels of the input feature volume. + output_channels (int): + Number of channels of the output volume. + mid_channels (int): + Input and output channels of the encoder-decoder block. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: ``dict( + type='Normal', + std=0.001, + layer=['Conv3d', 'ConvTranspose3d'] + )`` + """ + + def __init__(self, + input_channels, + output_channels, + mid_channels=32, + init_cfg=dict( + type='Normal', + std=0.001, + layer=['Conv3d', 'ConvTranspose3d'])): + super(V2VNet, self).__init__(init_cfg=init_cfg) + + self.front_layers = nn.Sequential( + Basic3DBlock(input_channels, mid_channels // 2, 7), + Res3DBlock(mid_channels // 2, mid_channels), + ) + + self.encoder_decoder = EncoderDecorder(in_channels=mid_channels) + + self.output_layer = nn.Conv3d( + mid_channels, output_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x): + """Forward function.""" + x = self.front_layers(x) + x = self.encoder_decoder(x) + x = self.output_layer(x) + + return (x, ) diff --git a/mmpose/models/backbones/vgg.py b/mmpose/models/backbones/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa09d8dc7ded75678e8e23846474acee763a532 --- /dev/null +++ b/mmpose/models/backbones/vgg.py @@ -0,0 +1,201 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
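A hedged usage sketch of the V2VNet backbone defined above, assuming a cubic feature volume whose side is divisible by 4 (two pooling stages); the channel count of 17 is only an example.

    import torch
    from mmpose.models.backbones.v2v_net import V2VNet

    model = V2VNet(input_channels=17, output_channels=17, mid_channels=32)
    model.eval()
    vol = torch.rand(1, 17, 64, 64, 64)  # (N, C, D, H, W) feature volume
    with torch.no_grad():
        out = model(vol)[0]              # forward returns a single-element tuple
    assert out.shape == (1, 17, 64, 64, 64)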
+import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +def make_vgg_layer(in_channels, + out_channels, + num_blocks, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dilation=1, + with_norm=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layer = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=dilation, + padding=dilation, + bias=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + layers.append(layer) + in_channels = out_channels + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +@MODELS.register_module() +class VGG(BaseBackbone): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_norm (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. When it is None, the default behavior depends on + whether num_classes is specified. If num_classes <= 0, the default + value is (4, ), outputting the last feature map before classifier. + If num_classes > 0, the default value is (5, ), outputting the + classification score. Default: None. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. + with_last_pool (bool): Whether to keep the last pooling before + classifier. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + """ + + # Parameters to build layers. Each element specifies the number of conv in + # each stage. For example, VGG11 contains 11 layers with learnable + # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, + # where 3 indicates the last three fully-connected layers. 
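+ # For example, VGG16 stacks (2 + 2 + 3 + 3 + 3) = 13 conv layers plus the 3 fully-connected layers.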
+ arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=None, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + norm_eval=False, + ceil_mode=False, + with_last_pool=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + super().__init__(init_cfg=init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + + self.num_classes = num_classes + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + with_norm = norm_cfg is not None + + if out_indices is None: + out_indices = (5, ) if num_classes > 0 else (4, ) + assert max(out_indices) <= num_stages + self.out_indices = out_indices + + self.in_channels = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + out_channels = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.in_channels, + out_channels, + num_blocks, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dilation=dilation, + with_norm=with_norm, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.in_channels = out_channels + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + vgg_layers = getattr(self, self.module_name) + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + m = vgg_layers[j] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/vipnas_mbv3.py b/mmpose/models/backbones/vipnas_mbv3.py new file mode 100644 index 0000000000000000000000000000000000000000..9156cafa56d4f15766e48c77cd492e52345aed65 --- /dev/null +++ b/mmpose/models/backbones/vipnas_mbv3.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
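A hedged usage sketch of the VGG backbone above used as a plain feature extractor (no classifier head); the input resolution and the BatchNorm choice are illustrative.

    import torch
    from mmpose.models.backbones.vgg import VGG

    backbone = VGG(depth=16, num_classes=-1, norm_cfg=dict(type='BN'), out_indices=(4, ))
    backbone.eval()
    with torch.no_grad():
        feats = backbone(torch.rand(1, 3, 224, 224))
    assert feats[0].shape == (1, 512, 7, 7)  # 224 / 2**5 after the five max-pool stages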
+import copy + +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import InvertedResidual + + +@MODELS.register_module() +class ViPNAS_MobileNetV3(BaseBackbone): + """ViPNAS_MobileNetV3 backbone. + + "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" + More details can be found in the `paper + `__ . + + Args: + wid (list(int)): Searched width config for each stage. + expan (list(int)): Searched expansion ratio config for each stage. + dep (list(int)): Searched depth config for each stage. + ks (list(int)): Searched kernel size config for each stage. + group (list(int)): Searched group number config for each stage. + att (list(bool)): Searched attention config for each stage. + stride (list(int)): Stride config for each stage. + act (list(dict)): Activation config for each stage. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + def __init__( + self, + wid=[16, 16, 24, 40, 80, 112, 160], + expan=[None, 1, 5, 4, 5, 5, 6], + dep=[None, 1, 4, 4, 4, 4, 4], + ks=[3, 3, 7, 7, 5, 7, 5], + group=[None, 8, 120, 20, 100, 280, 240], + att=[None, True, True, False, True, True, True], + stride=[2, 1, 2, 2, 2, 1, 2], + act=['HSwish', 'ReLU', 'ReLU', 'ReLU', 'HSwish', 'HSwish', 'HSwish'], + conv_cfg=None, + norm_cfg=dict(type='BN'), + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.wid = wid + self.expan = expan + self.dep = dep + self.ks = ks + self.group = group + self.att = att + self.stride = stride + self.act = act + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.wid[0], + kernel_size=self.ks[0], + stride=self.stride[0], + padding=self.ks[0] // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type=self.act[0])) + + self.layers = self._make_layer() + + def _make_layer(self): + layers = [] + layer_index = 0 + for i, dep in enumerate(self.dep[1:]): + mid_channels = self.wid[i + 1] * self.expan[i + 1] + + if self.att[i + 1]: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=1.0, divisor=2.0))) + else: + se_cfg = None + + if self.expan[i + 1] == 1: + with_expand_conv = False + else: + with_expand_conv = True + + for j in range(dep): + if j == 0: + stride = self.stride[i + 1] + in_channels = 
self.wid[i] + else: + stride = 1 + in_channels = self.wid[i + 1] + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=self.wid[i + 1], + mid_channels=mid_channels, + kernel_size=self.ks[i + 1], + groups=self.group[i + 1], + stride=stride, + se_cfg=se_cfg, + with_expand_conv=with_expand_conv, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=self.act[i + 1]), + with_cp=self.with_cp) + layer_index += 1 + layer_name = f'layer{layer_index}' + self.add_module(layer_name, layer) + layers.append(layer_name) + return layers + + def forward(self, x): + x = self.conv1(x) + + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + + return (x, ) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/vipnas_resnet.py b/mmpose/models/backbones/vipnas_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..7be810b449c1a840c425c69e3d1d1340583e52ea --- /dev/null +++ b/mmpose/models/backbones/vipnas_resnet.py @@ -0,0 +1,596 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmcv.cnn.bricks import ContextBlock +from mmengine.model import BaseModule, Sequential +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class ViPNAS_Bottleneck(BaseModule): + """Bottleneck block for ViPNAS_ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + kernel_size (int): kernel size of conv2 searched in ViPANS. + groups (int): group number of conv2 searched in ViPNAS. + attention (bool): whether to use attention module in the end of + the block. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + kernel_size=3, + groups=1, + attention=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=kernel_size, + stride=self.conv2_stride, + padding=kernel_size // 2, + groups=groups, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + if attention: + self.attention = ContextBlock(out_channels, + max(1.0 / 16, 16.0 / out_channels)) + else: + self.attention = None + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: the normalization layer named "norm3" """ + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.attention is not None: + out = self.attention(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 4 for ``ViPNAS_Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. 
+ """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, ViPNAS_Bottleneck): + expansion = 1 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ViPNAS_ResLayer(Sequential): + """ViPNAS_ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ViPNAS ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + kernel_size (int): Kernel Size of the corresponding convolution layer + searched in the block. + groups (int): Group number of the corresponding convolution layer + searched in the block. + attention (bool): Whether to use attention module in the end of the + block. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + kernel_size=3, + groups=1, + attention=False, + init_cfg=None, + **kwargs): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + else: # downsample_first=False is for HourglassModule + for i in range(0, num_blocks - 1): + layers.append( + block( + in_channels=in_channels, + out_channels=in_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + + super().__init__(*layers, init_cfg=init_cfg) + + +@MODELS.register_module() +class ViPNAS_ResNet(BaseBackbone): + """ViPNAS_ResNet backbone. + + "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" + More details can be found in the `paper + `__ . + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. 
+ conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + wid (list(int)): Searched width config for each stage. + expan (list(int)): Searched expansion ratio config for each stage. + dep (list(int)): Searched depth config for each stage. + ks (list(int)): Searched kernel size config for each stage. + group (list(int)): Searched group number config for each stage. + att (list(bool)): Searched attention config for each stage. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + arch_settings = { + 50: ViPNAS_Bottleneck, + } + + def __init__(self, + depth, + in_channels=3, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + wid=[48, 80, 160, 304, 608], + expan=[None, 1, 1, 1, 1], + dep=[None, 4, 6, 7, 3], + ks=[7, 3, 5, 5, 5], + group=[None, 16, 16, 16, 16], + att=[None, True, False, True, True], + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = dep[0] + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block = self.arch_settings[depth] + self.stage_blocks = dep[1:1 + num_stages] + + self._make_stem_layer(in_channels, wid[0], ks[0]) + + self.res_layers = [] + _in_channels = wid[0] + for i, num_blocks in enumerate(self.stage_blocks): + expansion = get_expansion(self.block, expan[i + 1]) + _out_channels = wid[i + 1] * expansion + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=ks[i + 1], + groups=group[i + 1], + attention=att[i + 1]) + _in_channels = _out_channels + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) 
+ + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + """Make a ViPNAS ResLayer.""" + return ViPNAS_ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels, kernel_size): + """Make stem layer.""" + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=kernel_size, + stride=2, + padding=kernel_size // 2, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/builder.py b/mmpose/models/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..cefaedc29100bcbc4c5b9cde55db8f66b25ab637 --- /dev/null +++ b/mmpose/models/builder.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
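A hedged usage sketch of the ViPNAS_ResNet backbone above with its default searched architecture; the 256x192 input is a common top-down pose input size and is only illustrative here.

    import torch
    from mmpose.models.backbones.vipnas_resnet import ViPNAS_ResNet

    backbone = ViPNAS_ResNet(depth=50)  # the searched ViPNAS-Res50 configuration
    backbone.eval()
    with torch.no_grad():
        feats = backbone(torch.rand(1, 3, 256, 192))
    assert feats[0].shape == (1, 608, 8, 6)  # overall stride 32; 608 is the last searched width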
+import warnings + +from mmpose.registry import MODELS + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +POSE_ESTIMATORS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_pose_estimator(cfg): + """Build pose estimator.""" + return POSE_ESTIMATORS.build(cfg) + + +def build_posenet(cfg): + """Build posenet.""" + warnings.warn( + '``build_posenet`` will be deprecated soon, ' + 'please use ``build_pose_estimator`` instead.', DeprecationWarning) + return build_pose_estimator(cfg) diff --git a/mmpose/models/data_preprocessors/__init__.py b/mmpose/models/data_preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..89980f1f6e8538f81faa10a933028eec923b30b0 --- /dev/null +++ b/mmpose/models/data_preprocessors/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .batch_augmentation import BatchSyncRandomResize +from .data_preprocessor import PoseDataPreprocessor + +__all__ = [ + 'PoseDataPreprocessor', + 'BatchSyncRandomResize', +] diff --git a/mmpose/models/data_preprocessors/batch_augmentation.py b/mmpose/models/data_preprocessors/batch_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..e4dcd568e53b5d9cd6f6b2e2fd8f716c44bf3c7d --- /dev/null +++ b/mmpose/models/data_preprocessors/batch_augmentation.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +from typing import List, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine import MessageHub +from mmengine.dist import barrier, broadcast, get_dist_info +from mmengine.structures import PixelData +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.structures import PoseDataSample + + +@MODELS.register_module() +class BatchSyncRandomResize(nn.Module): + """Batch random resize which synchronizes the random size across ranks. + + Args: + random_size_range (tuple): The multi-scale random range during + multi-scale training. + interval (int): The iter interval of change + image size. Defaults to 10. + size_divisor (int): Image size divisible factor. + Defaults to 32. 
+ """ + + def __init__(self, + random_size_range: Tuple[int, int], + interval: int = 10, + size_divisor: int = 32) -> None: + super().__init__() + self.rank, self.world_size = get_dist_info() + self._input_size = None + self._random_size_range = (round(random_size_range[0] / size_divisor), + round(random_size_range[1] / size_divisor)) + self._interval = interval + self._size_divisor = size_divisor + + def forward(self, inputs: Tensor, data_samples: List[PoseDataSample] + ) -> Tuple[Tensor, List[PoseDataSample]]: + """resize a batch of images and bboxes to shape ``self._input_size``""" + h, w = inputs.shape[-2:] + if self._input_size is None: + self._input_size = (h, w) + scale_y = self._input_size[0] / h + scale_x = self._input_size[1] / w + if scale_x != 1 or scale_y != 1: + inputs = F.interpolate( + inputs, + size=self._input_size, + mode='bilinear', + align_corners=False) + for data_sample in data_samples: + img_shape = (int(data_sample.img_shape[0] * scale_y), + int(data_sample.img_shape[1] * scale_x)) + pad_shape = (int(data_sample.pad_shape[0] * scale_y), + int(data_sample.pad_shape[1] * scale_x)) + data_sample.set_metainfo({ + 'img_shape': img_shape, + 'pad_shape': pad_shape, + 'batch_input_shape': self._input_size + }) + + if 'gt_instance_labels' not in data_sample: + continue + + if 'bboxes' in data_sample.gt_instance_labels: + data_sample.gt_instance_labels.bboxes[..., 0::2] *= scale_x + data_sample.gt_instance_labels.bboxes[..., 1::2] *= scale_y + + if 'keypoints' in data_sample.gt_instance_labels: + data_sample.gt_instance_labels.keypoints[..., 0] *= scale_x + data_sample.gt_instance_labels.keypoints[..., 1] *= scale_y + + if 'areas' in data_sample.gt_instance_labels: + data_sample.gt_instance_labels.areas *= scale_x * scale_y + + if 'gt_fields' in data_sample \ + and 'heatmap_mask' in data_sample.gt_fields: + + mask = data_sample.gt_fields.heatmap_mask.unsqueeze(0) + gt_fields = PixelData() + gt_fields.set_field( + F.interpolate( + mask.float(), + size=self._input_size, + mode='bilinear', + align_corners=False).squeeze(0), 'heatmap_mask') + + data_sample.gt_fields = gt_fields + + message_hub = MessageHub.get_current_instance() + if (message_hub.get_info('iter') + 1) % self._interval == 0: + self._input_size = self._get_random_size( + aspect_ratio=float(w / h), device=inputs.device) + return inputs, data_samples + + def _get_random_size(self, aspect_ratio: float, + device: torch.device) -> Tuple[int, int]: + """Randomly generate a shape in ``_random_size_range`` and broadcast to + all ranks.""" + tensor = torch.LongTensor(2).to(device) + if self.rank == 0: + size = random.randint(*self._random_size_range) + size = (self._size_divisor * size, + self._size_divisor * int(aspect_ratio * size)) + tensor[0] = size[0] + tensor[1] = size[1] + barrier() + broadcast(tensor, 0) + input_size = (tensor[0].item(), tensor[1].item()) + return input_size diff --git a/mmpose/models/data_preprocessors/data_preprocessor.py b/mmpose/models/data_preprocessors/data_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..9442d0ed50bdf0e9ca219e496619c1880777bda4 --- /dev/null +++ b/mmpose/models/data_preprocessors/data_preprocessor.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Optional, Sequence, Union + +import numpy as np +import torch +import torch.nn as nn +from mmengine.model import ImgDataPreprocessor +from mmengine.utils import is_seq_of + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class PoseDataPreprocessor(ImgDataPreprocessor): + """Image pre-processor for pose estimation tasks. + + Comparing with the :class:`ImgDataPreprocessor`, + + 1. It will additionally append batch_input_shape + to data_samples considering the DETR-based pose estimation tasks. + + 2. Support image augmentation transforms on batched data. + + It provides the data pre-processing as follows + + - Collate and move data to the target device. + - Pad inputs to the maximum size of current batch with defined + ``pad_value``. The padding size can be divisible by a defined + ``pad_size_divisor`` + - Stack inputs to batch_inputs. + - Convert inputs from bgr to rgb if the shape of input is (3, H, W). + - Normalize image with defined std and mean. + - Apply batch augmentation transforms. + + Args: + mean (sequence of float, optional): The pixel mean of R, G, B + channels. Defaults to None. + std (sequence of float, optional): The pixel standard deviation + of R, G, B channels. Defaults to None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (float or int): The padded pixel value. Defaults to 0. + bgr_to_rgb (bool): whether to convert image from BGR to RGB. + Defaults to False. + rgb_to_bgr (bool): whether to convert image from RGB to BGR. + Defaults to False. + non_blocking (bool): Whether block current process + when transferring data to device. Defaults to False. + batch_augments: (list of dict, optional): Configs of augmentation + transforms on batched data. Defaults to None. + """ + + def __init__(self, + mean: Sequence[float] = None, + std: Sequence[float] = None, + pad_size_divisor: int = 1, + pad_value: Union[float, int] = 0, + bgr_to_rgb: bool = False, + rgb_to_bgr: bool = False, + non_blocking: Optional[bool] = False, + batch_augments: Optional[List[dict]] = None): + super().__init__( + mean=mean, + std=std, + pad_size_divisor=pad_size_divisor, + pad_value=pad_value, + bgr_to_rgb=bgr_to_rgb, + rgb_to_bgr=rgb_to_bgr, + non_blocking=non_blocking) + + if batch_augments is not None: + self.batch_augments = nn.ModuleList( + [MODELS.build(aug) for aug in batch_augments]) + else: + self.batch_augments = None + + def forward(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding and bgr2rgb conversion based on + ``BaseDataPreprocessor``. + + Args: + data (dict): Data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + dict: Data in the same format as the model input. 
+ """ + batch_pad_shape = self._get_pad_shape(data) + data = super().forward(data=data, training=training) + inputs, data_samples = data['inputs'], data['data_samples'] + + # update metainfo since the image shape might change + batch_input_shape = tuple(inputs[0].size()[-2:]) + for data_sample, pad_shape in zip(data_samples, batch_pad_shape): + data_sample.set_metainfo({ + 'batch_input_shape': batch_input_shape, + 'pad_shape': pad_shape + }) + + # apply batch augmentations + if training and self.batch_augments is not None: + for batch_aug in self.batch_augments: + inputs, data_samples = batch_aug(inputs, data_samples) + + return {'inputs': inputs, 'data_samples': data_samples} + + def _get_pad_shape(self, data: dict) -> List[tuple]: + """Get the pad_shape of each image based on data and + pad_size_divisor.""" + _batch_inputs = data['inputs'] + # Process data with `pseudo_collate`. + if is_seq_of(_batch_inputs, torch.Tensor): + batch_pad_shape = [] + for ori_input in _batch_inputs: + pad_h = int( + np.ceil(ori_input.shape[1] / + self.pad_size_divisor)) * self.pad_size_divisor + pad_w = int( + np.ceil(ori_input.shape[2] / + self.pad_size_divisor)) * self.pad_size_divisor + batch_pad_shape.append((pad_h, pad_w)) + # Process data with `default_collate`. + elif isinstance(_batch_inputs, torch.Tensor): + assert _batch_inputs.dim() == 4, ( + 'The input of `ImgDataPreprocessor` should be a NCHW tensor ' + 'or a list of tensor, but got a tensor with shape: ' + f'{_batch_inputs.shape}') + pad_h = int( + np.ceil(_batch_inputs.shape[1] / + self.pad_size_divisor)) * self.pad_size_divisor + pad_w = int( + np.ceil(_batch_inputs.shape[2] / + self.pad_size_divisor)) * self.pad_size_divisor + batch_pad_shape = [(pad_h, pad_w)] * _batch_inputs.shape[0] + else: + raise TypeError('Output of `cast_data` should be a dict ' + 'or a tuple with inputs and data_samples, but got' + f'{type(data)}: {data}') + return batch_pad_shape diff --git a/mmpose/models/distillers/__init__.py b/mmpose/models/distillers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc22a61105dabf5a3d60d0f2f7f6ee2df512bf1 --- /dev/null +++ b/mmpose/models/distillers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dwpose_distiller import DWPoseDistiller + +__all__ = ['DWPoseDistiller'] diff --git a/mmpose/models/distillers/dwpose_distiller.py b/mmpose/models/distillers/dwpose_distiller.py new file mode 100644 index 0000000000000000000000000000000000000000..d267951cd549a03eacb4473846c574ada5262144 --- /dev/null +++ b/mmpose/models/distillers/dwpose_distiller.py @@ -0,0 +1,290 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta +from typing import Tuple + +import torch +import torch.nn as nn +from mmengine.config import Config +from mmengine.logging import MessageHub +from mmengine.model import BaseModel +from mmengine.runner.checkpoint import load_checkpoint +from torch import Tensor + +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models import build_pose_estimator +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ForwardResults, OptConfigType, OptMultiConfig, + OptSampleList, SampleList) + + +@MODELS.register_module() +class DWPoseDistiller(BaseModel, metaclass=ABCMeta): + """Distiller introduced in `DWPose`_ by Yang et al (2023). This distiller + is designed for distillation of RTMPose. + + It typically consists of teacher_model and student_model. 
Please use the + script `tools/misc/pth_transfer.py` to transfer the distilled model to the + original RTMPose model. + + Args: + teacher_cfg (str): Config file of the teacher model. + student_cfg (str): Config file of the student model. + two_dis (bool): Whether this is the second stage of distillation. + Defaults to False. + distill_cfg (dict): Config for distillation. Defaults to None. + teacher_pretrained (str): Path of the pretrained teacher model. + Defaults to None. + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None`` + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + + .. _`DWPose`: https://arxiv.org/abs/2307.15880 + """ + + def __init__(self, + teacher_cfg, + student_cfg, + two_dis=False, + distill_cfg=None, + teacher_pretrained=None, + train_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.teacher = build_pose_estimator( + (Config.fromfile(teacher_cfg)).model) + self.teacher_pretrained = teacher_pretrained + self.teacher.eval() + for param in self.teacher.parameters(): + param.requires_grad = False + + self.student = build_pose_estimator( + (Config.fromfile(student_cfg)).model) + + self.distill_cfg = distill_cfg + self.distill_losses = nn.ModuleDict() + if self.distill_cfg is not None: + for item_loc in distill_cfg: + for item_loss in item_loc.methods: + loss_name = item_loss.name + use_this = item_loss.use_this + if use_this: + self.distill_losses[loss_name] = MODELS.build( + item_loss) + + self.two_dis = two_dis + self.train_cfg = train_cfg if train_cfg else self.student.train_cfg + self.test_cfg = self.student.test_cfg + self.metainfo = self.student.metainfo + + def init_weights(self): + if self.teacher_pretrained is not None: + load_checkpoint( + self.teacher, self.teacher_pretrained, map_location='cpu') + self.student.init_weights() + + def set_epoch(self): + """Set epoch for distiller. + + Used for the decay of distillation loss. + """ + self.message_hub = MessageHub.get_current_instance() + self.epoch = self.message_hub.get_info('epoch') + self.max_epochs = self.message_hub.get_info('max_epochs') + + def forward(self, + inputs: torch.Tensor, + data_samples: OptSampleList, + mode: str = 'tensor') -> ForwardResults: + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + # use customed metainfo to override the default metainfo + if self.metainfo is not None: + for data_sample in data_samples: + data_sample.set_metainfo(self.metainfo) + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs) + else: + raise RuntimeError(f'Invalid mode "{mode}". ' + 'Only supports loss, predict and tensor mode.') + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. 
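+
+        The distillation terms added below are selected by the ``name``
+        fields inside ``distill_cfg``. A sketch of such a config (the loss
+        types and their hyper-parameters here are assumptions, not values
+        prescribed by this method)::
+
+            distill_cfg = [
+                dict(methods=[
+                    dict(type='FeaLoss', name='loss_fea', use_this=True,
+                         student_channels=768, teacher_channels=1024)
+                ]),
+                dict(methods=[
+                    dict(type='KDLoss', name='loss_logit', use_this=True,
+                         weight=0.1)
+                ]),
+            ]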
+ """ + self.set_epoch() + + losses = dict() + + with torch.no_grad(): + fea_t = self.teacher.extract_feat(inputs) + lt_x, lt_y = self.teacher.head(fea_t) + pred_t = (lt_x, lt_y) + + if not self.two_dis: + fea_s = self.student.extract_feat(inputs) + ori_loss, pred, gt, target_weight = self.head_loss( + fea_s, data_samples, train_cfg=self.train_cfg) + losses.update(ori_loss) + else: + ori_loss, pred, gt, target_weight = self.head_loss( + fea_t, data_samples, train_cfg=self.train_cfg) + + all_keys = self.distill_losses.keys() + + if 'loss_fea' in all_keys: + loss_name = 'loss_fea' + losses[loss_name] = self.distill_losses[loss_name](fea_s[-1], + fea_t[-1]) + if not self.two_dis: + losses[loss_name] = ( + 1 - self.epoch / self.max_epochs) * losses[loss_name] + + if 'loss_logit' in all_keys: + loss_name = 'loss_logit' + losses[loss_name] = self.distill_losses[loss_name]( + pred, pred_t, self.student.head.loss_module.beta, + target_weight) + if not self.two_dis: + losses[loss_name] = ( + 1 - self.epoch / self.max_epochs) * losses[loss_name] + + return losses + + def predict(self, inputs, data_samples): + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W) + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + if self.two_dis: + assert self.student.with_head, ( + 'The model must have head to perform prediction.') + + if self.test_cfg.get('flip_test', False): + _feats = self.extract_feat(inputs) + _feats_flip = self.extract_feat(inputs.flip(-1)) + feats = [_feats, _feats_flip] + else: + feats = self.extract_feat(inputs) + + preds = self.student.head.predict( + feats, data_samples, test_cfg=self.student.test_cfg) + + if isinstance(preds, tuple): + batch_pred_instances, batch_pred_fields = preds + else: + batch_pred_instances = preds + batch_pred_fields = None + + results = self.student.add_pred_to_datasample( + batch_pred_instances, batch_pred_fields, data_samples) + + return results + else: + return self.student.predict(inputs, data_samples) + + def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + inputs (Tensor): Image tensor with shape (N, C, H ,W). + + Returns: + tuple[Tensor]: Multi-level features that may have various + resolutions. 
+ """ + x = self.teacher.extract_feat(inputs) + return x + + def head_loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.student.head.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.student.head.loss_module(pred_simcc, gt_simcc, + keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.student.head.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses, pred_simcc, gt_simcc, keypoint_weights + + def _forward(self, inputs: Tensor): + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + + Returns: + Union[Tensor | Tuple[Tensor]]: forward output of the network. + """ + return self.student._forward(inputs) diff --git a/mmpose/models/heads/__init__.py b/mmpose/models/heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..319f0c6836be700a335b9667ca91f442e86ad70a --- /dev/null +++ b/mmpose/models/heads/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_head import BaseHead +from .coord_cls_heads import RTMCCHead, RTMWHead, SimCCHead +from .heatmap_heads import (AssociativeEmbeddingHead, CIDHead, CPMHead, + HeatmapHead, InternetHead, MSPNHead, ViPNASHead) +from .hybrid_heads import DEKRHead, RTMOHead, VisPredictHead +from .regression_heads import (DSNTHead, IntegralRegressionHead, + MotionRegressionHead, RegressionHead, RLEHead, + TemporalRegressionHead, + TrajectoryRegressionHead) +from .transformer_heads import EDPoseHead + +__all__ = [ + 'BaseHead', 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', + 'RegressionHead', 'IntegralRegressionHead', 'SimCCHead', 'RLEHead', + 'DSNTHead', 'AssociativeEmbeddingHead', 'DEKRHead', 'VisPredictHead', + 'CIDHead', 'RTMCCHead', 'TemporalRegressionHead', + 'TrajectoryRegressionHead', 'MotionRegressionHead', 'EDPoseHead', + 'InternetHead', 'RTMWHead', 'RTMOHead' +] diff --git a/mmpose/models/heads/base_head.py b/mmpose/models/heads/base_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d35c27b8b2b8db0ea737765d962de204304d2f19 --- /dev/null +++ b/mmpose/models/heads/base_head.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Tuple, Union + +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (Features, InstanceList, OptConfigType, + OptSampleList, Predictions) + + +class BaseHead(BaseModule, metaclass=ABCMeta): + """Base head. A subclass should override :meth:`predict` and :meth:`loss`. 
+ + Args: + init_cfg (dict, optional): The extra init config of layers. + Defaults to None. + """ + + @abstractmethod + def forward(self, feats: Tuple[Tensor]): + """Forward the network.""" + + @abstractmethod + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from features.""" + + @abstractmethod + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + def decode(self, batch_outputs: Union[Tensor, + Tuple[Tensor]]) -> InstanceList: + """Decode keypoints from outputs. + + Args: + batch_outputs (Tensor | Tuple[Tensor]): The network outputs of + a data batch + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + def _pack_and_call(args, func): + if not isinstance(args, tuple): + args = (args, ) + return func(*args) + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. ' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + if self.decoder.support_batch_decoding: + batch_keypoints, batch_scores = _pack_and_call( + batch_outputs, self.decoder.batch_decode) + if isinstance(batch_scores, tuple) and len(batch_scores) == 2: + batch_scores, batch_visibility = batch_scores + else: + batch_visibility = [None] * len(batch_keypoints) + + else: + batch_output_np = to_numpy(batch_outputs, unzip=True) + batch_keypoints = [] + batch_scores = [] + batch_visibility = [] + for outputs in batch_output_np: + keypoints, scores = _pack_and_call(outputs, + self.decoder.decode) + batch_keypoints.append(keypoints) + if isinstance(scores, tuple) and len(scores) == 2: + batch_scores.append(scores[0]) + batch_visibility.append(scores[1]) + else: + batch_scores.append(scores) + batch_visibility.append(None) + + preds = [] + for keypoints, scores, visibility in zip(batch_keypoints, batch_scores, + batch_visibility): + pred = InstanceData(keypoints=keypoints, keypoint_scores=scores) + if visibility is not None: + pred.keypoints_visible = visibility + preds.append(pred) + + return preds diff --git a/mmpose/models/heads/coord_cls_heads/__init__.py b/mmpose/models/heads/coord_cls_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a4e51c4d7307486a8d14a49f757caddacfbe2cc --- /dev/null +++ b/mmpose/models/heads/coord_cls_heads/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .rtmcc_head import RTMCCHead +from .rtmw_head import RTMWHead +from .simcc_head import SimCCHead + +__all__ = ['SimCCHead', 'RTMCCHead', 'RTMWHead'] diff --git a/mmpose/models/heads/coord_cls_heads/rtmcc_head.py b/mmpose/models/heads/coord_cls_heads/rtmcc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..5df0733c4827af56ffe7635d7ba083890efb9f2b --- /dev/null +++ b/mmpose/models/heads/coord_cls_heads/rtmcc_head.py @@ -0,0 +1,303 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
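+# Example head specification as it would appear in a config file (an
+# illustrative sketch; the channel number, keypoint count, input size and
+# decoder settings are assumptions chosen to be mutually consistent, not
+# values copied from a released config):
+#
+#     head=dict(
+#         type='RTMCCHead',
+#         in_channels=768,
+#         out_channels=17,
+#         input_size=(192, 256),
+#         in_featuremap_size=(6, 8),
+#         simcc_split_ratio=2.0,
+#         final_layer_kernel_size=7,
+#         loss=dict(type='KLDiscretLoss', use_target_weight=True),
+#         decoder=dict(
+#             type='SimCCLabel',
+#             input_size=(192, 256),
+#             sigma=(4.9, 5.66),
+#             simcc_split_ratio=2.0,
+#             normalize=False))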
+import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmengine.dist import get_dist_info +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.codecs.utils import get_simcc_normalized +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models.utils.rtmcc_block import RTMCCBlock, ScaleNorm +from mmpose.models.utils.tta import flip_vectors +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RTMCCHead(BaseHead): + """Top-down head introduced in RTMPose (2023). The head is composed of a + large-kernel convolutional layer, a fully-connected layer and a Gated + Attention Unit to generate 1d representation from low-resolution feature + maps. + + Args: + in_channels (int | sequence[int]): Number of channels in the input + feature map. + out_channels (int): Number of channels in the output heatmap. + input_size (tuple): Size of input image in shape [w, h]. + in_featuremap_size (int | sequence[int]): Size of input feature map. + simcc_split_ratio (float): Split ratio of pixels. + Default: 2.0. + final_layer_kernel_size (int): Kernel size of the convolutional layer. + Default: 1. + gau_cfg (Config): Config dict for the Gated Attention Unit. + Default: dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False). + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KLDiscretLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + input_size: Tuple[int, int], + in_featuremap_size: Tuple[int, int], + simcc_split_ratio: float = 2.0, + final_layer_kernel_size: int = 1, + gau_cfg: ConfigType = dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False), + loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.input_size = input_size + self.in_featuremap_size = in_featuremap_size + self.simcc_split_ratio = simcc_split_ratio + + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if isinstance(in_channels, (tuple, list)): + raise ValueError( + f'{self.__class__.__name__} does not support selecting ' + 'multiple input features.') + + # Define SimCC layers + flatten_dims = self.in_featuremap_size[0] * self.in_featuremap_size[1] + + self.final_layer = nn.Conv2d( + in_channels, + out_channels, + kernel_size=final_layer_kernel_size, + stride=1, + padding=final_layer_kernel_size // 2) + self.mlp = nn.Sequential( + ScaleNorm(flatten_dims), + nn.Linear(flatten_dims, gau_cfg['hidden_dims'], bias=False)) + + W = int(self.input_size[0] * self.simcc_split_ratio) + H = int(self.input_size[1] * self.simcc_split_ratio) + + self.gau = RTMCCBlock( + self.out_channels, + gau_cfg['hidden_dims'], + gau_cfg['hidden_dims'], + s=gau_cfg['s'], + expansion_factor=gau_cfg['expansion_factor'], + dropout_rate=gau_cfg['dropout_rate'], + drop_path=gau_cfg['drop_path'], + attn_type='self-attn', + act_fn=gau_cfg['act_fn'], + use_rel_bias=gau_cfg['use_rel_bias'], + pos_enc=gau_cfg['pos_enc']) + + self.cls_x = nn.Linear(gau_cfg['hidden_dims'], W, bias=False) + self.cls_y = nn.Linear(gau_cfg['hidden_dims'], H, bias=False) + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. + + The input is the featuremap extracted by backbone and the + output is the simcc representation. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + pred_x (Tensor): 1d representation of x. + pred_y (Tensor): 1d representation of y. + """ + feats = feats[-1] + + feats = self.final_layer(feats) # -> B, K, H, W + + # flatten the output heatmap + feats = torch.flatten(feats, 2) + + feats = self.mlp(feats) # -> B, K, hidden + + feats = self.gau(feats) + + pred_x = self.cls_x(feats) + pred_y = self.cls_y(feats) + + return pred_x, pred_y + + def predict( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}, + ) -> InstanceList: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. 
Defaults + to {} + + Returns: + List[InstanceData]: The pose predictions, each contains + the following fields: + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_x_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the x direction + - keypoint_y_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the y direction + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_pred_x, _batch_pred_y = self.forward(_feats) + + _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) + _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( + _batch_pred_x_flip, + _batch_pred_y_flip, + flip_indices=flip_indices) + + batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 + batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 + else: + batch_pred_x, batch_pred_y = self.forward(feats) + + preds = self.decode((batch_pred_x, batch_pred_y)) + + if test_cfg.get('output_heatmaps', False): + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('The predicted simcc values are normalized for ' + 'visualization. This may cause discrepancy ' + 'between the keypoint scores and the 1D heatmaps' + '.') + + # normalize the predicted 1d distribution + batch_pred_x = get_simcc_normalized(batch_pred_x) + batch_pred_y = get_simcc_normalized(batch_pred_y) + + B, K, _ = batch_pred_x.shape + # B, K, Wx -> B, K, Wx, 1 + x = batch_pred_x.reshape(B, K, 1, -1) + # B, K, Wy -> B, K, 1, Wy + y = batch_pred_y.reshape(B, K, -1, 1) + # B, K, Wx, Wy + batch_heatmaps = torch.matmul(y, x) + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + + for pred_instances, pred_x, pred_y in zip(preds, + to_numpy(batch_pred_x), + to_numpy(batch_pred_y)): + + pred_instances.keypoint_x_labels = pred_x[None] + pred_instances.keypoint_y_labels = pred_y[None] + + return preds, pred_fields + else: + return preds + + def loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1), + 
dict(type='Normal', layer=['Linear'], std=0.01, bias=0), + ] + return init_cfg diff --git a/mmpose/models/heads/coord_cls_heads/rtmw_head.py b/mmpose/models/heads/coord_cls_heads/rtmw_head.py new file mode 100644 index 0000000000000000000000000000000000000000..7111f9044615e5e58726f42372406a973f86cf8b --- /dev/null +++ b/mmpose/models/heads/coord_cls_heads/rtmw_head.py @@ -0,0 +1,337 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import ConvModule +from mmengine.dist import get_dist_info +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.codecs.utils import get_simcc_normalized +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models.utils.rtmcc_block import RTMCCBlock, ScaleNorm +from mmpose.models.utils.tta import flip_vectors +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RTMWHead(BaseHead): + """Top-down head introduced in RTMPose-Wholebody (2023). + + Args: + in_channels (int | sequence[int]): Number of channels in the input + feature map. + out_channels (int): Number of channels in the output heatmap. + input_size (tuple): Size of input image in shape [w, h]. + in_featuremap_size (int | sequence[int]): Size of input feature map. + simcc_split_ratio (float): Split ratio of pixels. + Default: 2.0. + final_layer_kernel_size (int): Kernel size of the convolutional layer. + Default: 1. + gau_cfg (Config): Config dict for the Gated Attention Unit. + Default: dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False). + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KLDiscretLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + input_size: Tuple[int, int], + in_featuremap_size: Tuple[int, int], + simcc_split_ratio: float = 2.0, + final_layer_kernel_size: int = 1, + gau_cfg: ConfigType = dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False), + loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.input_size = input_size + self.in_featuremap_size = in_featuremap_size + self.simcc_split_ratio = simcc_split_ratio + + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if isinstance(in_channels, (tuple, list)): + raise ValueError( + f'{self.__class__.__name__} does not support selecting ' + 'multiple input features.') + + # Define SimCC layers + flatten_dims = self.in_featuremap_size[0] * self.in_featuremap_size[1] + + ps = 2 + self.ps = nn.PixelShuffle(ps) + self.conv_dec = ConvModule( + in_channels // ps**2, + in_channels // 4, + kernel_size=final_layer_kernel_size, + stride=1, + padding=final_layer_kernel_size // 2, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU')) + + self.final_layer = ConvModule( + in_channels, + out_channels, + kernel_size=final_layer_kernel_size, + stride=1, + padding=final_layer_kernel_size // 2, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU')) + self.final_layer2 = ConvModule( + in_channels // ps + in_channels // 4, + out_channels, + kernel_size=final_layer_kernel_size, + stride=1, + padding=final_layer_kernel_size // 2, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU')) + + self.mlp = nn.Sequential( + ScaleNorm(flatten_dims), + nn.Linear(flatten_dims, gau_cfg['hidden_dims'] // 2, bias=False)) + + self.mlp2 = nn.Sequential( + ScaleNorm(flatten_dims * ps**2), + nn.Linear( + flatten_dims * ps**2, gau_cfg['hidden_dims'] // 2, bias=False)) + + W = int(self.input_size[0] * self.simcc_split_ratio) + H = int(self.input_size[1] * self.simcc_split_ratio) + + self.gau = RTMCCBlock( + self.out_channels, + gau_cfg['hidden_dims'], + gau_cfg['hidden_dims'], + s=gau_cfg['s'], + expansion_factor=gau_cfg['expansion_factor'], + dropout_rate=gau_cfg['dropout_rate'], + drop_path=gau_cfg['drop_path'], + attn_type='self-attn', + act_fn=gau_cfg['act_fn'], + use_rel_bias=gau_cfg['use_rel_bias'], + pos_enc=gau_cfg['pos_enc']) + + self.cls_x = nn.Linear(gau_cfg['hidden_dims'], W, bias=False) + self.cls_y = nn.Linear(gau_cfg['hidden_dims'], H, bias=False) + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. + + The input is the featuremap extracted by backbone and the + output is the simcc representation. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + pred_x (Tensor): 1d representation of x. + pred_y (Tensor): 1d representation of y. 
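+
+        Note: as implemented below, ``feats`` must contain exactly two
+        feature maps ``(enc_b, enc_t)``. The lower-resolution ``enc_t`` is
+        processed directly and, in parallel, upsampled with
+        ``PixelShuffle(2)`` and fused with ``enc_b``; both branches are
+        flattened, projected to ``hidden_dims // 2`` and concatenated
+        before the GAU block.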
+ """ + # enc_b n / 2, h, w + # enc_t n, h, w + enc_b, enc_t = feats + + feats_t = self.final_layer(enc_t) + feats_t = torch.flatten(feats_t, 2) + feats_t = self.mlp(feats_t) + + dec_t = self.ps(enc_t) + dec_t = self.conv_dec(dec_t) + enc_b = torch.cat([dec_t, enc_b], dim=1) + + feats_b = self.final_layer2(enc_b) + feats_b = torch.flatten(feats_b, 2) + feats_b = self.mlp2(feats_b) + + feats = torch.cat([feats_t, feats_b], dim=2) + + feats = self.gau(feats) + + pred_x = self.cls_x(feats) + pred_y = self.cls_y(feats) + + return pred_x, pred_y + + def predict( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}, + ) -> InstanceList: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + List[InstanceData]: The pose predictions, each contains + the following fields: + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_x_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the x direction + - keypoint_y_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the y direction + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_pred_x, _batch_pred_y = self.forward(_feats) + + _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) + _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( + _batch_pred_x_flip, + _batch_pred_y_flip, + flip_indices=flip_indices) + + batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 + batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 + else: + batch_pred_x, batch_pred_y = self.forward(feats) + + preds = self.decode((batch_pred_x, batch_pred_y)) + + if test_cfg.get('output_heatmaps', False): + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('The predicted simcc values are normalized for ' + 'visualization. 
This may cause discrepancy ' + 'between the keypoint scores and the 1D heatmaps' + '.') + + # normalize the predicted 1d distribution + batch_pred_x = get_simcc_normalized(batch_pred_x) + batch_pred_y = get_simcc_normalized(batch_pred_y) + + B, K, _ = batch_pred_x.shape + # B, K, Wx -> B, K, Wx, 1 + x = batch_pred_x.reshape(B, K, 1, -1) + # B, K, Wy -> B, K, 1, Wy + y = batch_pred_y.reshape(B, K, -1, 1) + # B, K, Wx, Wy + batch_heatmaps = torch.matmul(y, x) + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + + for pred_instances, pred_x, pred_y in zip(preds, + to_numpy(batch_pred_x), + to_numpy(batch_pred_y)): + + pred_instances.keypoint_x_labels = pred_x[None] + pred_instances.keypoint_y_labels = pred_y[None] + + return preds, pred_fields + else: + return preds + + def loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1), + dict(type='Normal', layer=['Linear'], std=0.01, bias=0), + ] + return init_cfg diff --git a/mmpose/models/heads/coord_cls_heads/simcc_head.py b/mmpose/models/heads/coord_cls_heads/simcc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e7001cbc31685d5a46f2cedde19606001fc8c8 --- /dev/null +++ b/mmpose/models/heads/coord_cls_heads/simcc_head.py @@ -0,0 +1,371 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer +from mmengine.dist import get_dist_info +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.codecs.utils import get_simcc_normalized +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models.utils.tta import flip_vectors +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class SimCCHead(BaseHead): + """Top-down heatmap head introduced in `SimCC`_ by Li et al (2022). The + head is composed of a few deconvolutional layers followed by a fully- + connected layer to generate 1d representation from low-resolution feature + maps. 
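+
+    In short, the (optionally upsampled) keypoint feature map is flattened
+    per channel and passed through two linear classifiers that output 1-D
+    distributions over ``input_size[0] * simcc_split_ratio`` horizontal
+    bins and ``input_size[1] * simcc_split_ratio`` vertical bins,
+    respectively.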
+ + Args: + in_channels (int | sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + input_size (tuple): Input image size in shape [w, h] + in_featuremap_size (int | sequence[int]): Size of input feature map + simcc_split_ratio (float): Split ratio of pixels + deconv_type (str, optional): The type of deconv head which should + be one of the following options: + + - ``'heatmap'``: make deconv layers in `HeatmapHead` + - ``'vipnas'``: make deconv layers in `ViPNASHead` + + Defaults to ``'Heatmap'`` + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + deconv_num_groups (Sequence[int], optional): The group number of each + deconv layer. Defaults to ``(16, 16, 16)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KLDiscretLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`SimCC`: https://arxiv.org/abs/2107.03332 + """ + + _version = 2 + + def __init__( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + input_size: Tuple[int, int], + in_featuremap_size: Tuple[int, int], + simcc_split_ratio: float = 2.0, + deconv_type: str = 'heatmap', + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + if deconv_type not in {'heatmap', 'vipnas'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `deconv_type` value' + f'{deconv_type}. 
Should be one of ' + '{"heatmap", "vipnas"}') + + self.in_channels = in_channels + self.out_channels = out_channels + self.input_size = input_size + self.in_featuremap_size = in_featuremap_size + self.simcc_split_ratio = simcc_split_ratio + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 + if num_deconv != 0: + self.heatmap_size = tuple( + [s * (2**num_deconv) for s in in_featuremap_size]) + + # deconv layers + 1x1 conv + self.deconv_head = self._make_deconv_head( + in_channels=in_channels, + out_channels=out_channels, + deconv_type=deconv_type, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + deconv_num_groups=deconv_num_groups, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer) + + if final_layer is not None: + in_channels = out_channels + else: + in_channels = deconv_out_channels[-1] + + else: + self.deconv_head = None + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = None + + self.heatmap_size = in_featuremap_size + + # Define SimCC layers + flatten_dims = self.heatmap_size[0] * self.heatmap_size[1] + + W = int(self.input_size[0] * self.simcc_split_ratio) + H = int(self.input_size[1] * self.simcc_split_ratio) + + self.mlp_head_x = nn.Linear(flatten_dims, W) + self.mlp_head_y = nn.Linear(flatten_dims, H) + + def _make_deconv_head( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_type: str = 'heatmap', + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1) + ) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + if deconv_type == 'heatmap': + deconv_head = MODELS.build( + dict( + type='HeatmapHead', + in_channels=self.in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer)) + else: + deconv_head = MODELS.build( + dict( + type='ViPNASHead', + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_num_groups=deconv_num_groups, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer)) + + return deconv_head + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. + + The input is the featuremap extracted by backbone and the + output is the simcc representation. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + pred_x (Tensor): 1d representation of x. + pred_y (Tensor): 1d representation of y. 
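+
+        Shape sketch of the code below (assuming a ``(B, C, h, w)`` input
+        feature map): the deconv head or final layer yields
+        ``(B, K, h', w')``, flattening gives ``(B, K, h' * w')``, and the
+        two linear heads map this to ``pred_x`` of shape ``(B, K, W)`` and
+        ``pred_y`` of shape ``(B, K, H)``, where
+        ``W = input_size[0] * simcc_split_ratio`` and
+        ``H = input_size[1] * simcc_split_ratio``.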
+ """ + if self.deconv_head is None: + feats = feats[-1] + if self.final_layer is not None: + feats = self.final_layer(feats) + else: + feats = self.deconv_head(feats) + + # flatten the output heatmap + x = torch.flatten(feats, 2) + + pred_x = self.mlp_head_x(x) + pred_y = self.mlp_head_y(x) + + return pred_x, pred_y + + def predict( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}, + ) -> InstanceList: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + List[InstanceData]: The pose predictions, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_x_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the x direction + - keypoint_y_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the y direction + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_pred_x, _batch_pred_y = self.forward(_feats) + + _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) + _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( + _batch_pred_x_flip, + _batch_pred_y_flip, + flip_indices=flip_indices) + + batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 + batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 + else: + batch_pred_x, batch_pred_y = self.forward(feats) + + preds = self.decode((batch_pred_x, batch_pred_y)) + + if test_cfg.get('output_heatmaps', False): + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('The predicted simcc values are normalized for ' + 'visualization. 
This may cause discrepancy ' + 'between the keypoint scores and the 1D heatmaps' + '.') + + # normalize the predicted 1d distribution + sigma = self.decoder.sigma + batch_pred_x = get_simcc_normalized(batch_pred_x, sigma[0]) + batch_pred_y = get_simcc_normalized(batch_pred_y, sigma[1]) + + B, K, _ = batch_pred_x.shape + # B, K, Wx -> B, K, Wx, 1 + x = batch_pred_x.reshape(B, K, 1, -1) + # B, K, Wy -> B, K, 1, Wy + y = batch_pred_y.reshape(B, K, -1, 1) + # B, K, Wx, Wy + batch_heatmaps = torch.matmul(y, x) + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + + for pred_instances, pred_x, pred_y in zip(preds, + to_numpy(batch_pred_x), + to_numpy(batch_pred_y)): + + pred_instances.keypoint_x_labels = pred_x[None] + pred_instances.keypoint_y_labels = pred_y[None] + + return preds, pred_fields + else: + return preds + + def loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1), + dict(type='Normal', layer=['Linear'], std=0.01, bias=0), + ] + return init_cfg diff --git a/mmpose/models/heads/heatmap_heads/__init__.py b/mmpose/models/heads/heatmap_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c629455c195755ae1800e87390a56ab56d1dae96 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ae_head import AssociativeEmbeddingHead +from .cid_head import CIDHead +from .cpm_head import CPMHead +from .heatmap_head import HeatmapHead +from .internet_head import InternetHead +from .mspn_head import MSPNHead +from .vipnas_head import ViPNASHead + +__all__ = [ + 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', + 'AssociativeEmbeddingHead', 'CIDHead', 'InternetHead' +] diff --git a/mmpose/models/heads/heatmap_heads/ae_head.py b/mmpose/models/heads/heatmap_heads/ae_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c9559eebc2696fa0363ffdb4807c9a0e70d04e26 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/ae_head.py @@ -0,0 +1,343 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
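+# The associative embedding head below predicts K keypoint heatmaps plus
+# tag maps for each image; keypoints are grouped into instances during
+# decoding by the similarity of their tag values.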
+from typing import List, Optional, Sequence, Tuple, Union + +import torch +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_list_of +from torch import Tensor + +from mmpose.models.utils.tta import aggregate_heatmaps, flip_heatmaps +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, InstanceList, + OptConfigType, OptSampleList, Predictions) +from .heatmap_head import HeatmapHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class AssociativeEmbeddingHead(HeatmapHead): + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_keypoints: int, + tag_dim: int = 1, + tag_per_keypoint: bool = True, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + keypoint_loss: ConfigType = dict(type='KeypointMSELoss'), + tag_loss: ConfigType = dict(type='AssociativeEmbeddingLoss'), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if tag_per_keypoint: + out_channels = num_keypoints * (1 + tag_dim) + else: + out_channels = num_keypoints + tag_dim + + loss = dict( + type='CombinedLoss', + losses=dict(keypoint_loss=keypoint_loss, tag_loss=tag_loss)) + + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer, + loss=loss, + decoder=decoder, + init_cfg=init_cfg) + + self.num_keypoints = num_keypoints + self.tag_dim = tag_dim + self.tag_per_keypoint = tag_per_keypoint + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Features): The features which could be in following forms: + + - Tuple[Tensor]: multi-stage features from the backbone + - List[Tuple[Tensor]]: multiple features for TTA where either + `flip_test` or `multiscale_test` is applied + - List[List[Tuple[Tensor]]]: multiple features for TTA where + both `flip_test` and `multiscale_test` are applied + + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + # test configs + multiscale_test = test_cfg.get('multiscale_test', False) + flip_test = test_cfg.get('flip_test', False) + shift_heatmap = test_cfg.get('shift_heatmap', False) + align_corners = test_cfg.get('align_corners', False) + restore_heatmap_size = test_cfg.get('restore_heatmap_size', False) + output_heatmaps = test_cfg.get('output_heatmaps', False) + + # enable multi-scale test + if multiscale_test: + # TTA: multi-scale test + assert is_list_of(feats, list if flip_test else tuple) + else: + assert is_list_of(feats, tuple if flip_test else Tensor) + feats = [feats] + + # resize heatmaps to align with with input size + if restore_heatmap_size: + img_shape = batch_data_samples[0].metainfo['img_shape'] + assert all(d.metainfo['img_shape'] == img_shape + for d in batch_data_samples) + img_h, img_w = img_shape + heatmap_size = (img_w, img_h) + else: + heatmap_size = None + + multiscale_heatmaps = [] + multiscale_tags = [] + + for scale_idx, _feats in enumerate(feats): + if not flip_test: + _heatmaps, _tags = self.forward(_feats) + + else: + # TTA: flip test + assert isinstance(_feats, list) and len(_feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + # original + _feats_orig, _feats_flip = _feats + _heatmaps_orig, _tags_orig = self.forward(_feats_orig) + + # flipped + _heatmaps_flip, _tags_flip = self.forward(_feats_flip) + _heatmaps_flip = flip_heatmaps( + _heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices, + shift_heatmap=shift_heatmap) + _tags_flip = self._flip_tags( + _tags_flip, + flip_indices=flip_indices, + shift_heatmap=shift_heatmap) + + # aggregated heatmaps + _heatmaps = aggregate_heatmaps( + [_heatmaps_orig, _heatmaps_flip], + size=heatmap_size, + align_corners=align_corners, + mode='average') + + # aggregated tags (only at original scale) + if scale_idx == 0: + _tags = aggregate_heatmaps([_tags_orig, _tags_flip], + size=heatmap_size, + align_corners=align_corners, + mode='concat') + else: + _tags = None + + multiscale_heatmaps.append(_heatmaps) + multiscale_tags.append(_tags) + + # aggregate multi-scale heatmaps + if len(feats) > 1: + batch_heatmaps = aggregate_heatmaps( + multiscale_heatmaps, + align_corners=align_corners, + mode='average') + else: + batch_heatmaps = multiscale_heatmaps[0] + # only keep tags at original scale + batch_tags = multiscale_tags[0] + + batch_outputs = tuple([batch_heatmaps, batch_tags]) + preds = self.decode(batch_outputs) + + if output_heatmaps: + pred_fields = [] + for _heatmaps, _tags in zip(batch_heatmaps.detach(), + batch_tags.detach()): + pred_fields.append(PixelData(heatmaps=_heatmaps, tags=_tags)) + + return preds, pred_fields + else: + return preds + + def _flip_tags(self, + tags: Tensor, + flip_indices: List[int], + shift_heatmap: bool = True): + """Flip the tagging heatmaps horizontally for test-time augmentation. 
+ + Args: + tags (Tensor): batched tagging heatmaps to flip + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + shift_heatmap (bool): Shift the flipped heatmaps to align with the + original heatmaps and improve accuracy. Defaults to ``True`` + + Returns: + Tensor: flipped tagging heatmaps + """ + B, C, H, W = tags.shape + K = self.num_keypoints + L = self.tag_dim + + tags = tags.flip(-1) + + if self.tag_per_keypoint: + assert C == K * L + tags = tags.view(B, L, K, H, W) + tags = tags[:, :, flip_indices] + tags = tags.view(B, C, H, W) + + if shift_heatmap: + tags[..., 1:] = tags[..., :-1].clone() + + return tags + + def decode(self, batch_outputs: Union[Tensor, + Tuple[Tensor]]) -> InstanceList: + """Decode keypoints from outputs. + + Args: + batch_outputs (Tensor | Tuple[Tensor]): The network outputs of + a data batch + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + def _pack_and_call(args, func): + if not isinstance(args, tuple): + args = (args, ) + return func(*args) + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. ' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + if self.decoder.support_batch_decoding: + batch_keypoints, batch_scores, batch_instance_scores = \ + _pack_and_call(batch_outputs, self.decoder.batch_decode) + + else: + batch_output_np = to_numpy(batch_outputs, unzip=True) + batch_keypoints = [] + batch_scores = [] + batch_instance_scores = [] + for outputs in batch_output_np: + keypoints, scores, instance_scores = _pack_and_call( + outputs, self.decoder.decode) + batch_keypoints.append(keypoints) + batch_scores.append(scores) + batch_instance_scores.append(instance_scores) + + preds = [ + InstanceData( + bbox_scores=instance_scores, + keypoints=keypoints, + keypoint_scores=scores) + for keypoints, scores, instance_scores in zip( + batch_keypoints, batch_scores, batch_instance_scores) + ] + + return preds + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. The input is multi scale feature maps and the + output is the heatmaps and tags. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + tuple: + - heatmaps (Tensor): output heatmaps + - tags (Tensor): output tags + """ + + output = super().forward(feats) + heatmaps = output[:, :self.num_keypoints] + tags = output[:, self.num_keypoints:] + return heatmaps, tags + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
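+                Here the dictionary contains ``loss_kpt`` (the heatmap
+                loss) together with ``loss_pull`` and ``loss_push`` (the
+                two associative-embedding grouping terms).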
+ """ + pred_heatmaps, pred_tags = self.forward(feats) + + if not self.tag_per_keypoint: + pred_tags = pred_tags.repeat((1, self.num_keypoints, 1, 1)) + + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + gt_masks = torch.stack( + [d.gt_fields.heatmap_mask for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + keypoint_indices = [ + d.gt_instance_labels.keypoint_indices for d in batch_data_samples + ] + + loss_kpt = self.loss_module.keypoint_loss(pred_heatmaps, gt_heatmaps, + keypoint_weights, gt_masks) + + loss_pull, loss_push = self.loss_module.tag_loss( + pred_tags, keypoint_indices) + + losses = { + 'loss_kpt': loss_kpt, + 'loss_pull': loss_pull, + 'loss_push': loss_push + } + + return losses diff --git a/mmpose/models/heads/heatmap_heads/cid_head.py b/mmpose/models/heads/heatmap_heads/cid_head.py new file mode 100644 index 0000000000000000000000000000000000000000..39e0211a3e135c1c101c14e37956528d3330ca1b --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/cid_head.py @@ -0,0 +1,743 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer +from mmengine.model import BaseModule, ModuleDict +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + + +def smooth_heatmaps(heatmaps: Tensor, blur_kernel_size: int) -> Tensor: + """Smooth the heatmaps by blurring and averaging. + + Args: + heatmaps (Tensor): The heatmaps to smooth. + blur_kernel_size (int): The kernel size for blurring the heatmaps. + + Returns: + Tensor: The smoothed heatmaps. + """ + smoothed_heatmaps = torch.nn.functional.avg_pool2d( + heatmaps, blur_kernel_size, 1, (blur_kernel_size - 1) // 2) + smoothed_heatmaps = (heatmaps + smoothed_heatmaps) / 2.0 + return smoothed_heatmaps + + +class TruncSigmoid(nn.Sigmoid): + """A sigmoid activation function that truncates the output to the given + range. + + Args: + min (float, optional): The minimum value to clamp the output to. + Defaults to 0.0 + max (float, optional): The maximum value to clamp the output to. + Defaults to 1.0 + """ + + def __init__(self, min: float = 0.0, max: float = 1.0): + super(TruncSigmoid, self).__init__() + self.min = min + self.max = max + + def forward(self, input: Tensor) -> Tensor: + """Computes the truncated sigmoid activation of the input tensor.""" + output = torch.sigmoid(input) + output = output.clamp(min=self.min, max=self.max) + return output + + +class IIAModule(BaseModule): + """Instance Information Abstraction module introduced in `CID`. This module + extracts the feature representation vectors for each instance. + + Args: + in_channels (int): Number of channels in the input feature tensor + out_channels (int): Number of channels of the output heatmaps + clamp_delta (float, optional): A small value that prevents the sigmoid + activation from becoming saturated. Defaults to 1e-4. + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + clamp_delta: float = 1e-4, + init_cfg: OptConfigType = None, + ): + super().__init__(init_cfg=init_cfg) + + self.keypoint_root_conv = build_conv_layer( + dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1)) + self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) + + def forward(self, feats: Tensor): + heatmaps = self.keypoint_root_conv(feats) + heatmaps = self.sigmoid(heatmaps) + return heatmaps + + def _sample_feats(self, feats: Tensor, indices: Tensor) -> Tensor: + """Extract feature vectors at the specified indices from the input + feature map. + + Args: + feats (Tensor): Input feature map. + indices (Tensor): Indices of the feature vectors to extract. + + Returns: + Tensor: Extracted feature vectors. + """ + assert indices.dtype == torch.long + if indices.shape[1] == 3: + b, w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] + instance_feats = feats[b, :, h, w] + elif indices.shape[1] == 2: + w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] + instance_feats = feats[:, :, h, w] + instance_feats = instance_feats.permute(0, 2, 1) + instance_feats = instance_feats.reshape(-1, + instance_feats.shape[-1]) + + else: + raise ValueError(f'`indices` should have 2 or 3 channels, ' + f'but got f{indices.shape[1]}') + return instance_feats + + def _hierarchical_pool(self, heatmaps: Tensor) -> Tensor: + """Conduct max pooling on the input heatmaps with different kernel size + according to the input size. + + Args: + heatmaps (Tensor): Input heatmaps. + + Returns: + Tensor: Result of hierarchical pooling. + """ + map_size = (heatmaps.shape[-1] + heatmaps.shape[-2]) / 2.0 + if map_size > 300: + maxm = torch.nn.functional.max_pool2d(heatmaps, 7, 1, 3) + elif map_size > 200: + maxm = torch.nn.functional.max_pool2d(heatmaps, 5, 1, 2) + else: + maxm = torch.nn.functional.max_pool2d(heatmaps, 3, 1, 1) + return maxm + + def forward_train(self, feats: Tensor, instance_coords: Tensor, + instance_imgids: Tensor) -> Tuple[Tensor, Tensor]: + """Forward pass during training. + + Args: + feats (Tensor): Input feature tensor. + instance_coords (Tensor): Coordinates of the instance roots. + instance_imgids (Tensor): Sample indices of each instances + in the batch. + + Returns: + Tuple[Tensor, Tensor]: Extracted feature vectors and heatmaps + for the instances. + """ + heatmaps = self.forward(feats) + indices = torch.cat((instance_imgids[:, None], instance_coords), dim=1) + instance_feats = self._sample_feats(feats, indices) + + return instance_feats, heatmaps + + def forward_test( + self, feats: Tensor, test_cfg: Dict + ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + """Forward pass during testing. + + Args: + feats (Tensor): Input feature tensor. + test_cfg (Dict): Testing configuration, including: + - blur_kernel_size (int, optional): Kernel size for blurring + the heatmaps. Defaults to 3. + - max_instances (int, optional): Maximum number of instances + to extract. Defaults to 30. + - score_threshold (float, optional): Minimum score for + extracting an instance. Defaults to 0.01. + - flip_test (bool, optional): Whether to compute the average + of the heatmaps across the batch dimension. + Defaults to False. + + Returns: + A tuple of Tensor including extracted feature vectors, + coordinates, and scores of the instances. Any of these can be + empty Tensor if no instances are extracted. 
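+
+        The root coordinates are recovered from the flattened ``topk``
+        indices as ``(x, y) = (ind % W, ind // W)``. A small standalone
+        sketch of this step (the width and indices are illustrative)::
+
+            >>> import torch
+            >>> W = 4
+            >>> pos_ind = torch.tensor([5, 10])
+            >>> torch.stack((pos_ind % W, pos_ind // W), dim=1)
+            tensor([[1, 1],
+                    [2, 2]])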
+ """ + blur_kernel_size = test_cfg.get('blur_kernel_size', 3) + max_instances = test_cfg.get('max_instances', 30) + score_threshold = test_cfg.get('score_threshold', 0.01) + H, W = feats.shape[-2:] + + # compute heatmaps + heatmaps = self.forward(feats).narrow(1, -1, 1) + if test_cfg.get('flip_test', False): + heatmaps = heatmaps.mean(dim=0, keepdims=True) + smoothed_heatmaps = smooth_heatmaps(heatmaps, blur_kernel_size) + + # decode heatmaps + maximums = self._hierarchical_pool(smoothed_heatmaps) + maximums = torch.eq(maximums, smoothed_heatmaps).float() + maximums = (smoothed_heatmaps * maximums).reshape(-1) + scores, pos_ind = maximums.topk(max_instances, dim=0) + select_ind = (scores > (score_threshold)).nonzero().squeeze(1) + scores, pos_ind = scores[select_ind], pos_ind[select_ind] + + # sample feature vectors from feature map + instance_coords = torch.stack((pos_ind % W, pos_ind // W), dim=1) + instance_feats = self._sample_feats(feats, instance_coords) + + return instance_feats, instance_coords, scores + + +class ChannelAttention(nn.Module): + """Channel-wise attention module introduced in `CID`. + + Args: + in_channels (int): The number of channels of the input instance + vectors. + out_channels (int): The number of channels of the transformed instance + vectors. + """ + + def __init__(self, in_channels: int, out_channels: int): + super(ChannelAttention, self).__init__() + self.atn = nn.Linear(in_channels, out_channels) + + def forward(self, global_feats: Tensor, instance_feats: Tensor) -> Tensor: + """Applies attention to the channel dimension of the input tensor.""" + + instance_feats = self.atn(instance_feats).unsqueeze(2).unsqueeze(3) + return global_feats * instance_feats + + +class SpatialAttention(nn.Module): + """Spatial-wise attention module introduced in `CID`. + + Args: + in_channels (int): The number of channels of the input instance + vectors. + out_channels (int): The number of channels of the transformed instance + vectors. + """ + + def __init__(self, in_channels, out_channels): + super(SpatialAttention, self).__init__() + self.atn = nn.Linear(in_channels, out_channels) + self.feat_stride = 4 + self.conv = nn.Conv2d(3, 1, 5, 1, 2) + + def _get_pixel_coords(self, heatmap_size: Tuple, device: str = 'cpu'): + """Get pixel coordinates for each element in the heatmap. + + Args: + heatmap_size (tuple): Size of the heatmap in (W, H) format. + device (str): Device to put the resulting tensor on. + + Returns: + Tensor of shape (batch_size, num_pixels, 2) containing the pixel + coordinates for each element in the heatmap. + """ + w, h = heatmap_size + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) + pixel_coords = torch.stack((x, y), dim=-1).reshape(-1, 2) + pixel_coords = pixel_coords.float().to(device) + 0.5 + return pixel_coords + + def forward(self, global_feats: Tensor, instance_feats: Tensor, + instance_coords: Tensor) -> Tensor: + """Perform spatial attention. + + Args: + global_feats (Tensor): Tensor containing the global features. + instance_feats (Tensor): Tensor containing the instance feature + vectors. + instance_coords (Tensor): Tensor containing the root coordinates + of the instances. + + Returns: + Tensor containing the modulated global features. 
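+
+        The spatial mask is conditioned on the offset of every pixel centre
+        from the instance root. A small sketch of the pixel-centre grid
+        built by ``_get_pixel_coords`` (the 3x2 heatmap size and channel
+        number are assumptions for illustration)::
+
+            >>> SpatialAttention(4, 4)._get_pixel_coords((3, 2))
+            tensor([[0.5000, 0.5000],
+                    [1.5000, 0.5000],
+                    [2.5000, 0.5000],
+                    [0.5000, 1.5000],
+                    [1.5000, 1.5000],
+                    [2.5000, 1.5000]])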
+ """ + B, C, H, W = global_feats.size() + + instance_feats = self.atn(instance_feats).reshape(B, C, 1, 1) + feats = global_feats * instance_feats.expand_as(global_feats) + fsum = torch.sum(feats, dim=1, keepdim=True) + + pixel_coords = self._get_pixel_coords((W, H), feats.device) + relative_coords = instance_coords.reshape( + -1, 1, 2) - pixel_coords.reshape(1, -1, 2) + relative_coords = relative_coords.permute(0, 2, 1) / 32.0 + relative_coords = relative_coords.reshape(B, 2, H, W) + + input_feats = torch.cat((fsum, relative_coords), dim=1) + mask = self.conv(input_feats).sigmoid() + return global_feats * mask + + +class GFDModule(BaseModule): + """Global Feature Decoupling module introduced in `CID`. This module + extracts the decoupled heatmaps for each instance. + + Args: + in_channels (int): Number of channels in the input feature map + out_channels (int): Number of channels of the output heatmaps + for each instance + gfd_channels (int): Number of channels in the transformed feature map + clamp_delta (float, optional): A small value that prevents the sigmoid + activation from becoming saturated. Defaults to 1e-4. + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + gfd_channels: int, + clamp_delta: float = 1e-4, + init_cfg: OptConfigType = None, + ): + super().__init__(init_cfg=init_cfg) + + self.conv_down = build_conv_layer( + dict( + type='Conv2d', + in_channels=in_channels, + out_channels=gfd_channels, + kernel_size=1)) + + self.channel_attention = ChannelAttention(in_channels, gfd_channels) + self.spatial_attention = SpatialAttention(in_channels, gfd_channels) + self.fuse_attention = build_conv_layer( + dict( + type='Conv2d', + in_channels=gfd_channels * 2, + out_channels=gfd_channels, + kernel_size=1)) + self.heatmap_conv = build_conv_layer( + dict( + type='Conv2d', + in_channels=gfd_channels, + out_channels=out_channels, + kernel_size=1)) + self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) + + def forward( + self, + feats: Tensor, + instance_feats: Tensor, + instance_coords: Tensor, + instance_imgids: Tensor, + ) -> Tensor: + """Extract decoupled heatmaps for each instance. + + Args: + feats (Tensor): Input feature maps. + instance_feats (Tensor): Tensor containing the instance feature + vectors. + instance_coords (Tensor): Tensor containing the root coordinates + of the instances. + instance_imgids (Tensor): Sample indices of each instances + in the batch. + + Returns: + A tensor containing decoupled heatmaps. + """ + + global_feats = self.conv_down(feats) + global_feats = global_feats[instance_imgids] + cond_instance_feats = torch.cat( + (self.channel_attention(global_feats, instance_feats), + self.spatial_attention(global_feats, instance_feats, + instance_coords)), + dim=1) + + cond_instance_feats = self.fuse_attention(cond_instance_feats) + cond_instance_feats = torch.nn.functional.relu(cond_instance_feats) + cond_instance_feats = self.heatmap_conv(cond_instance_feats) + heatmaps = self.sigmoid(cond_instance_feats) + + return heatmaps + + +@MODELS.register_module() +class CIDHead(BaseHead): + """Contextual Instance Decoupling head introduced in `Contextual Instance + Decoupling for Robust Multi-Person Pose Estimation (CID)`_ by Wang et al + (2022). The head is composed of an Instance Information Abstraction (IIA) + module and a Global Feature Decoupling (GFD) module. 
+ + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + num_keypoints (int): Number of keypoints + gfd_channels (int): Number of filters in GFD module + max_train_instances (int): Maximum number of instances in a batch + during training. Defaults to 200 + heatmap_loss (Config): Config of the heatmap loss. Defaults to use + :class:`KeypointMSELoss` + coupled_heatmap_loss (Config): Config of the loss for coupled heatmaps. + Defaults to use :class:`SoftWeightSmoothL1Loss` + decoupled_heatmap_loss (Config): Config of the loss for decoupled + heatmaps. Defaults to use :class:`SoftWeightSmoothL1Loss` + contrastive_loss (Config): Config of the contrastive loss for + representation vectors of instances. Defaults to use + :class:`InfoNCELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ + Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ + CVPR_2022_paper.html + """ + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + gfd_channels: int, + num_keypoints: int, + prior_prob: float = 0.01, + coupled_heatmap_loss: OptConfigType = dict( + type='FocalHeatmapLoss'), + decoupled_heatmap_loss: OptConfigType = dict( + type='FocalHeatmapLoss'), + contrastive_loss: OptConfigType = dict(type='InfoNCELoss'), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_keypoints = num_keypoints + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build sub-modules + bias_value = -math.log((1 - prior_prob) / prior_prob) + self.iia_module = IIAModule( + in_channels, + num_keypoints + 1, + init_cfg=init_cfg + [ + dict( + type='Normal', + layer=['Conv2d', 'Linear'], + std=0.001, + override=dict( + name='keypoint_root_conv', + type='Normal', + std=0.001, + bias=bias_value)) + ]) + self.gfd_module = GFDModule( + in_channels, + num_keypoints, + gfd_channels, + init_cfg=init_cfg + [ + dict( + type='Normal', + layer=['Conv2d', 'Linear'], + std=0.001, + override=dict( + name='heatmap_conv', + type='Normal', + std=0.001, + bias=bias_value)) + ]) + + # build losses + self.loss_module = ModuleDict( + dict( + heatmap_coupled=MODELS.build(coupled_heatmap_loss), + heatmap_decoupled=MODELS.build(decoupled_heatmap_loss), + contrastive=MODELS.build(contrastive_loss), + )) + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d', 'Linear'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output heatmap. 
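+
+        A minimal usage sketch (the input channel number, GFD channel
+        number, keypoint number and feature size are assumptions for
+        illustration, not prescribed defaults)::
+
+            >>> import torch
+            >>> head = CIDHead(
+            ...     in_channels=480, gfd_channels=32, num_keypoints=17)
+            >>> feats = (torch.rand(1, 480, 64, 48), )
+            >>> instance_heatmaps = head.forward(feats)
+            >>> instance_heatmaps.shape[1:]  # (K, H, W) per detected root
+            torch.Size([17, 64, 48])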
+ """ + feats = feats[-1] + instance_info = self.iia_module.forward_test(feats, {}) + instance_feats, instance_coords, instance_scores = instance_info + instance_imgids = torch.zeros( + instance_coords.size(0), dtype=torch.long, device=feats.device) + instance_heatmaps = self.gfd_module(feats, instance_feats, + instance_coords, instance_imgids) + + return instance_heatmaps + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + metainfo = batch_data_samples[0].metainfo + + if test_cfg.get('flip_test', False): + assert isinstance(feats, list) and len(feats) == 2 + + feats_flipped = flip_heatmaps(feats[1][-1], shift_heatmap=False) + feats = torch.cat((feats[0][-1], feats_flipped)) + else: + feats = feats[-1] + + instance_info = self.iia_module.forward_test(feats, test_cfg) + instance_feats, instance_coords, instance_scores = instance_info + if len(instance_coords) > 0: + instance_imgids = torch.zeros( + instance_coords.size(0), dtype=torch.long, device=feats.device) + if test_cfg.get('flip_test', False): + instance_coords = torch.cat((instance_coords, instance_coords)) + instance_imgids = torch.cat( + (instance_imgids, instance_imgids + 1)) + instance_heatmaps = self.gfd_module(feats, instance_feats, + instance_coords, + instance_imgids) + if test_cfg.get('flip_test', False): + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + instance_heatmaps, instance_heatmaps_flip = torch.chunk( + instance_heatmaps, 2, dim=0) + instance_heatmaps_flip = \ + instance_heatmaps_flip[:, flip_indices, :, :] + instance_heatmaps = (instance_heatmaps + + instance_heatmaps_flip) / 2.0 + instance_heatmaps = smooth_heatmaps( + instance_heatmaps, test_cfg.get('blur_kernel_size', 3)) + + preds = self.decode((instance_heatmaps, instance_scores[:, None])) + preds = InstanceData.cat(preds) + preds.keypoints[..., 0] += metainfo['input_size'][ + 0] / instance_heatmaps.shape[-1] / 2.0 + preds.keypoints[..., 1] += metainfo['input_size'][ + 1] / instance_heatmaps.shape[-2] / 2.0 + preds = [preds] + + else: + preds = [ + InstanceData( + keypoints=np.empty((0, self.num_keypoints, 2)), + keypoint_scores=np.empty((0, self.num_keypoints))) + ] + instance_heatmaps = torch.empty(0, self.num_keypoints, + *feats.shape[-2:]) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData( + heatmaps=instance_heatmaps.reshape( + -1, *instance_heatmaps.shape[-2:])) + ] + return preds, pred_fields + else: + return preds + + def loss(self, 
+ feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + + # load targets + gt_heatmaps, gt_instance_coords, keypoint_weights = [], [], [] + heatmap_mask = [] + instance_imgids, gt_instance_heatmaps = [], [] + for i, d in enumerate(batch_data_samples): + gt_heatmaps.append(d.gt_fields.heatmaps) + gt_instance_coords.append(d.gt_instance_labels.instance_coords) + keypoint_weights.append(d.gt_instance_labels.keypoint_weights) + instance_imgids.append( + torch.ones( + len(d.gt_instance_labels.instance_coords), + dtype=torch.long) * i) + + instance_heatmaps = d.gt_fields.instance_heatmaps.reshape( + -1, self.num_keypoints, + *d.gt_fields.instance_heatmaps.shape[1:]) + gt_instance_heatmaps.append(instance_heatmaps) + + if 'heatmap_mask' in d.gt_fields: + heatmap_mask.append(d.gt_fields.heatmap_mask) + + gt_heatmaps = torch.stack(gt_heatmaps) + heatmap_mask = torch.stack(heatmap_mask) if heatmap_mask else None + + gt_instance_coords = torch.cat(gt_instance_coords, dim=0) + gt_instance_heatmaps = torch.cat(gt_instance_heatmaps, dim=0) + keypoint_weights = torch.cat(keypoint_weights, dim=0) + instance_imgids = torch.cat(instance_imgids).to(gt_heatmaps.device) + + # feed-forward + feats = feats[-1] + pred_instance_feats, pred_heatmaps = self.iia_module.forward_train( + feats, gt_instance_coords, instance_imgids) + + # conpute contrastive loss + contrastive_loss = 0 + for i in range(len(batch_data_samples)): + pred_instance_feat = pred_instance_feats[instance_imgids == i] + contrastive_loss += self.loss_module['contrastive']( + pred_instance_feat) + contrastive_loss = contrastive_loss / max(1, len(instance_imgids)) + + # limit the number of instances + max_train_instances = train_cfg.get('max_train_instances', -1) + if (max_train_instances > 0 + and len(instance_imgids) > max_train_instances): + selected_indices = torch.randperm( + len(instance_imgids), + device=gt_heatmaps.device, + dtype=torch.long)[:max_train_instances] + gt_instance_coords = gt_instance_coords[selected_indices] + keypoint_weights = keypoint_weights[selected_indices] + gt_instance_heatmaps = gt_instance_heatmaps[selected_indices] + instance_imgids = instance_imgids[selected_indices] + pred_instance_feats = pred_instance_feats[selected_indices] + + # calculate the decoupled heatmaps for each instance + pred_instance_heatmaps = self.gfd_module(feats, pred_instance_feats, + gt_instance_coords, + instance_imgids) + + # calculate losses + losses = { + 'loss/heatmap_coupled': + self.loss_module['heatmap_coupled'](pred_heatmaps, gt_heatmaps, + None, heatmap_mask) + } + if len(instance_imgids) > 0: + losses.update({ + 'loss/heatmap_decoupled': + self.loss_module['heatmap_decoupled'](pred_instance_heatmaps, + gt_instance_heatmaps, + keypoint_weights), + 'loss/contrastive': + contrastive_loss + }) + + return losses + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`CIDHead` (before MMPose v1.0.0) to a compatible format + of :class:`CIDHead`. + + The hook will be automatically registered during initialization. 
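+
+        For example, old keys are remapped as follows (illustrative key
+        names)::
+
+            keypoint_center_conv.weight -> iia_module.keypoint_root_conv.weight
+            conv_down.weight            -> gfd_module.conv_down.weight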
+ """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for k in keys: + if 'keypoint_center_conv' in k: + v = state_dict.pop(k) + k = k.replace('keypoint_center_conv', + 'iia_module.keypoint_root_conv') + state_dict[k] = v + + if 'conv_down' in k: + v = state_dict.pop(k) + k = k.replace('conv_down', 'gfd_module.conv_down') + state_dict[k] = v + + if 'c_attn' in k: + v = state_dict.pop(k) + k = k.replace('c_attn', 'gfd_module.channel_attention') + state_dict[k] = v + + if 's_attn' in k: + v = state_dict.pop(k) + k = k.replace('s_attn', 'gfd_module.spatial_attention') + state_dict[k] = v + + if 'fuse_attn' in k: + v = state_dict.pop(k) + k = k.replace('fuse_attn', 'gfd_module.fuse_attention') + state_dict[k] = v + + if 'heatmap_conv' in k: + v = state_dict.pop(k) + k = k.replace('heatmap_conv', 'gfd_module.heatmap_conv') + state_dict[k] = v diff --git a/mmpose/models/heads/heatmap_heads/cpm_head.py b/mmpose/models/heads/heatmap_heads/cpm_head.py new file mode 100644 index 0000000000000000000000000000000000000000..1ba46357ec5cf72b29b43635a53354f2ed2fd048 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/cpm_head.py @@ -0,0 +1,307 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (Features, MultiConfig, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class CPMHead(BaseHead): + """Multi-stage heatmap head introduced in `Convolutional Pose Machines`_ by + Wei et al (2016) and used by `Stacked Hourglass Networks`_ by Newell et al + (2016). The head consists of multiple branches, each of which has some + deconv layers and a simple conv2d layer. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature maps. + out_channels (int): Number of channels in the output heatmaps. + num_stages (int): Number of stages. + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively. + Defaults to ``(4, 4, 4)`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config | List[Config]): Config of the keypoint loss of different + stages. Defaults to use :class:`KeypointMSELoss`. + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Convolutional Pose Machines`: https://arxiv.org/abs/1602.00134 + .. 
_`Stacked Hourglass Networks`: https://arxiv.org/abs/1603.06937 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + num_stages: int, + deconv_out_channels: OptIntSeq = None, + deconv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: MultiConfig = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + super().__init__(init_cfg) + + self.num_stages = num_stages + self.in_channels = in_channels + self.out_channels = out_channels + + if isinstance(loss, list): + if len(loss) != num_stages: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages})') + self.loss_module = nn.ModuleList( + MODELS.build(_loss) for _loss in loss) + else: + self.loss_module = MODELS.build(loss) + + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build multi-stage deconv layers + self.multi_deconv_layers = nn.ModuleList([]) + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + for _ in range(self.num_stages): + deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + self.multi_deconv_layers.append(deconv_layers) + in_channels = deconv_out_channels[-1] + else: + for _ in range(self.num_stages): + self.multi_deconv_layers.append(nn.Identity()) + + # build multi-stage final layers + self.multi_final_layers = nn.ModuleList([]) + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + for _ in range(self.num_stages): + self.multi_final_layers.append(build_conv_layer(cfg)) + else: + for _ in range(self.num_stages): + self.multi_final_layers.append(nn.Identity()) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, feats: Sequence[Tensor]) -> List[Tensor]: + 
"""Forward the network. The input is multi-stage feature maps and the + output is a list of heatmaps from multiple stages. + + Args: + feats (Sequence[Tensor]): Multi-stage feature maps. + + Returns: + List[Tensor]: A list of output heatmaps from multiple stages. + """ + out = [] + assert len(feats) == self.num_stages, ( + f'The length of feature maps did not match the ' + f'`num_stages` in {self.__class__.__name__}') + for i in range(self.num_stages): + y = self.multi_deconv_layers[i](feats[i]) + y = self.multi_final_layers[i](y) + out.append(y) + + return out + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from multi-stage feature maps. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats)[-1] + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip)[-1], + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + multi_stage_heatmaps = self.forward(feats) + batch_heatmaps = multi_stage_heatmaps[-1] + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Sequence[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Sequence[Tensor]): Multi-stage feature maps. + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instances`. + train_cfg (Config, optional): The training config. + + Returns: + dict: A dictionary of loss components. 
+ """ + multi_stage_pred_heatmaps = self.forward(feats) + + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses over multiple stages + losses = dict() + for i in range(self.num_stages): + if isinstance(self.loss_module, nn.ModuleList): + # use different loss_module over different stages + loss_func = self.loss_module[i] + else: + # use the same loss_module over different stages + loss_func = self.loss_module + + # the `gt_heatmaps` and `keypoint_weights` used to calculate loss + # for different stages are the same + loss_i = loss_func(multi_stage_pred_heatmaps[i], gt_heatmaps, + keypoint_weights) + + if 'loss_kpt' not in losses: + losses['loss_kpt'] = loss_i + else: + losses['loss_kpt'] += loss_i + + # calculate accuracy + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(multi_stage_pred_heatmaps[-1]), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/heatmap_heads/heatmap_head.py b/mmpose/models/heads/heatmap_heads/heatmap_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ccb10fcf546243a7b3f013f79806a91d180f1da5 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/heatmap_head.py @@ -0,0 +1,367 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class HeatmapHead(BaseHead): + """Top-down heatmap head introduced in `Simple Baselines`_ by Xiao et al + (2018). The head is composed of a few deconvolutional layers followed by a + convolutional layer to generate heatmaps from low-resolution feature maps. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. 
Defaults to use + :class:`KeypointMSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. 
Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _make_conv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create convolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + padding = (kernel_size - 1) // 2 + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + layers.append(build_conv_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output heatmap. + """ + x = feats[-1] + + x = self.deconv_layers(x) + x = self.conv_layers(x) + x = self.final_layer(x) + + return x + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. 
Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats) + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip), + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + batch_heatmaps = self.forward(feats) + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + pred_fields = self.forward(feats) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_fields, gt_heatmaps, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + if train_cfg.get('compute_acc', True): + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(pred_fields), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). 
+ # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v diff --git a/mmpose/models/heads/heatmap_heads/internet_head.py b/mmpose/models/heads/heatmap_heads/internet_head.py new file mode 100644 index 0000000000000000000000000000000000000000..62de8e96db769ec18b64d0483adbc0f2fadde635 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/internet_head.py @@ -0,0 +1,443 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import torch +import torch.nn.functional as F +from mmengine.model import normal_init +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmpose.evaluation.functional import multilabel_classification_accuracy +from mmpose.models.necks import GlobalAveragePooling +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, InstanceList, + OptConfigType, OptSampleList, Predictions) +from ..base_head import BaseHead +from .heatmap_head import HeatmapHead + +OptIntSeq = Optional[Sequence[int]] + + +def make_linear_layers(feat_dims, relu_final=False): + """Make linear layers.""" + layers = [] + for i in range(len(feat_dims) - 1): + layers.append(nn.Linear(feat_dims[i], feat_dims[i + 1])) + if i < len(feat_dims) - 2 or \ + (i == len(feat_dims) - 2 and relu_final): + layers.append(nn.ReLU(inplace=True)) + return nn.Sequential(*layers) + + +class Heatmap3DHead(HeatmapHead): + """Heatmap3DHead is a sub-module of Interhand3DHead, and outputs 3D + heatmaps. Heatmap3DHead is composed of (>=0) number of deconv layers and a + simple conv2d layer. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + depth_size (int): Number of depth discretization size. Defaults to 64. + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)``. + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)``. + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings. 
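+
+    A minimal shape sketch (the channel numbers, keypoint count and input
+    size are assumptions for illustration)::
+
+        >>> import torch
+        >>> head = Heatmap3DHead(in_channels=2048, out_channels=21 * 64,
+        ...                      depth_size=64)
+        >>> x = torch.rand(1, 2048, 8, 8)
+        >>> head.forward(x).shape  # (N, K, depth, H, W)
+        torch.Size([1, 21, 64, 64, 64])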
+ """ + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + depth_size: int = 64, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + final_layer: dict = dict(kernel_size=1), + init_cfg: OptConfigType = None): + + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + final_layer=final_layer, + init_cfg=init_cfg) + + assert out_channels % depth_size == 0 + self.depth_size = depth_size + + def forward(self, feats: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tensor): Feature map. + + Returns: + Tensor: output heatmap. + """ + + x = self.deconv_layers(feats) + x = self.final_layer(x) + N, C, H, W = x.shape + # reshape the 2D heatmap to 3D heatmap + x = x.reshape(N, C // self.depth_size, self.depth_size, H, W) + + return x + + +class Heatmap1DHead(nn.Module): + """Heatmap1DHead is a sub-module of Interhand3DHead, and outputs 1D + heatmaps. + + Args: + in_channels (int): Number of input channels. Defaults to 2048. + heatmap_size (int): Heatmap size. Defaults to 64. + hidden_dims (Sequence[int]): Number of feature dimension of FC layers. + Defaults to ``(512, )``. + """ + + def __init__(self, + in_channels: int = 2048, + heatmap_size: int = 64, + hidden_dims: Sequence[int] = (512, )): + + super().__init__() + + self.in_channels = in_channels + self.heatmap_size = heatmap_size + + feature_dims = [in_channels, *hidden_dims, heatmap_size] + self.fc = make_linear_layers(feature_dims, relu_final=False) + + def soft_argmax_1d(self, heatmap1d): + heatmap1d = F.softmax(heatmap1d, 1) + accu = heatmap1d * torch.arange( + self.heatmap_size, dtype=heatmap1d.dtype, + device=heatmap1d.device)[None, :] + coord = accu.sum(dim=1) + return coord + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output heatmap. + """ + x = self.fc(feats) + x = self.soft_argmax_1d(x).view(-1, 1) + return x + + def init_weights(self): + """Initialize model weights.""" + for m in self.fc.modules(): + if isinstance(m, nn.Linear): + normal_init(m, mean=0, std=0.01, bias=0) + + +class MultilabelClassificationHead(nn.Module): + """MultilabelClassificationHead is a sub-module of Interhand3DHead, and + outputs hand type classification. + + Args: + in_channels (int): Number of input channels. Defaults to 2048. + num_labels (int): Number of labels. Defaults to 2. + hidden_dims (Sequence[int]): Number of hidden dimension of FC layers. + Defaults to ``(512, )``. + """ + + def __init__(self, + in_channels: int = 2048, + num_labels: int = 2, + hidden_dims: Sequence[int] = (512, )): + + super().__init__() + + self.in_channels = in_channels + + feature_dims = [in_channels, *hidden_dims, num_labels] + self.fc = make_linear_layers(feature_dims, relu_final=False) + + def init_weights(self): + for m in self.fc.modules(): + if isinstance(m, nn.Linear): + normal_init(m, mean=0, std=0.01, bias=0) + + def forward(self, x): + """Forward function.""" + labels = self.fc(x) + return labels + + +@MODELS.register_module() +class InternetHead(BaseHead): + """Internet head introduced in `Interhand 2.6M`_ by Moon et al (2020). + + Args: + keypoint_head_cfg (dict): Configs of Heatmap3DHead for hand + keypoint estimation. 
+ root_head_cfg (dict): Configs of Heatmap1DHead for relative + hand root depth estimation. + hand_type_head_cfg (dict): Configs of ``MultilabelClassificationHead`` + for hand type classification. + loss (Config): Config of the keypoint loss. + Default: :class:`KeypointMSELoss`. + loss_root_depth (dict): Config for relative root depth loss. + Default: :class:`SmoothL1Loss`. + loss_hand_type (dict): Config for hand type classification + loss. Default: :class:`BCELoss`. + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Default: ``None``. + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Interhand 2.6M`: https://arxiv.org/abs/2008.09309 + """ + + _version = 2 + + def __init__(self, + keypoint_head_cfg: ConfigType, + root_head_cfg: ConfigType, + hand_type_head_cfg: ConfigType, + loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + loss_root_depth: ConfigType = dict( + type='L1Loss', use_target_weight=True), + loss_hand_type: ConfigType = dict( + type='BCELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + super().__init__() + + # build sub-module heads + self.right_hand_head = Heatmap3DHead(**keypoint_head_cfg) + self.left_hand_head = Heatmap3DHead(**keypoint_head_cfg) + self.root_head = Heatmap1DHead(**root_head_cfg) + self.hand_type_head = MultilabelClassificationHead( + **hand_type_head_cfg) + self.neck = GlobalAveragePooling() + + self.loss_module = MODELS.build(loss) + self.root_loss_module = MODELS.build(loss_root_depth) + self.hand_loss_module = MODELS.build(loss_hand_type) + + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: Output heatmap, root depth estimation and hand type + classification. + """ + x = feats[-1] + outputs = [] + outputs.append( + torch.cat([self.right_hand_head(x), + self.left_hand_head(x)], dim=1)) + x = self.neck(x) + outputs.append(self.root_head(x)) + outputs.append(self.hand_type_head(x)) + return outputs + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + InstanceList: Return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_outputs = self.forward(_feats) + _batch_heatmaps = _batch_outputs[0] + + _batch_outputs_flip = self.forward(_feats_flip) + _batch_heatmaps_flip = flip_heatmaps( + _batch_outputs_flip[0], + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + + # flip relative hand root depth + _batch_root = _batch_outputs[1] + _batch_root_flip = -_batch_outputs_flip[1] + batch_root = (_batch_root + _batch_root_flip) * 0.5 + + # flip hand type + _batch_type = _batch_outputs[2] + _batch_type_flip = torch.empty_like(_batch_outputs_flip[2]) + _batch_type_flip[:, 0] = _batch_type[:, 1] + _batch_type_flip[:, 1] = _batch_type[:, 0] + batch_type = (_batch_type + _batch_type_flip) * 0.5 + + batch_outputs = [batch_heatmaps, batch_root, batch_type] + + else: + batch_outputs = self.forward(feats) + + preds = self.decode(tuple(batch_outputs)) + + return preds + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
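+
+        A minimal sketch of the raw predictions this loss consumes, as
+        produced by ``forward`` (the backbone channel number, hand keypoint
+        count, depth size and feature size are assumptions)::
+
+            >>> import torch
+            >>> head = InternetHead(
+            ...     keypoint_head_cfg=dict(
+            ...         in_channels=2048, out_channels=21 * 64,
+            ...         depth_size=64),
+            ...     root_head_cfg=dict(in_channels=2048, heatmap_size=64),
+            ...     hand_type_head_cfg=dict(in_channels=2048, num_labels=2))
+            >>> feats = (torch.rand(1, 2048, 8, 8), )
+            >>> heatmaps, root_depth, hand_type = head.forward(feats)
+            >>> heatmaps.shape[1:]  # 2 hands x 21 keypoints, 3D volumes
+            torch.Size([42, 64, 64, 64])
+            >>> root_depth.shape, hand_type.shape
+            (torch.Size([1, 1]), torch.Size([1, 2]))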
+ """ + pred_fields = self.forward(feats) + pred_heatmaps = pred_fields[0] + _, K, D, W, H = pred_heatmaps.shape + gt_heatmaps = torch.stack([ + d.gt_fields.heatmaps.reshape(K, D, W, H) + for d in batch_data_samples + ]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + + # hand keypoint loss + loss = self.loss_module(pred_heatmaps, gt_heatmaps, keypoint_weights) + losses.update(loss_kpt=loss) + + # relative root depth loss + gt_roots = torch.stack( + [d.gt_instance_labels.root_depth for d in batch_data_samples]) + root_weights = torch.stack([ + d.gt_instance_labels.root_depth_weight for d in batch_data_samples + ]) + loss_root = self.root_loss_module(pred_fields[1], gt_roots, + root_weights) + losses.update(loss_rel_root=loss_root) + + # hand type loss + gt_types = torch.stack([ + d.gt_instance_labels.type.reshape(-1) for d in batch_data_samples + ]) + type_weights = torch.stack( + [d.gt_instance_labels.type_weight for d in batch_data_samples]) + loss_type = self.hand_loss_module(pred_fields[2], gt_types, + type_weights) + losses.update(loss_hand_type=loss_type) + + # calculate accuracy + if train_cfg.get('compute_acc', True): + acc = multilabel_classification_accuracy( + pred=to_numpy(pred_fields[2]), + gt=to_numpy(gt_types), + mask=to_numpy(type_weights)) + + acc_pose = torch.tensor(acc, device=gt_types.device) + losses.update(acc_pose=acc_pose) + + return losses + + def decode(self, batch_outputs: Union[Tensor, + Tuple[Tensor]]) -> InstanceList: + """Decode keypoints from outputs. + + Args: + batch_outputs (Tensor | Tuple[Tensor]): The network outputs of + a data batch + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + def _pack_and_call(args, func): + if not isinstance(args, tuple): + args = (args, ) + return func(*args) + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. ' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + batch_output_np = to_numpy(batch_outputs[0], unzip=True) + batch_root_np = to_numpy(batch_outputs[1], unzip=True) + batch_type_np = to_numpy(batch_outputs[2], unzip=True) + batch_keypoints = [] + batch_scores = [] + batch_roots = [] + batch_types = [] + for outputs, roots, types in zip(batch_output_np, batch_root_np, + batch_type_np): + keypoints, scores, rel_root_depth, hand_type = _pack_and_call( + tuple([outputs, roots, types]), self.decoder.decode) + batch_keypoints.append(keypoints) + batch_scores.append(scores) + batch_roots.append(rel_root_depth) + batch_types.append(hand_type) + + preds = [ + InstanceData( + keypoints=keypoints, + keypoint_scores=scores, + rel_root_depth=rel_root_depth, + hand_type=hand_type) + for keypoints, scores, rel_root_depth, hand_type in zip( + batch_keypoints, batch_scores, batch_roots, batch_types) + ] + + return preds diff --git a/mmpose/models/heads/heatmap_heads/mspn_head.py b/mmpose/models/heads/heatmap_heads/mspn_head.py new file mode 100644 index 0000000000000000000000000000000000000000..8b7cddf7988bfc57cae314ef944f44b4d0d7df09 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/mspn_head.py @@ -0,0 +1,432 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +from typing import List, Optional, Sequence, Union + +import torch +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, Linear, + build_activation_layer, build_norm_layer) +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, MultiConfig, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] +MSMUFeatures = Sequence[Sequence[Tensor]] # Multi-stage multi-unit features + + +class PRM(nn.Module): + """Pose Refine Machine. + + Please refer to "Learning Delicate Local Representations + for Multi-Person Pose Estimation" (ECCV 2020). + + Args: + out_channels (int): Number of the output channels, equals to + the number of keypoints. + norm_cfg (Config): Config to construct the norm layer. + Defaults to ``dict(type='BN')`` + """ + + def __init__(self, + out_channels: int, + norm_cfg: ConfigType = dict(type='BN')): + super().__init__() + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.out_channels = out_channels + self.global_pooling = nn.AdaptiveAvgPool2d((1, 1)) + self.middle_path = nn.Sequential( + Linear(self.out_channels, self.out_channels), + build_norm_layer(dict(type='BN1d'), out_channels)[1], + build_activation_layer(dict(type='ReLU')), + Linear(self.out_channels, self.out_channels), + build_norm_layer(dict(type='BN1d'), out_channels)[1], + build_activation_layer(dict(type='ReLU')), + build_activation_layer(dict(type='Sigmoid'))) + + self.bottom_path = nn.Sequential( + ConvModule( + self.out_channels, + self.out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + inplace=False), + DepthwiseSeparableConvModule( + self.out_channels, + 1, + kernel_size=9, + stride=1, + padding=4, + norm_cfg=norm_cfg, + inplace=False), build_activation_layer(dict(type='Sigmoid'))) + self.conv_bn_relu_prm_1 = ConvModule( + self.out_channels, + self.out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + inplace=False) + + def forward(self, x: Tensor) -> Tensor: + """Forward the network. The input heatmaps will be refined. + + Args: + x (Tensor): The input heatmaps. + + Returns: + Tensor: output heatmaps. + """ + out = self.conv_bn_relu_prm_1(x) + out_1 = out + + out_2 = self.global_pooling(out_1) + out_2 = out_2.view(out_2.size(0), -1) + out_2 = self.middle_path(out_2) + out_2 = out_2.unsqueeze(2) + out_2 = out_2.unsqueeze(3) + + out_3 = self.bottom_path(out_1) + out = out_1 * (1 + out_2 * out_3) + + return out + + +class PredictHeatmap(nn.Module): + """Predict the heatmap for an input feature. + + Args: + unit_channels (int): Number of input channels. + out_channels (int): Number of output channels. + out_shape (tuple): Shape of the output heatmaps. + use_prm (bool): Whether to use pose refine machine. Default: False. + norm_cfg (Config): Config to construct the norm layer. 
+ Defaults to ``dict(type='BN')`` + """ + + def __init__(self, + unit_channels: int, + out_channels: int, + out_shape: tuple, + use_prm: bool = False, + norm_cfg: ConfigType = dict(type='BN')): + + super().__init__() + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.unit_channels = unit_channels + self.out_channels = out_channels + self.out_shape = out_shape + self.use_prm = use_prm + if use_prm: + self.prm = PRM(out_channels, norm_cfg=norm_cfg) + self.conv_layers = nn.Sequential( + ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + inplace=False), + ConvModule( + unit_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=None, + inplace=False)) + + def forward(self, feature: Tensor) -> Tensor: + """Forward the network. + + Args: + feature (Tensor): The input feature maps. + + Returns: + Tensor: output heatmaps. + """ + feature = self.conv_layers(feature) + output = nn.functional.interpolate( + feature, size=self.out_shape, mode='bilinear', align_corners=True) + if self.use_prm: + output = self.prm(output) + return output + + +@MODELS.register_module() +class MSPNHead(BaseHead): + """Multi-stage multi-unit heatmap head introduced in `Multi-Stage Pose + estimation Network (MSPN)`_ by Li et al (2019), and used by `Residual Steps + Networks (RSN)`_ by Cai et al (2020). The head consists of multiple stages + and each stage consists of multiple units. Each unit of each stage has some + conv layers. + + Args: + num_stages (int): Number of stages. + num_units (int): Number of units in each stage. + out_shape (tuple): The output shape of the output heatmaps. + unit_channels (int): Number of input channels. + out_channels (int): Number of output channels. + out_shape (tuple): Shape of the output heatmaps. + use_prm (bool): Whether to use pose refine machine (PRM). + Defaults to ``False``. + norm_cfg (Config): Config to construct the norm layer. + Defaults to ``dict(type='BN')`` + loss (Config | List[Config]): Config of the keypoint loss for + different stages and different units. + Defaults to use :class:`KeypointMSELoss`. + level_indices (Sequence[int]): The indices that specified the level + of target heatmaps. + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`MSPN`: https://arxiv.org/abs/1901.00148 + .. 
_`RSN`: https://arxiv.org/abs/2003.04030 + """ + _version = 2 + + def __init__(self, + num_stages: int = 4, + num_units: int = 4, + out_shape: tuple = (64, 48), + unit_channels: int = 256, + out_channels: int = 17, + use_prm: bool = False, + norm_cfg: ConfigType = dict(type='BN'), + level_indices: Sequence[int] = [], + loss: MultiConfig = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + if init_cfg is None: + init_cfg = self.default_init_cfg + super().__init__(init_cfg) + + self.num_stages = num_stages + self.num_units = num_units + self.out_shape = out_shape + self.unit_channels = unit_channels + self.out_channels = out_channels + if len(level_indices) != num_stages * num_units: + raise ValueError( + f'The length of level_indices({len(level_indices)}) did not ' + f'match `num_stages`({num_stages}) * `num_units`({num_units})') + + self.level_indices = level_indices + + if isinstance(loss, list) and len(loss) != num_stages * num_units: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages}) * `num_units`({num_units})') + + if isinstance(loss, list): + if len(loss) != num_stages * num_units: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages}) * `num_units`({num_units})') + self.loss_module = nn.ModuleList( + MODELS.build(_loss) for _loss in loss) + else: + self.loss_module = MODELS.build(loss) + + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + + self.predict_layers = nn.ModuleList([]) + for i in range(self.num_stages): + for j in range(self.num_units): + self.predict_layers.append( + PredictHeatmap( + unit_channels, + out_channels, + out_shape, + use_prm, + norm_cfg=norm_cfg)) + + @property + def default_init_cfg(self): + """Default config for weight initialization.""" + init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict(type='Normal', layer='Linear', std=0.01), + dict(type='Constant', layer='BatchNorm2d', val=1), + ] + return init_cfg + + def forward(self, feats: Sequence[Sequence[Tensor]]) -> List[Tensor]: + """Forward the network. The input is multi-stage multi-unit feature + maps and the output is a list of heatmaps from multiple stages. + + Args: + feats (Sequence[Sequence[Tensor]]): Feature maps from multiple + stages and units. + + Returns: + List[Tensor]: A list of output heatmaps from multiple stages + and units. + """ + out = [] + assert len(feats) == self.num_stages, ( + f'The length of feature maps did not match the ' + f'`num_stages` in {self.__class__.__name__}') + for feat in feats: + assert len(feat) == self.num_units, ( + f'The length of feature maps did not match the ' + f'`num_units` in {self.__class__.__name__}') + for f in feat: + assert f.shape[1] == self.unit_channels, ( + f'The number of feature map channels did not match the ' + f'`unit_channels` in {self.__class__.__name__}') + + for i in range(self.num_stages): + for j in range(self.num_units): + y = self.predict_layers[i * self.num_units + j](feats[i][j]) + out.append(y) + return out + + def predict(self, + feats: Union[MSMUFeatures, List[MSMUFeatures]], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from multi-stage feature maps. 
+ + Args: + feats (Sequence[Sequence[Tensor]]): Multi-stage multi-unit + features (or multiple MSMU features for TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_labels`. + test_cfg (Config, optional): The testing/inference config + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + # multi-stage multi-unit batch heatmaps + if test_cfg.get('flip_test', False): + # TTA: flip test + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats)[-1] + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip)[-1], + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + msmu_batch_heatmaps = self.forward(feats) + batch_heatmaps = msmu_batch_heatmaps[-1] + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: MSMUFeatures, + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Note: + - batch_size: B + - num_output_heatmap_levels: L + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + - num_instances: N (usually 1 in topdown heatmap heads) + + Args: + feats (Sequence[Sequence[Tensor]]): Feature maps from multiple + stages and units + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_labels` and `gt_fields`. + train_cfg (Config, optional): The training config + + Returns: + dict: A dictionary of loss components. 
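+
+        Example (illustrative; ``msmu_feats`` stands for features in the
+        MSMU layout described above, and only the keys are fixed by the
+        computation below)::
+
+            >>> losses = head.loss(msmu_feats, batch_data_samples)
+            >>> sorted(losses.keys())
+            ['acc_pose', 'loss_kpt']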
+ """ + # multi-stage multi-unit predict heatmaps + msmu_pred_heatmaps = self.forward(feats) + + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) # shape: [B*N, L, K] + + # calculate losses over multiple stages and multiple units + losses = dict() + for i in range(self.num_stages * self.num_units): + if isinstance(self.loss_module, nn.ModuleList): + # use different loss_module over different stages and units + loss_func = self.loss_module[i] + else: + # use the same loss_module over different stages and units + loss_func = self.loss_module + + # select `gt_heatmaps` and `keypoint_weights` for different level + # according to `self.level_indices` to calculate loss + gt_heatmaps = torch.stack([ + d.gt_fields[self.level_indices[i]].heatmaps + for d in batch_data_samples + ]) + loss_i = loss_func(msmu_pred_heatmaps[i], gt_heatmaps, + keypoint_weights[:, self.level_indices[i]]) + + if 'loss_kpt' not in losses: + losses['loss_kpt'] = loss_i + else: + losses['loss_kpt'] += loss_i + + # calculate accuracy + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(msmu_pred_heatmaps[-1]), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights[:, -1]) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/heatmap_heads/vipnas_head.py b/mmpose/models/heads/heatmap_heads/vipnas_head.py new file mode 100644 index 0000000000000000000000000000000000000000..949ee95b096124a162f6d9719446fa80bd26a201 --- /dev/null +++ b/mmpose/models/heads/heatmap_heads/vipnas_head.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Union + +from mmcv.cnn import build_conv_layer, build_upsample_layer +from torch import nn + +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.typing import ConfigType, OptConfigType +from .heatmap_head import HeatmapHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class ViPNASHead(HeatmapHead): + """ViPNAS heatmap head introduced in `ViPNAS`_ by Xu et al (2021). The head + is composed of a few deconvolutional layers followed by a convolutional + layer to generate heatmaps from low-resolution feature maps. Specifically, + different from the :class: `HeatmapHead` introduced by `Simple Baselines`_, + the group numbers in the deconvolutional layers are elastic and thus can be + optimized by neural architecture search (NAS). + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(144, 144, 144)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + deconv_num_groups (Sequence[int], optional): The group number of each + deconv layer. Defaults to ``(16, 16, 16)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. 
Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`ViPNAS`: https://arxiv.org/abs/2105.10154 + .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (144, 144, 144), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super(HeatmapHead, self).__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + if deconv_num_groups is None or len(deconv_out_channels) != len( + deconv_num_groups): + raise ValueError( + '"deconv_out_channels" and "deconv_num_groups" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_num_groups}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + layer_groups=deconv_num_groups, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. 
Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int], + layer_groups: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size, groups in zip(layer_out_channels, + layer_kernel_sizes, + layer_groups): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + groups=groups, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) diff --git a/mmpose/models/heads/hybrid_heads/__init__.py b/mmpose/models/heads/hybrid_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..767f87a19c10871033c1cb5bbbadc226d5bd4551 --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dekr_head import DEKRHead +from .rtmo_head import RTMOHead +from .vis_head import VisPredictHead +from .yoloxpose_head import YOLOXPoseHead +from .poseid_head import PoseIDHead +from .calibration_head import CalibrationHead + +__all__ = ['DEKRHead', 'VisPredictHead', 'YOLOXPoseHead', 'RTMOHead', 'PoseIDHead', 'CalibrationHead'] diff --git a/mmpose/models/heads/hybrid_heads/calibration_head.py b/mmpose/models/heads/hybrid_heads/calibration_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d29d440a896165318cd5753f0627a9d6806c3bfe --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/calibration_head.py @@ -0,0 +1,1242 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
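+#
+# Worked note (a sketch) on the deconvolution settings accepted by
+# ``_make_deconv_layers`` below, using the standard ConvTranspose2d
+# output-size formula:
+#
+#     H_out = (H_in - 1) * stride - 2 * padding + kernel_size + output_padding
+#
+# With stride 2, the supported (kernel_size, padding, output_padding) triples
+# of (4, 1, 0), (3, 1, 1) and (2, 0, 0) all simplify to H_out = 2 * H_in,
+# i.e. every deconv layer exactly doubles the spatial resolution.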
+from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +import numpy as np + +from sparsemax import Sparsemax + +import os +import shutil +import cv2 + +from mmpose.structures.keypoint import fix_bbox_aspect_ratio + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class CalibrationHead(BaseHead): + """Multi-variate head predicting all information about keypoints. Apart + from the heatmap, it also predicts: + 1) Heatmap for each keypoint + 2) Probability of keypoint being in the heatmap + 3) Visibility of each keypoint + 4) Predicted OKS per keypoint + 5) Predictd euclidean error per keypoint + The heatmap predicting part is the same as HeatmapHead introduced in + in `Simple Baselines`_ by Xiao et al (2018). + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer_dict (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + keypoint_loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + probability_loss (Config): Config of the probability loss. Defaults to use + :class:`BCELoss` + visibility_loss (Config): Config of the visibility loss. Defaults to use + :class:`BCELoss` + oks_loss (Config): Config of the oks loss. Defaults to use + :class:`MSELoss` + error_loss (Config): Config of the error loss. Defaults to use + :class:`L1LogLoss` + normalize (bool): Whether to normalize values in the heatmaps between + 0 and 1 with sigmoid. Defaults to ``False`` + detach_probability (bool): Whether to detach the probability + from gradient computation. Defaults to ``True`` + detach_visibility (bool): Whether to detach the visibility + from gradient computation. Defaults to ``True`` + learn_heatmaps_from_zeros (bool): Whether to learn the + heatmaps from zeros. Defaults to ``False`` + freeze_heatmaps (bool): Whether to freeze the heatmaps prediction. + Defaults to ``False`` + freeze_probability (bool): Whether to freeze the probability prediction. + Defaults to ``False`` + freeze_visibility (bool): Whether to freeze the visibility prediction. + Defaults to ``False`` + freeze_oks (bool): Whether to freeze the oks prediction. 
+ Defaults to ``False`` + freeze_error (bool): Whether to freeze the error prediction. + Defaults to ``False`` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + + .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer_dict: dict = dict(kernel_size=1), + keypoint_loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + probability_loss: ConfigType = dict( + type='BCELoss', use_target_weight=True), + visibility_loss: ConfigType = dict( + type='BCELoss', use_target_weight=True), + oks_loss: ConfigType = dict( + type='MSELoss', use_target_weight=True), + error_loss: ConfigType = dict( + type='L1LogLoss', use_target_weight=True), + normalize: float = None, + detach_probability: bool = True, + detach_visibility: bool = True, + learn_heatmaps_from_zeros: bool = False, + freeze_heatmaps: bool = False, + freeze_probability: bool = False, + freeze_visibility: bool = False, + freeze_oks: bool = False, + freeze_error: bool = False, + decoder: OptConfigType = dict( + type='UDPHeatmap', input_size=(192, 256), + heatmap_size=(48, 64), sigma=2), + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.keypoint_loss_module = MODELS.build(keypoint_loss) + self.probability_loss_module = MODELS.build(probability_loss) + self.visibility_loss_module = MODELS.build(visibility_loss) + self.oks_loss_module = MODELS.build(oks_loss) + self.error_loss_module = MODELS.build(error_loss) + + self.temperature = torch.nn.Parameter(torch.tensor(1.0), requires_grad=True) + + self.gauss_sigma = 2.0 + self.gauss_kernel_size = int(2.0 * 3.0 * self.gauss_sigma + 1.0) + ts = torch.linspace( + - self.gauss_kernel_size // 2, + self.gauss_kernel_size // 2, + self.gauss_kernel_size + ) + gauss = torch.exp(-(ts / self.gauss_sigma)**2 / 2) + gauss = gauss / gauss.sum() + self.gauss_kernel = gauss.unsqueeze(0) * gauss.unsqueeze(1) + + self.decoder = KEYPOINT_CODECS.build(decoder) + self.nonlinearity = nn.ReLU(inplace=True) + self.learn_heatmaps_from_zeros = learn_heatmaps_from_zeros + + self.num_iters = 0 + unique_hash = np.random.randint(0, 100000) + self.loss_vis_folder = "work_dirs/loss_vis_{:05d}".format(unique_hash) + self.interval = 50 + shutil.rmtree(self.loss_vis_folder, ignore_errors=True) + print("Will save heatmap visualizations to folder '{:s}'".format(self.loss_vis_folder)) + + self._build_heatmap_head( + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer_dict=final_layer_dict, + normalize=normalize, + freeze=freeze_heatmaps) + + self.normalize = normalize + + self.detach_probability = detach_probability + self._build_probability_head( + in_channels=in_channels, + out_channels=out_channels, + freeze=freeze_probability) + + self.detach_visibility = detach_visibility + 
self._build_visibility_head( + in_channels=in_channels, + out_channels=out_channels, + freeze=freeze_visibility) + + self._build_oks_head( + in_channels=in_channels, + out_channels=out_channels, + freeze=freeze_oks) + self.freeze_oks = freeze_oks + + self._build_error_head( + in_channels=in_channels, + out_channels=out_channels, + freeze=freeze_error) + self.freeze_error = freeze_error + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + self._freeze_all_but_temperature() + + # Print all params and their gradients + print("\n", "="*20) + for name, param in self.named_parameters(): + print(name, param.requires_grad) + + + def _freeze_all_but_temperature(self): + for param in self.parameters(): + param.requires_grad = False + self.temperature.requires_grad = True + + def _build_heatmap_head(self, in_channels: int, out_channels: int, + deconv_out_channels: Sequence[int], + deconv_kernel_sizes: Sequence[int], + conv_out_channels: Sequence[int], + conv_kernel_sizes: Sequence[int], + final_layer_dict: dict, + normalize: bool = False, + freeze: bool = False) -> nn.Module: + """Build the heatmap head module.""" + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. 
Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer_dict is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer_dict) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + # self.normalize_layer = lambda x: x / x.sum(dim=-1, keepdim=True) if normalize else nn.Identity() + # self.normalize_layer = nn.Softmax(dim=-1) if normalize else nn.Identity() + self.normalize_layer = nn.Identity() if normalize is None else Sparsemax(dim=-1) + + if freeze: + for param in self.deconv_layers.parameters(): + param.requires_grad = False + for param in self.conv_layers.parameters(): + param.requires_grad = False + for param in self.final_layer.parameters(): + param.requires_grad = False + + def _build_probability_head(self, in_channels: int, out_channels: int, + freeze: bool = False) -> nn.Module: + """Build the probability head module.""" + ppb_layers = [] + kernel_sizes = [(4, 3), (2, 2), (2, 2)] + for i in range(len(kernel_sizes)): + ppb_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1)) + ppb_layers.append( + nn.BatchNorm2d(num_features=in_channels)) + ppb_layers.append( + nn.MaxPool2d(kernel_size=kernel_sizes[i], stride=kernel_sizes[i], padding=0)) + ppb_layers.append(self.nonlinearity) + ppb_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0)) + ppb_layers.append(nn.Sigmoid()) + self.probability_layers = nn.Sequential(*ppb_layers) + + if freeze: + for param in self.probability_layers.parameters(): + param.requires_grad = False + + def _build_visibility_head(self, in_channels: int, out_channels: int, + freeze: bool = False) -> nn.Module: + """Build the visibility head module.""" + vis_layers = [] + kernel_sizes = [(4, 3), (2, 2), (2, 2)] + for i in range(len(kernel_sizes)): + vis_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1)) + vis_layers.append( + nn.BatchNorm2d(num_features=in_channels)) + vis_layers.append( + nn.MaxPool2d(kernel_size=kernel_sizes[i], stride=kernel_sizes[i], padding=0)) + vis_layers.append(self.nonlinearity) + vis_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0)) + vis_layers.append(nn.Sigmoid()) + self.visibility_layers = nn.Sequential(*vis_layers) + + if freeze: + for param in self.visibility_layers.parameters(): + param.requires_grad = False + + def _build_oks_head(self, in_channels: int, out_channels: int, + freeze: bool = False) -> nn.Module: + """Build the oks head module.""" + oks_layers = [] + kernel_sizes = [(4, 3), (2, 2), (2, 2)] + for i in range(len(kernel_sizes)): + oks_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1)) + oks_layers.append( + nn.BatchNorm2d(num_features=in_channels)) + oks_layers.append( + nn.MaxPool2d(kernel_size=kernel_sizes[i], 
stride=kernel_sizes[i], padding=0)) + oks_layers.append(self.nonlinearity) + oks_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0)) + oks_layers.append(nn.Sigmoid()) + self.oks_layers = nn.Sequential(*oks_layers) + + if freeze: + for param in self.oks_layers.parameters(): + param.requires_grad = False + + def _build_error_head(self, in_channels: int, out_channels: int, + freeze: bool = False) -> nn.Module: + """Build the error head module.""" + error_layers = [] + kernel_sizes = [(4, 3), (2, 2), (2, 2)] + for i in range(len(kernel_sizes)): + error_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1)) + error_layers.append( + nn.BatchNorm2d(num_features=in_channels)) + error_layers.append( + nn.MaxPool2d(kernel_size=kernel_sizes[i], stride=kernel_sizes[i], padding=0)) + error_layers.append(self.nonlinearity) + error_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0)) + error_layers.append(self.nonlinearity) + self.error_layers = nn.Sequential(*error_layers) + + if freeze: + for param in self.error_layers.parameters(): + param.requires_grad = False + + def _make_conv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create convolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + padding = (kernel_size - 1) // 2 + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + layers.append(build_conv_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(self.nonlinearity) + in_channels = out_channels + + return nn.Sequential(*layers) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(self.nonlinearity) + in_channels = out_channels + + return nn.Sequential(*layers) + + def _error_from_heatmaps(self, gt_heatmaps: Tensor, dt_heatmaps: Tensor) -> Tensor: + """Calculate the error from heatmaps. + + Args: + heatmaps (Tensor): The predicted heatmaps. + + Returns: + Tensor: The predicted error. 
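+
+        Note: the returned array has shape ``(B, K)`` and is measured in
+        heatmap pixel units, i.e. it is the Euclidean distance between the
+        locations decoded from the ground-truth and the predicted heatmaps.
+        Empty ground-truth heatmaps decode to NaN and are mapped to the
+        placeholder value -1; their error is ignored downstream via the
+        keypoint weights.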
+ """ + # Transform to numpy + gt_heatmaps = to_numpy(gt_heatmaps) + dt_heatmaps = to_numpy(dt_heatmaps) + + # Get locations from heatmaps + B, C, H, W = gt_heatmaps.shape + gt_coords = np.zeros((B, C, 2)) + dt_coords = np.zeros((B, C, 2)) + for i, (gt_htm, dt_htm) in enumerate(zip(gt_heatmaps, dt_heatmaps)): + coords, score = self.decoder.decode(gt_htm) + coords = coords.squeeze() + gt_coords[i, :, :] = coords + + coords, score = self.decoder.decode(dt_htm) + coords = coords.squeeze() + dt_coords[i, :, :] = coords + + # NaN coordinates mean empty heatmaps -> set them to -1 + # as the error will be ignored by weight + gt_coords[np.isnan(gt_coords)] = -1 + + # Calculate the error + target_errors = np.linalg.norm(gt_coords - dt_coords, axis=2) + assert (target_errors >= 0).all(), "Euclidean distance cannot be negative" + + return target_errors + + def _oks_from_heatmaps(self, gt_heatmaps: Tensor, dt_heatmaps: Tensor, weight: Tensor) -> Tensor: + """Calculate the OKS from heatmaps. + + Args: + heatmaps (Tensor): The predicted heatmaps. + + Returns: + Tensor: The predicted OKS. + """ + C = dt_heatmaps.shape[1] + + # Transform to numpy + gt_heatmaps = to_numpy(gt_heatmaps) + dt_heatmaps = to_numpy(dt_heatmaps) + B, C, H, W = gt_heatmaps.shape + weight = to_numpy(weight).squeeze().reshape((B, C, 1)) + + # Get locations from heatmaps + gt_coords = np.zeros((B, C, 2)) + dt_coords = np.zeros((B, C, 2)) + for i, (gt_htm, dt_htm) in enumerate(zip(gt_heatmaps, dt_heatmaps)): + coords, score = self.decoder.decode(gt_htm) + coords = coords.squeeze() + gt_coords[i, :, :] = coords + + coords, score = self.decoder.decode(dt_htm) + coords = coords.squeeze() + dt_coords[i, :, :] = coords + + # NaN coordinates mean empty heatmaps -> set them to 0 + gt_coords[np.isnan(gt_coords)] = 0 + + # Add probability as visibility + gt_coords = gt_coords * weight + dt_coords = dt_coords * weight + gt_coords = np.concatenate((gt_coords, weight*2), axis=2) + dt_coords = np.concatenate((dt_coords, weight*2), axis=2) + + # Calculate the oks + target_oks = [] + oks_weights = [] + for i in range(len(gt_coords)): + gt_kpts = gt_coords[i] + dt_kpts = dt_coords[i] + valid_gt_kpts = gt_kpts[:, 2] > 0 + if not valid_gt_kpts.any(): + # Changed for per-keypoint OKS + target_oks.append(np.zeros(C)) + oks_weights.append(0) + continue + + gt_bbox = np.array([ + 0, 0, + 64, 48, + ]) + gt = { + 'keypoints': gt_kpts, + 'bbox': gt_bbox, + 'area': gt_bbox[2] * gt_bbox[3], + } + dt = { + 'keypoints': dt_kpts, + 'bbox': gt_bbox, + 'area': gt_bbox[2] * gt_bbox[3], + } + # Changed for per-keypoint OKS + oks = compute_oks(gt, dt, use_area=False, per_kpt=True) + target_oks.append(oks) + oks_weights.append(1) + + target_oks = np.array(target_oks) + target_oks = torch.from_numpy(target_oks).float() + + oks_weights = np.array(oks_weights) + oks_weights = torch.from_numpy(oks_weights).float() + + return target_oks, oks_weights + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + """Forward the network. The input is multi scale feature maps and the + output is (1) the heatmap, (2) probability, (3) visibility, (4) oks and (5) error. + + Args: + feats (Tensor): Multi scale feature maps. + + Returns: + Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: outputs. 
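+
+        Example (an illustrative sketch; the variable names are arbitrary
+        and the spatial sizes depend on the backbone and head configuration)::
+
+            >>> heatmaps, probs, vis, oks, errs = head.forward(feats)
+            >>> # heatmaps: (B, K, H, W) keypoint heatmaps
+            >>> # probs/vis/oks/errs: one score per keypoint, reshaped to
+            >>> # (B, 1, K) in predict() once the pooling layers have reduced
+            >>> # the feature map to 1x1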
+ """ + x = feats[-1].detach() + + heatmaps = self.forward_heatmap(x) + probabilities = self.forward_probability(x) + visibilities = self.forward_visibility(x) + oks = self.forward_oks(x) + errors = self.forward_error(x) + + return heatmaps, probabilities, visibilities, oks, errors + + def forward_heatmap(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + x (Tensor): Multi scale feature maps. + + Returns: + Tensor: output heatmap. + """ + x = self.deconv_layers(x) + x = self.conv_layers(x) + x = self.final_layer(x) + B, C, H, W = x.shape + x = x.reshape((B, C, H*W)) + x = self.normalize_layer(x/self.temperature) + if self.normalize is not None: + x = x * self.normalize + x = torch.clamp(x, 0, 1) + x = x.reshape((B, C, H, W)) + + # # Blur the heatmaps with Gaussian + # x = x.reshape((B*C, 1, H, W)) + # x = nn.functional.conv2d(x, self.gauss_kernel[None, None, :, :].to(x.device), padding='same') + # x = x.reshape((B, C, H, W)) + + return x + + def forward_probability(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the probability. + + Args: + x (Tensor): Multi scale feature maps. + detach (bool): Whether to detach the probability from gradient + + Returns: + Tensor: output probability. + """ + if self.detach_probability: + x = x.detach() + x = self.probability_layers(x) + return x + + def forward_visibility(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the visibility. + + Args: + x (Tensor): Multi scale feature maps. + detach (bool): Whether to detach the visibility from gradient + + Returns: + Tensor: output visibility. + """ + if self.detach_visibility: + x = x.detach() + x = self.visibility_layers(x) + return x + + def forward_oks(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the oks. + + Args: + x (Tensor): Multi scale feature maps. + + Returns: + Tensor: output oks. + """ + x = x.detach() + x = self.oks_layers(x) + return x + + def forward_error(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the euclidean error. + + Args: + x (Tensor): Multi scale feature maps. + + Returns: + Tensor: output error. + """ + x = x.detach() + x = self.error_layers(x) + return x + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _htm, _prob, _vis, _oks, _err = self.forward(_feats) + _htm_flip, _prob_flip, _vis_flip, _oks_flip, _err_flip = self.forward(_feats_flip) + B, C, H, W = _htm.shape + + # Flip back the keypoints + _htm_flip = flip_heatmaps( + _htm_flip, + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + heatmaps = (_htm + _htm_flip) * 0.5 + + # Flip back scalars + _prob_flip = _prob_flip[:, flip_indices] + _vis_flip = _vis_flip[:, flip_indices] + _oks_flip = _oks_flip[:, flip_indices] + _err_flip = _err_flip[:, flip_indices] + + probabilities = (_prob + _prob_flip) * 0.5 + visibilities = (_vis + _vis_flip) * 0.5 + oks = (_oks + _oks_flip) * 0.5 + errors = (_err + _err_flip) * 0.5 + else: + heatmaps, probabilities, visibilities, oks, errors = self.forward(feats) + B, C, H, W = heatmaps.shape + + preds = self.decode(heatmaps) + probabilities = to_numpy(probabilities).reshape((B, 1, C)) + visibilities = to_numpy(visibilities).reshape((B, 1, C)) + oks = to_numpy(oks).reshape((B, 1, C)) + errors = to_numpy(errors).reshape((B, 1, C)) + + # Normalize errors by dividing with the diagonal of the heatmap + htm_diagonal = np.sqrt(H**2 + W**2) + errors = errors / htm_diagonal + + for pi, p in enumerate(preds): + p.set_field(p['keypoint_scores'], "keypoints_conf") + p.set_field(probabilities[pi], "keypoints_probs") + p.set_field(visibilities[pi], "keypoints_visible") + p.set_field(oks[pi], "keypoints_oks") + p.set_field(errors[pi], "keypoints_error") + + # Replace the keypoint scores with OKS/errors + if not self.freeze_oks: + p.set_field(oks[pi], "keypoint_scores") + # p.set_field(1-errors[pi], "keypoint_scores") + + # hm = heatmaps.detach().cpu().numpy() + # print("Heatmaps:", hm.shape, hm.min(), hm.max()) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
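+
+        Example (illustrative; with the default ``compute_acc=True`` the
+        returned dictionary contains the keys produced below, while the
+        values depend on the data)::
+
+            >>> losses = head.loss(feats, batch_data_samples)
+            >>> sorted(losses.keys())
+            ['acc_pose', 'acc_prob', 'acc_vis', 'loss_kpt', 'mae_err',
+             'mae_err_to_oks', 'mae_oks']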
+ """ + dt_heatmaps, dt_probs, dt_vis, dt_oks, dt_errs = self.forward(feats) + device=dt_heatmaps.device + B, C, H, W = dt_heatmaps.shape + + # Extract GT data + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + gt_probs = np.stack( + [d.gt_instances.in_image.astype(int) for d in batch_data_samples]) + gt_annotated = np.stack( + [d.gt_instances.keypoints_visible.astype(int) for d in batch_data_samples]) + gt_vis = np.stack( + [d.gt_instances.keypoints_visibility.astype(int) for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # Compute GT errors and OKS + if self.freeze_error: + gt_errs = torch.zeros((B, C, 1), device=device, dtype=dt_errs.dtype) + else: + gt_errs = self._error_from_heatmaps(gt_heatmaps, dt_heatmaps) + if self.freeze_oks: + gt_oks = torch.zeros((B, C, 1), device=device, dtype=dt_oks.dtype) + oks_weight = torch.zeros((B, C, 1), device=device, dtype=dt_oks.dtype) + else: + gt_oks, oks_weight = self._oks_from_heatmaps( + gt_heatmaps, + dt_heatmaps, + gt_probs & gt_annotated, + ) + + # Convert everything to tensors + gt_probs = torch.tensor(gt_probs, device=device, dtype=dt_probs.dtype) + gt_vis = torch.tensor(gt_vis, device=device, dtype=dt_vis.dtype) + gt_annotated = torch.tensor(gt_annotated, device=device) + + gt_oks = gt_oks.to(device).to(dt_oks.dtype) + oks_weight = oks_weight.to(device).to(dt_oks.dtype) + gt_errs = gt_errs.to(device).to(dt_errs.dtype) + + # Reshape everything to comparable shapes + gt_heatmaps = gt_heatmaps.view((B, C, H, W)) + dt_heatmaps = dt_heatmaps.view((B, C, H, W)) + gt_probs = gt_probs.view((B, C)) + dt_probs = dt_probs.view((B, C)) + gt_vis = gt_vis.view((B, C)) + dt_vis = dt_vis.view((B, C)) + gt_oks = gt_oks.view((B, C)) + dt_oks = dt_oks.view((B, C)) + gt_errs = gt_errs.view((B, C)) + dt_errs = dt_errs.view((B, C)) + keypoint_weights = keypoint_weights.view((B, C)) + gt_annotated = gt_annotated.view((B, C)) + # oks_weight = oks_weight.view((B, C)) + + annotated_in = gt_annotated & (gt_probs > 0.5) + + # calculate losses + losses = dict() + if self.learn_heatmaps_from_zeros: + heatmap_weights = gt_annotated + else: + # heatmap_weights = keypoint_weights + heatmap_weights = annotated_in + + heatmap_loss_pxl = self.keypoint_loss_module(dt_heatmaps, gt_heatmaps, annotated_in, per_pixel=True) + heatmap_loss = self.keypoint_loss_module(dt_heatmaps, gt_heatmaps, annotated_in) + # probability_loss = self.probability_loss_module(dt_probs, gt_probs, gt_annotated) + # visibility_loss = self.visibility_loss_module(dt_vis, gt_vis, annotated_in) + # oks_loss = self.oks_loss_module(dt_oks, gt_oks, annotated_in) + # error_loss = self.error_loss_module(dt_errs, gt_errs, annotated_in) + + # Visualize some heatmaps + for i in range(0, B): + # continue + if self.num_iters % self.interval == 0: + self.interval = int(self.interval * 1.3) + os.makedirs(self.loss_vis_folder, exist_ok=True) + for kpt_i in np.random.choice(C, 17, replace=False): + tgt = gt_heatmaps[i, kpt_i].detach().cpu().numpy() + htm = dt_heatmaps[i, kpt_i].detach().cpu().numpy() + lss = heatmap_loss_pxl[i, kpt_i].detach().cpu().numpy() + save_img = self._visualize_heatmaps( + htm, tgt, lss, keypoint_weights[i, kpt_i], gt_probs[i, kpt_i] + ) + + save_path = os.path.join( + self.loss_vis_folder, + "heatmap_{:07d}-{:d}-{:d}.png".format(self.num_iters, i, kpt_i) + ) + cv2.imwrite(save_path, save_img) + + self.num_iters += 1 + + + losses.update( + loss_kpt=heatmap_loss + ) + + # 
calculate accuracy + if train_cfg.get('compute_acc', True): + acc_pose = self.get_pose_accuracy( + dt_heatmaps, gt_heatmaps, keypoint_weights > 0.5 + ) + losses.update(acc_pose=acc_pose) + + # Calculate the best binary accuracy for probability + acc_prob, _ = self.get_binary_accuracy( + dt_probs, + gt_probs, + gt_annotated > 0.5, + force_balanced=True, + ) + losses.update(acc_prob=acc_prob) + + # Calculate the best binary accuracy for visibility + acc_vis, _ = self.get_binary_accuracy( + dt_vis, + gt_vis, + annotated_in > 0.5, + force_balanced=True, + ) + losses.update(acc_vis=acc_vis) + + # Calculate the MAE for OKS + acc_oks = self.get_mae( + dt_oks, + gt_oks, + annotated_in > 0.5, + ) + losses.update(mae_oks=acc_oks) + + # Calculate the MAE for euclidean error + acc_err = self.get_mae( + dt_errs, + gt_errs, + annotated_in > 0.5, + ) + losses.update(mae_err=acc_err) + + # Calculate the MAE between Euclidean error and OKS + err_to_oks_mae = self.get_mae( + self.error_to_OKS(dt_errs, area=H*W), + gt_oks, + annotated_in > 0.5, + ) + losses.update(mae_err_to_oks=err_to_oks_mae) + + print(self.temperature.item()) + + return losses + + def _visualize_heatmaps( + self, + htm, + tgt, + lss, + weight, + prob + ): + tgt_range = (tgt.min(), tgt.max()) + htm_range = (htm.min(), htm.max()) + lss_range = (lss.min(), lss.max()) + + tgt[tgt < 0] = 0 + htm[htm < 0] = 0 + lss[lss < 0] = 0 + + # Normalize heatmaps between 0 and 1 + tgt /= (tgt.max()+1e-10) + htm /= (htm.max()+1e-10) + lss /= (lss.max()+1e-10) + + scale = 6 + + htm_color = cv2.cvtColor((htm*255).astype(np.uint8), cv2.COLOR_GRAY2BGR) + htm_color = cv2.applyColorMap(htm_color, cv2.COLORMAP_JET) + htm_color = cv2.resize(htm_color, (htm.shape[1]*scale, htm.shape[0]*scale), interpolation=cv2.INTER_NEAREST) + + tgt_color = cv2.cvtColor((tgt*255).astype(np.uint8), cv2.COLOR_GRAY2BGR) + tgt_color = cv2.applyColorMap(tgt_color, cv2.COLORMAP_JET) + tgt_color = cv2.resize(tgt_color, (htm.shape[1]*scale, htm.shape[0]*scale), interpolation=cv2.INTER_NEAREST) + + lss_color = cv2.cvtColor((lss*255).astype(np.uint8), cv2.COLOR_GRAY2BGR) + lss_color = cv2.applyColorMap(lss_color, cv2.COLORMAP_JET) + lss_color = cv2.resize(lss_color, (htm.shape[1]*scale, htm.shape[0]*scale), interpolation=cv2.INTER_NEAREST) + + if scale > 2: + tgt_color_text = tgt_color.copy() + cv2.putText(tgt_color_text, "tgt ({:.1f}, {:.1f})".format(tgt_range[0]*10, tgt_range[1]*10), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1) + tgt_color = cv2.addWeighted(tgt_color, 0.6, tgt_color_text, 0.4, 0) + + htm_color_text = htm_color.copy() + cv2.putText(htm_color_text, "htm ({:.1f}, {:.1f})".format(htm_range[0]*10, htm_range[1]*10), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1) + htm_color = cv2.addWeighted(htm_color, 0.6, htm_color_text, 0.4, 0) + + lss_color_text = lss_color.copy() + cv2.putText(lss_color_text, "lss ({:.1f}, {:.1f})".format(lss_range[0]*10, lss_range[1]*10), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1) + lss_color = cv2.addWeighted(lss_color, 0.6, lss_color_text, 0.4, 0) + + # Get argmax of the target and draw horizontal and vertical lines + tgt_argmax = np.unravel_index(tgt.argmax(), tgt.shape) + tgt_color_line = tgt_color.copy() + cv2.line(tgt_color_line, (0, tgt_argmax[0]*scale), (tgt_color.shape[1], tgt_argmax[0]*scale), (0, 255, 255), 1) + cv2.line(tgt_color_line, (tgt_argmax[1]*scale, 0), (tgt_argmax[1]*scale, tgt_color.shape[0]), (0, 255, 255), 1) + tgt_color = cv2.addWeighted(tgt_color, 0.6, tgt_color_line, 0.4, 0) + 
htm_color_line = htm_color.copy() + cv2.line(htm_color_line, (0, tgt_argmax[0]*scale), (tgt_color.shape[1], tgt_argmax[0]*scale), (0, 255, 255), 1) + cv2.line(htm_color_line, (tgt_argmax[1]*scale, 0), (tgt_argmax[1]*scale, tgt_color.shape[0]), (0, 255, 255), 1) + htm_color = cv2.addWeighted(htm_color, 0.6, htm_color_line, 0.4, 0) + lss_color_line = lss_color.copy() + cv2.line(lss_color_line, (0, tgt_argmax[0]*scale), (tgt_color.shape[1], tgt_argmax[0]*scale), (0, 255, 255), 1) + cv2.line(lss_color_line, (tgt_argmax[1]*scale, 0), (tgt_argmax[1]*scale, tgt_color.shape[0]), (0, 255, 255), 1) + lss_color = cv2.addWeighted(lss_color, 0.6, lss_color_line, 0.4, 0) + + white_column = np.ones((tgt_color.shape[0], 1, 3), dtype=np.uint8) * 255 + + save_img = np.concatenate(( + tgt_color, + white_column, + htm_color, + white_column, + lss_color, + ), axis=1) + + if weight < 0.5: + # Draw a red X across the whole save_img + cv2.line(save_img, (0, 0), (save_img.shape[1], save_img.shape[0]), (0, 0, 255), 2) + cv2.line(save_img, (0, save_img.shape[0]), (save_img.shape[1], 0), (0, 0, 255), 2) + elif prob < 0.5: + # Draw an yellow X across the whole save_img + cv2.line(save_img, (0, 0), (save_img.shape[1], save_img.shape[0]), (0, 255, 255), 2) + cv2.line(save_img, (0, save_img.shape[0]), (save_img.shape[1], 0), (0, 255, 255), 2) + return save_img + + + def get_pose_accuracy(self, dt, gt, mask): + """Calculate the accuracy of predicted pose.""" + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(dt), + target=to_numpy(gt), + mask=to_numpy(mask), + method='argmax', + ) + acc_pose = torch.tensor(avg_acc, device=gt.device) + return acc_pose + + def get_binary_accuracy(self, dt, gt, mask, force_balanced=False): + """Calculate the binary accuracy.""" + assert dt.shape == gt.shape + device = gt.device + dt = to_numpy(dt) + gt = to_numpy(gt) + mask = to_numpy(mask) + + dt = dt[mask] + gt = gt[mask] + gt = gt.astype(bool) + + if force_balanced: + # Force the number of positive and negative samples to be balanced + pos_num = np.sum(gt) + neg_num = len(gt) - pos_num + num = min(pos_num, neg_num) + if num == 0: + return torch.tensor([0.0], device=device), torch.tensor([0.0], device=device) + pos_idx = np.where(gt)[0] + neg_idx = np.where(~gt)[0] + + # Randomly sample the same number of positive and negative samples + np.random.shuffle(pos_idx) + np.random.shuffle(neg_idx) + idx = np.concatenate([pos_idx[:num], neg_idx[:num]]) + dt = dt[idx] + gt = gt[idx] + + n_samples = len(gt) + thresholds = np.arange(0.1, 1.0, 0.05) + preds = (dt[:, None] > thresholds) + correct = preds == gt[:, None] + counts = correct.sum(axis=0) + + # Find the threshold that maximizes the accuracy + best_idx = np.argmax(counts) + best_threshold = thresholds[best_idx] + best_acc = counts[best_idx] / n_samples + + best_acc = torch.tensor(best_acc, device=device).float() + best_threshold = torch.tensor(best_threshold, device=device).float() + return best_acc, best_threshold + + def get_mae(self, dt, gt, mask): + """Calculate the mean absolute error.""" + assert dt.shape == gt.shape + device = gt.device + dt = to_numpy(dt) + gt = to_numpy(gt) + mask = to_numpy(mask) + + dt = dt[mask] + gt = gt[mask] + mae = np.abs(dt - gt).mean() + + mae = torch.tensor(mae, device=device) + return mae + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. 
+ + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v + + def error_to_OKS(self, error, area=1.0): + """Convert the error to OKS.""" + sigmas = np.array( + [.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89])/10.0 + if isinstance(error, torch.Tensor): + sigmas = torch.tensor(sigmas, device=error.device) + vars = (sigmas * 2)**2 + norm_error = error**2 / vars / area / 2.0 + return torch.exp(-norm_error) + + +def compute_oks(gt, dt, use_area=True, per_kpt=False): + sigmas = np.array( + [.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89])/10.0 + vars = (sigmas * 2)**2 + k = len(sigmas) + visibility_condition = lambda x: x > 0 + g = np.array(gt['keypoints']).reshape(k, 3) + xg = g[:, 0]; yg = g[:, 1]; vg = g[:, 2] + k1 = np.count_nonzero(visibility_condition(vg)) + bb = gt['bbox'] + x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2 + y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2 + + d = np.array(dt['keypoints']).reshape((k, 3)) + xd = d[:, 0]; yd = d[:, 1] + + if k1>0: + # measure the per-keypoint distance if keypoints visible + dx = xd - xg + dy = yd - yg + + else: + # measure minimum distance to keypoints in (x0,y0) & (x1,y1) + z = np.zeros((k)) + dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0) + dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0) + + if use_area: + e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2 + else: + tmparea = gt['bbox'][3] * gt['bbox'][2] * 0.53 + e = (dx**2 + dy**2) / vars / (tmparea+np.spacing(1)) / 2 + + if per_kpt: + oks = np.exp(-e) + if k1 > 0: + oks[~visibility_condition(vg)] = 0 + + else: + if k1 > 0: + e=e[visibility_condition(vg)] + oks = np.sum(np.exp(-e)) / e.shape[0] + + return oks \ No newline at end of file diff --git a/mmpose/models/heads/hybrid_heads/dekr_head.py b/mmpose/models/heads/hybrid_heads/dekr_head.py new file mode 100644 index 0000000000000000000000000000000000000000..41f7cfc4ce9f7cbb061c18ba14a4847a67a07ffc --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/dekr_head.py @@ -0,0 +1,581 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
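+# DEKR ("Bottom-up human pose estimation via disentangled keypoint
+# regression", CVPR 2021) head. It predicts a (K+1)-channel heatmap
+# (K keypoint channels plus one root/center channel) and a 2K-channel
+# displacement field, and can optionally rescore decoded poses with a
+# small MLP (RescoreNet) pretrained by the DEKR authors.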
+from typing import Sequence, Tuple, Union + +import torch +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmengine.model import BaseModule, ModuleDict, Sequential +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.evaluation.functional.nms import nearby_joints_nms +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, InstanceList, + OptConfigType, OptSampleList, Predictions) +from ...backbones.resnet import BasicBlock +from ..base_head import BaseHead + +try: + from mmcv.ops import DeformConv2d + has_mmcv_full = True +except (ImportError, ModuleNotFoundError): + has_mmcv_full = False + + +class AdaptiveActivationBlock(BaseModule): + """Adaptive activation convolution block. "Bottom-up human pose estimation + via disentangled keypoint regression", CVPR'2021. + + Args: + in_channels (int): Number of input channels + out_channels (int): Number of output channels + groups (int): Number of groups. Generally equal to the + number of joints. + norm_cfg (dict): Config for normalization layers. + act_cfg (dict): Config for activation layers. + """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(AdaptiveActivationBlock, self).__init__(init_cfg=init_cfg) + + assert in_channels % groups == 0 and out_channels % groups == 0 + self.groups = groups + + regular_matrix = torch.tensor([[-1, -1, -1, 0, 0, 0, 1, 1, 1], + [-1, 0, 1, -1, 0, 1, -1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]]) + self.register_buffer('regular_matrix', regular_matrix.float()) + + self.transform_matrix_conv = build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=6 * groups, + kernel_size=3, + padding=1, + groups=groups, + bias=True) + + if has_mmcv_full: + self.adapt_conv = DeformConv2d( + in_channels, + out_channels, + kernel_size=3, + padding=1, + bias=False, + groups=groups, + deform_groups=groups) + else: + raise ImportError('Please install the full version of mmcv ' + 'to use `DeformConv2d`.') + + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + B, _, H, W = x.size() + residual = x + + affine_matrix = self.transform_matrix_conv(x) + affine_matrix = affine_matrix.permute(0, 2, 3, 1).contiguous() + affine_matrix = affine_matrix.view(B, H, W, self.groups, 2, 3) + offset = torch.matmul(affine_matrix, self.regular_matrix) + offset = offset.transpose(4, 5).reshape(B, H, W, self.groups * 18) + offset = offset.permute(0, 3, 1, 2).contiguous() + + x = self.adapt_conv(x, offset) + x = self.norm(x) + x = self.act(x + residual) + + return x + + +class RescoreNet(BaseModule): + """Rescore net used to predict the OKS score of predicted pose. We use the + off-the-shelf rescore net pretrained by authors of DEKR. 
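+
+    The network is a small MLP: it takes a per-instance feature vector
+    built from relative joint offsets, link lengths (normalized by the
+    mean torso link length) and keypoint scores (see ``make_feature``)
+    and regresses a single OKS-like confidence for the whole pose.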
+ + Args: + in_channels (int): Input channels + norm_indexes (Tuple(int)): Indices of torso in skeleton + init_cfg (dict, optional): Initialization config dict + """ + + def __init__( + self, + in_channels, + norm_indexes, + init_cfg=None, + ): + super(RescoreNet, self).__init__(init_cfg=init_cfg) + + self.norm_indexes = norm_indexes + + hidden = 256 + + self.l1 = torch.nn.Linear(in_channels, hidden, bias=True) + self.l2 = torch.nn.Linear(hidden, hidden, bias=True) + self.l3 = torch.nn.Linear(hidden, 1, bias=True) + self.relu = torch.nn.ReLU() + + def make_feature(self, keypoints, keypoint_scores, skeleton): + """Combine original scores, joint distance and relative distance to + make feature. + + Args: + keypoints (torch.Tensor): predicetd keypoints + keypoint_scores (torch.Tensor): predicetd keypoint scores + skeleton (list(list(int))): joint links + + Returns: + torch.Tensor: feature for each instance + """ + joint_1, joint_2 = zip(*skeleton) + num_link = len(skeleton) + + joint_relate = (keypoints[:, joint_1] - + keypoints[:, joint_2])[:, :, :2] + joint_length = joint_relate.norm(dim=2) + + # To use the torso distance to normalize + normalize = (joint_length[:, self.norm_indexes[0]] + + joint_length[:, self.norm_indexes[1]]) / 2 + normalize = normalize.unsqueeze(1).expand(normalize.size(0), num_link) + normalize = normalize.clamp(min=1).contiguous() + + joint_length = joint_length / normalize[:, :] + joint_relate = joint_relate / normalize.unsqueeze(-1) + joint_relate = joint_relate.flatten(1) + + feature = torch.cat((joint_relate, joint_length, keypoint_scores), + dim=1).float() + return feature + + def forward(self, keypoints, keypoint_scores, skeleton): + feature = self.make_feature(keypoints, keypoint_scores, skeleton) + x = self.relu(self.l1(feature)) + x = self.relu(self.l2(x)) + x = self.l3(x) + return x.squeeze(1) + + +@MODELS.register_module() +class DEKRHead(BaseHead): + """DisEntangled Keypoint Regression head introduced in `Bottom-up human + pose estimation via disentangled keypoint regression`_ by Geng et al + (2021). The head is composed of a heatmap branch and a displacement branch. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + num_joints (int): Number of joints + num_heatmap_filters (int): Number of filters for heatmap branch. + Defaults to 32 + num_offset_filters_per_joint (int): Number of filters for each joint + in displacement branch. Defaults to 15 + heatmap_loss (Config): Config of the heatmap loss. Defaults to use + :class:`KeypointMSELoss` + displacement_loss (Config): Config of the displacement regression loss. + Defaults to use :class:`SoftWeightSmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + rescore_cfg (Config, optional): The config for rescore net which + estimates OKS via predicted keypoints and keypoint scores. + Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`Bottom-up human pose estimation via disentangled keypoint regression`: + https://arxiv.org/abs/2104.02300 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_keypoints: int, + num_heatmap_filters: int = 32, + num_displacement_filters_per_keypoint: int = 15, + heatmap_loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + displacement_loss: ConfigType = dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False), + decoder: OptConfigType = None, + rescore_cfg: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_keypoints = num_keypoints + + # build heatmap branch + self.heatmap_conv_layers = self._make_heatmap_conv_layers( + in_channels=in_channels, + out_channels=1 + num_keypoints, + num_filters=num_heatmap_filters, + ) + + # build displacement branch + self.displacement_conv_layers = self._make_displacement_conv_layers( + in_channels=in_channels, + out_channels=2 * num_keypoints, + num_filters=num_keypoints * num_displacement_filters_per_keypoint, + groups=num_keypoints) + + # build losses + self.loss_module = ModuleDict( + dict( + heatmap=MODELS.build(heatmap_loss), + displacement=MODELS.build(displacement_loss), + )) + + # build decoder + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build rescore net + if rescore_cfg is not None: + self.rescore_net = RescoreNet(**rescore_cfg) + else: + self.rescore_net = None + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def _make_heatmap_conv_layers(self, in_channels: int, out_channels: int, + num_filters: int): + """Create convolutional layers of heatmap branch by given + parameters.""" + layers = [ + ConvModule( + in_channels=in_channels, + out_channels=num_filters, + kernel_size=1, + norm_cfg=dict(type='BN')), + BasicBlock(num_filters, num_filters), + build_conv_layer( + dict(type='Conv2d'), + in_channels=num_filters, + out_channels=out_channels, + kernel_size=1), + ] + + return Sequential(*layers) + + def _make_displacement_conv_layers(self, in_channels: int, + out_channels: int, num_filters: int, + groups: int): + """Create convolutional layers of displacement branch by given + parameters.""" + layers = [ + ConvModule( + in_channels=in_channels, + out_channels=num_filters, + kernel_size=1, + norm_cfg=dict(type='BN')), + AdaptiveActivationBlock(num_filters, num_filters, groups=groups), + AdaptiveActivationBlock(num_filters, num_filters, groups=groups), + build_conv_layer( + dict(type='Conv2d'), + in_channels=num_filters, + out_channels=out_channels, + kernel_size=1, + groups=groups) + ] + + return Sequential(*layers) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is a tuple of heatmap and displacement. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: output heatmap and displacement. 
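+            The heatmaps have ``num_keypoints + 1`` channels (keypoint
+            heatmaps plus a root heatmap) and the displacements have
+            ``2 * num_keypoints`` channels, both at the resolution of the
+            last feature map.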
+ """ + x = feats[-1] + + heatmaps = self.heatmap_conv_layers(x) + displacements = self.displacement_conv_layers(x) + + return heatmaps, displacements + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + pred_heatmaps, pred_displacements = self.forward(feats) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + heatmap_weights = torch.stack( + [d.gt_fields.heatmap_weights for d in batch_data_samples]) + gt_displacements = torch.stack( + [d.gt_fields.displacements for d in batch_data_samples]) + displacement_weights = torch.stack( + [d.gt_fields.displacement_weights for d in batch_data_samples]) + + if 'heatmap_mask' in batch_data_samples[0].gt_fields.keys(): + heatmap_mask = torch.stack( + [d.gt_fields.heatmap_mask for d in batch_data_samples]) + else: + heatmap_mask = None + + # calculate losses + losses = dict() + heatmap_loss = self.loss_module['heatmap'](pred_heatmaps, gt_heatmaps, + heatmap_weights, + heatmap_mask) + displacement_loss = self.loss_module['displacement']( + pred_displacements, gt_displacements, displacement_weights) + + losses.update({ + 'loss/heatmap': heatmap_loss, + 'loss/displacement': displacement_loss, + }) + + return losses + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-scale features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
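+
+        Note that only batch size 1 is supported at inference time; when
+        flip TTA is enabled, the original and flipped heatmaps and
+        displacements are averaged before decoding.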
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (1, h, w) + or (K+1, h, w) if keypoint heatmaps are predicted + - displacements (Tensor): The predicted displacement fields + in shape (K*2, h, w) + """ + + assert len(batch_data_samples) == 1, f'DEKRHead only supports ' \ + f'prediction with batch_size 1, but got {len(batch_data_samples)}' + + multiscale_test = test_cfg.get('multiscale_test', False) + flip_test = test_cfg.get('flip_test', False) + metainfo = batch_data_samples[0].metainfo + aug_scales = [1] + + if not multiscale_test: + feats = [feats] + else: + aug_scales = aug_scales + metainfo['aug_scales'] + + heatmaps, displacements = [], [] + for feat, s in zip(feats, aug_scales): + if flip_test: + assert isinstance(feat, list) and len(feat) == 2 + flip_indices = metainfo['flip_indices'] + _feat, _feat_flip = feat + _heatmaps, _displacements = self.forward(_feat) + _heatmaps_flip, _displacements_flip = self.forward(_feat_flip) + + _heatmaps_flip = flip_heatmaps( + _heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices + [len(flip_indices)], + shift_heatmap=test_cfg.get('shift_heatmap', False)) + _heatmaps = (_heatmaps + _heatmaps_flip) / 2.0 + + _displacements_flip = flip_heatmaps( + _displacements_flip, + flip_mode='offset', + flip_indices=flip_indices, + shift_heatmap=False) + + # this is a coordinate amendment. + x_scale_factor = s * ( + metainfo['input_size'][0] / _heatmaps.shape[-1]) + _displacements_flip[:, ::2] += (x_scale_factor - 1) / ( + x_scale_factor) + _displacements = (_displacements + _displacements_flip) / 2.0 + + else: + _heatmaps, _displacements = self.forward(feat) + + heatmaps.append(_heatmaps) + displacements.append(_displacements) + + preds = self.decode(heatmaps, displacements, test_cfg, metainfo) + + if test_cfg.get('output_heatmaps', False): + heatmaps = [hm.detach() for hm in heatmaps] + displacements = [dm.detach() for dm in displacements] + B = heatmaps[0].shape[0] + pred_fields = [] + for i in range(B): + pred_fields.append( + PixelData( + heatmaps=heatmaps[0][i], + displacements=displacements[0][i])) + return preds, pred_fields + else: + return preds + + def decode(self, + heatmaps: Tuple[Tensor], + displacements: Tuple[Tensor], + test_cfg: ConfigType = {}, + metainfo: dict = {}) -> InstanceList: + """Decode keypoints from outputs. + + Args: + heatmaps (Tuple[Tensor]): The output heatmaps inferred from one + image or multi-scale images. + displacements (Tuple[Tensor]): The output displacement fields + inferred from one image or multi-scale images. + test_cfg (dict): The runtime config for testing process. Defaults + to {} + metainfo (dict): The metainfo of test dataset. Defaults to {} + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. 
' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + multiscale_test = test_cfg.get('multiscale_test', False) + skeleton = metainfo.get('skeleton_links', None) + + preds = [] + batch_size = heatmaps[0].shape[0] + + for b in range(batch_size): + if multiscale_test: + raise NotImplementedError + else: + keypoints, (root_scores, + keypoint_scores) = self.decoder.decode( + heatmaps[0][b], displacements[0][b]) + + # rescore each instance + if self.rescore_net is not None and skeleton and len( + keypoints) > 0: + instance_scores = self.rescore_net(keypoints, keypoint_scores, + skeleton) + instance_scores[torch.isnan(instance_scores)] = 0 + root_scores = root_scores * instance_scores + + # nms + keypoints, keypoint_scores = to_numpy((keypoints, keypoint_scores)) + scores = to_numpy(root_scores)[..., None] * keypoint_scores + if len(keypoints) > 0 and test_cfg.get('nms_dist_thr', 0) > 0: + kpts_db = [] + for i in range(len(keypoints)): + kpts_db.append( + dict(keypoints=keypoints[i], score=keypoint_scores[i])) + keep_instance_inds = nearby_joints_nms( + kpts_db, + test_cfg['nms_dist_thr'], + test_cfg.get('nms_joints_thr', None), + score_per_joint=True, + max_dets=test_cfg.get('max_num_people', 30)) + keypoints = keypoints[keep_instance_inds] + scores = scores[keep_instance_inds] + + # pack outputs + preds.append( + InstanceData(keypoints=keypoints, keypoint_scores=scores)) + + return preds + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`DEKRHead` (before MMPose v1.0.0) to a compatible format + of :class:`DEKRHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for k in keys: + if 'offset_conv_layer' in k: + v = state_dict.pop(k) + k = k.replace('offset_conv_layers', 'displacement_conv_layers') + if 'displacement_conv_layers.3.' in k: + # the source and target of displacement vectors are + # opposite between two versions. + v = -v + state_dict[k] = v + + if 'heatmap_conv_layers.2' in k: + # root heatmap is at the first/last channel of the + # heatmap tensor in MMPose v0.x/1.x, respectively. + v = state_dict.pop(k) + state_dict[k] = torch.cat((v[1:], v[:1])) + + if 'rescore_net' in k: + v = state_dict.pop(k) + k = k.replace('rescore_net', 'head.rescore_net') + state_dict[k] = v diff --git a/mmpose/models/heads/hybrid_heads/poseid_head.py b/mmpose/models/heads/hybrid_heads/poseid_head.py new file mode 100644 index 0000000000000000000000000000000000000000..218f7751b92943abd5e8d9602d437fe6621b58c6 --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/poseid_head.py @@ -0,0 +1,623 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
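+# PoseIDHead: a top-down heatmap head (SimpleBaseline-style deconvolution
+# stack) extended with a small convolutional branch that predicts, per
+# instance crop, how useful the pose is for person identification
+# ("usefulness").
+#
+# A minimal, illustrative config sketch (channel numbers below are
+# placeholders, not a tested recipe):
+#
+#     head=dict(
+#         type='PoseIDHead',
+#         in_channels=768,
+#         out_channels=17,
+#         usefulness_loss=dict(type='MSELoss', use_target_weight=True))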
+from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +import numpy as np + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class PoseIDHead(BaseHead): + """Multi-variate head predicting all information about keypoints. Apart + from the heatmap, it also predicts: + 1) Heatmap for each keypoint + 2) Usefulness of the pose for identification + The heatmap predicting part is the same as HeatmapHead introduced in + in `Simple Baselines`_ by Xiao et al (2018). + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer_dict (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + keypoint_loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + usefulness_loss (Config): Config of the probability loss. Defaults to use + :class:`BCELoss` + freeze_heatmaps (bool): Whether to freeze the heatmaps prediction. + Defaults to ``False`` + freeze_usefulness (bool): Whether to freeze the usefulness prediction. + Defaults to ``False`` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + + .. 
_`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer_dict: dict = dict(kernel_size=1), + keypoint_loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + usefulness_loss: ConfigType = dict( + type='MSELoss', use_target_weight=True), + usefulness_thr: float = None, + freeze_heatmaps: bool = False, + freeze_usefulness: bool = False, + detach_usefulness: bool = True, + decoder: OptConfigType = dict( + type='UDPHeatmap', input_size=(192, 256), + heatmap_size=(48, 64), sigma=2), + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.keypoint_loss_module = MODELS.build(keypoint_loss) + self.usefulness_loss_module = MODELS.build(usefulness_loss) + + self.decoder = KEYPOINT_CODECS.build(decoder) + self.nonlinearity = nn.ReLU(inplace=True) + + self.usefulness_thr = usefulness_thr + self.detach_usefulness = detach_usefulness + + self._build_heatmap_head( + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer_dict=final_layer_dict, + normalize=False, + freeze=freeze_heatmaps) + + self._build_usefulness_head( + in_channels=in_channels, + out_channels=out_channels, + freeze=freeze_usefulness) + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _build_heatmap_head(self, in_channels: int, out_channels: int, + deconv_out_channels: Sequence[int], + deconv_kernel_sizes: Sequence[int], + conv_out_channels: Sequence[int], + conv_kernel_sizes: Sequence[int], + final_layer_dict: dict, + normalize: bool = False, + freeze: bool = False) -> nn.Module: + """Build the heatmap head module.""" + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. 
Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer_dict is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer_dict) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + self.normalize_layer = nn.Sigmoid() if normalize else nn.Identity() + + if freeze: + for param in self.deconv_layers.parameters(): + param.requires_grad = False + for param in self.conv_layers.parameters(): + param.requires_grad = False + for param in self.final_layer.parameters(): + param.requires_grad = False + + def _build_usefulness_head(self, in_channels: int, out_channels: int, + freeze: bool = False) -> nn.Module: + """Build the probability head module.""" + usf_layers = [] + kernel_sizes = [(4, 3), (2, 2), (2, 2)] + for i in range(len(kernel_sizes)): + usf_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1)) + usf_layers.append( + nn.BatchNorm2d(num_features=in_channels)) + usf_layers.append( + nn.MaxPool2d(kernel_size=kernel_sizes[i], stride=kernel_sizes[i], padding=0)) + usf_layers.append(self.nonlinearity) + usf_layers.append( + build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=1, + kernel_size=1, + stride=1, + padding=0)) + usf_layers.append(nn.Sigmoid()) + self.usefulness_layers = nn.Sequential(*usf_layers) + + if freeze: + for param in self.usefulness_layers.parameters(): + param.requires_grad = False + + def _make_conv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create convolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + padding = (kernel_size - 1) // 2 + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + layers.append(build_conv_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(self.nonlinearity) + in_channels = out_channels + + return nn.Sequential(*layers) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(self.nonlinearity) + in_channels = out_channels + + return nn.Sequential(*layers) + + @property + def 
default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. The input is multi scale feature maps and the + output is (1) the heatmap, (2) probability, (3) visibility, (4) oks and (5) error. + + Args: + feats (Tensor): Multi scale feature maps. + + Returns: + Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: outputs. + """ + x = feats[-1] + + heatmaps = self.forward_heatmap(x) + usefulness = self.forward_usefulness(x) + + return heatmaps, usefulness + + def forward_heatmap(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + x (Tensor): Multi scale feature maps. + + Returns: + Tensor: output heatmap. + """ + x = self.deconv_layers(x) + x = self.conv_layers(x) + x = self.final_layer(x) + x = self.normalize_layer(x) + return x + + def forward_usefulness(self, x: Tensor) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the probability. + + Args: + x (Tensor): Multi scale feature maps. + detach (bool): Whether to detach the probability from gradient + + Returns: + Tensor: output probability. + """ + if self.detach_usefulness: + x = x.detach() + x = self.usefulness_layers(x) + return x + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
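+
+        Besides the fields listed below, each returned ``InstanceData``
+        also carries a ``keypoints_usf`` field holding the predicted
+        usefulness score of the pose.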
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _htm, _usf = self.forward(_feats) + _htm_flip, _usf_flip = self.forward(_feats_flip) + B, C, H, W = _htm.shape + + # Flip back the keypoints + _htm_flip = flip_heatmaps( + _htm_flip, + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + heatmaps = (_htm + _htm_flip) * 0.5 + + # Flip back scalars + # _usf_flip = _usf_flip[:, flip_indices] + + usefulness = (_usf + _usf_flip) * 0.5 + else: + heatmaps, usefulness = self.forward(feats) + B, C, H, W = heatmaps.shape + + preds = self.decode(heatmaps) + usefulness = to_numpy(usefulness).reshape((B, 1)) + + for pi, p in enumerate(preds): + p.set_field(usefulness[pi], "keypoints_usf") + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
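+            Besides ``loss_kpt`` and ``loss_usefulness``, it carries
+            monitoring values when ``compute_acc`` is enabled (the
+            default): ``acc_pose`` plus either ``usf_acc``/``usf_thr``
+            (if ``usefulness_thr`` is set) or ``usf_mae``.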
+ """ + dt_heatmaps, dt_usfs = self.forward(feats) + device=dt_heatmaps.device + B, C, H, W = dt_heatmaps.shape + + # Extract GT data + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + # breakpoint() + gt_usfs = np.stack( + [d.gt_instances.identified.astype(float) for d in batch_data_samples]) + if self.usefulness_thr is not None: + gt_usfs = (gt_usfs > self.usefulness_thr).astype(int) + + gt_annotated = np.stack( + [d.gt_instances.keypoints_visible.astype(int) for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # Convert everything to tensors + gt_usfs = torch.tensor(gt_usfs, device=device, dtype=dt_usfs.dtype) + gt_annotated = torch.tensor(gt_annotated, device=device) + + # Reshape everything to comparable shapes + gt_heatmaps = gt_heatmaps.view((B, C, H, W)) + dt_heatmaps = dt_heatmaps.view((B, C, H, W)) + gt_usfs = gt_usfs.view((B, 1)) + dt_usfs = dt_usfs.view((B, 1)) + keypoint_weights = keypoint_weights.view((B, C)) + gt_annotated = gt_annotated.view((B, C)) + + # Compute uselfulness weights + # usfs_weights = torch.ones_like(dt_usfs, dtype=torch.float, device=device) + usfs_weights = gt_usfs.detach().clone() * 8.0 + 1.0 # Weight the useful poses more ais the ratio in data is approx 1:9 + + # calculate losses + losses = dict() + heatmap_weights = keypoint_weights + + heatmap_loss = self.keypoint_loss_module(dt_heatmaps, gt_heatmaps, heatmap_weights) + usefulness_loss = self.usefulness_loss_module( + dt_usfs, gt_usfs, + target_weight=usfs_weights + ) + + losses.update( + loss_kpt=heatmap_loss, + loss_usefulness=usefulness_loss, + ) + + # calculate accuracy + if train_cfg.get('compute_acc', True): + acc_pose = self.get_pose_accuracy( + dt_heatmaps, gt_heatmaps, keypoint_weights > 0.5 + ) + losses.update(acc_pose=acc_pose) + + # Calculate the best binary accuracy for probability + if self.usefulness_thr is not None: + usf_acc, usf_thr = self.get_binary_accuracy( + dt_usfs, gt_usfs, torch.ones_like(dt_usfs, dtype=torch.bool) + ) + losses.update(usf_acc=usf_acc, usf_thr=usf_thr) + else: + usf_err = self.get_mae( + dt_usfs, + gt_usfs, + # (gt_annotated > 0.5).any(axis=1).view(dt_usfs.shape), + mask=torch.ones_like(dt_usfs, dtype=torch.bool) + ) + losses.update(usf_mae=usf_err) + + return losses + + def get_pose_accuracy(self, dt, gt, mask): + """Calculate the accuracy of predicted pose.""" + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(dt), + target=to_numpy(gt), + mask=to_numpy(mask), + ) + acc_pose = torch.tensor(avg_acc, device=gt.device) + return acc_pose + + def get_binary_accuracy(self, dt, gt, mask, force_balanced=False): + """Calculate the binary accuracy.""" + assert dt.shape == gt.shape + device = gt.device + dt = to_numpy(dt) + gt = to_numpy(gt) + mask = to_numpy(mask) + + dt = dt[mask] + gt = gt[mask] + gt = gt.astype(bool) + + if force_balanced: + # Force the number of positive and negative samples to be balanced + pos_num = np.sum(gt) + neg_num = len(gt) - pos_num + num = min(pos_num, neg_num) + if num == 0: + return torch.tensor([0.0], device=device), torch.tensor([0.0], device=device) + pos_idx = np.where(gt)[0] + neg_idx = np.where(~gt)[0] + + # Randomly sample the same number of positive and negative samples + np.random.shuffle(pos_idx) + np.random.shuffle(neg_idx) + idx = np.concatenate([pos_idx[:num], neg_idx[:num]]) + dt = dt[idx] + gt = gt[idx] + + n_samples = len(gt) + thresholds = np.arange(0.1, 1.0, 0.05) + preds = (dt[:, None] > 
thresholds) + correct = preds == gt[:, None] + counts = correct.sum(axis=0) + + # Find the threshold that maximizes the accuracy + best_idx = np.argmax(counts) + best_threshold = thresholds[best_idx] + best_acc = counts[best_idx] / n_samples + + best_acc = torch.tensor(best_acc, device=device).float() + best_threshold = torch.tensor(best_threshold, device=device).float() + return best_acc, best_threshold + + def get_mae(self, dt, gt, mask): + """Calculate the mean absolute error.""" + assert dt.shape == gt.shape + device = gt.device + dt = to_numpy(dt) + gt = to_numpy(gt) + mask = to_numpy(mask) + + dt = dt[mask] + gt = gt[mask] + mae = np.abs(dt - gt).mean() + + mae = torch.tensor(mae, device=device) + return mae + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v diff --git a/mmpose/models/heads/hybrid_heads/rtmo_head.py b/mmpose/models/heads/hybrid_heads/rtmo_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c364c20e98fdbb2c1af9f4bba832c24fc20aec37 --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/rtmo_head.py @@ -0,0 +1,1040 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import types +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, Scale +from mmdet.utils import ConfigType, reduce_mean +from mmengine.model import BaseModule, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.evaluation.functional import nms_torch +from mmpose.models.utils import (GAUEncoder, SinePositionalEncoding, + filter_scores_and_topk) +from mmpose.registry import MODELS +from mmpose.structures.bbox import bbox_xyxy2cs +from mmpose.utils.typing import Features, OptSampleList, Predictions +from .yoloxpose_head import YOLOXPoseHead + +EPS = 1e-8 + + +class RTMOHeadModule(BaseModule): + """RTMO head module for one-stage human pose estimation. 
+ + This module predicts classification scores, bounding boxes, keypoint + offsets and visibilities from multi-level feature maps. + + Args: + num_classes (int): Number of categories excluding the background + category. + num_keypoints (int): Number of keypoints defined for one instance. + in_channels (int): Number of channels in the input feature maps. + cls_feat_channels (int): Number of channels in the classification score + and objectness prediction branch. Defaults to 256. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_groups (int): Group number of group convolution layers in keypoint + regression branch. Defaults to 8. + channels_per_group (int): Number of channels for each group of group + convolution layers in keypoint regression branch. Defaults to 32. + featmap_strides (Sequence[int]): Downsample factor of each feature + map. Defaults to [8, 16, 32]. + conv_bias (bool or str): If specified as `auto`, it will be decided + by the norm_cfg. Bias of conv will be set as True if `norm_cfg` + is None, otherwise False. Defaults to "auto". + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + num_keypoints: int, + in_channels: int, + num_classes: int = 1, + widen_factor: float = 1.0, + cls_feat_channels: int = 256, + stacked_convs: int = 2, + num_groups=8, + channels_per_group=36, + pose_vec_channels=-1, + featmap_strides: Sequence[int] = [8, 16, 32], + conv_bias: Union[bool, str] = 'auto', + conv_cfg: Optional[ConfigType] = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: Optional[ConfigType] = None, + ): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.cls_feat_channels = int(cls_feat_channels * widen_factor) + self.stacked_convs = stacked_convs + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.featmap_strides = featmap_strides + + self.in_channels = int(in_channels * widen_factor) + self.num_keypoints = num_keypoints + + self.num_groups = num_groups + self.channels_per_group = int(widen_factor * channels_per_group) + self.pose_vec_channels = pose_vec_channels + + self._init_layers() + + def _init_layers(self): + """Initialize heads for all level feature maps.""" + self._init_cls_branch() + self._init_pose_branch() + + def _init_cls_branch(self): + """Initialize classification branch for all level feature maps.""" + self.conv_cls = nn.ModuleList() + for _ in self.featmap_strides: + stacked_convs = [] + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.cls_feat_channels + stacked_convs.append( + ConvModule( + chn, + self.cls_feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + self.conv_cls.append(nn.Sequential(*stacked_convs)) + + # output layers + self.out_cls = nn.ModuleList() + for _ in 
self.featmap_strides: + self.out_cls.append( + nn.Conv2d(self.cls_feat_channels, self.num_classes, 1)) + + def _init_pose_branch(self): + """Initialize pose prediction branch for all level feature maps.""" + self.conv_pose = nn.ModuleList() + out_chn = self.num_groups * self.channels_per_group + for _ in self.featmap_strides: + stacked_convs = [] + for i in range(self.stacked_convs * 2): + chn = self.in_channels if i == 0 else out_chn + groups = 1 if i == 0 else self.num_groups + stacked_convs.append( + ConvModule( + chn, + out_chn, + 3, + stride=1, + padding=1, + groups=groups, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + self.conv_pose.append(nn.Sequential(*stacked_convs)) + + # output layers + self.out_bbox = nn.ModuleList() + self.out_kpt_reg = nn.ModuleList() + self.out_kpt_vis = nn.ModuleList() + for _ in self.featmap_strides: + self.out_bbox.append(nn.Conv2d(out_chn, 4, 1)) + self.out_kpt_reg.append( + nn.Conv2d(out_chn, self.num_keypoints * 2, 1)) + self.out_kpt_vis.append(nn.Conv2d(out_chn, self.num_keypoints, 1)) + + if self.pose_vec_channels > 0: + self.out_pose = nn.ModuleList() + for _ in self.featmap_strides: + self.out_pose.append( + nn.Conv2d(out_chn, self.pose_vec_channels, 1)) + + def init_weights(self): + """Initialize weights of the head. + + Use prior in model initialization to improve stability. + """ + + super().init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls in self.out_cls: + conv_cls.bias.data.fill_(bias_init) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + cls_scores (List[Tensor]): Classification scores for each level. + bbox_preds (List[Tensor]): Bounding box predictions for each level. + kpt_offsets (List[Tensor]): Keypoint offsets for each level. + kpt_vis (List[Tensor]): Keypoint visibilities for each level. + pose_feats (List[Tensor]): Pose features for each level. + """ + + cls_scores, bbox_preds = [], [] + kpt_offsets, kpt_vis = [], [] + pose_feats = [] + + for i in range(len(x)): + + cls_feat, reg_feat = x[i].split(x[i].size(1) // 2, 1) + + cls_feat = self.conv_cls[i](cls_feat) + reg_feat = self.conv_pose[i](reg_feat) + + cls_scores.append(self.out_cls[i](cls_feat)) + bbox_preds.append(self.out_bbox[i](reg_feat)) + if self.training: + # `kpt_offsets` generates the proxy poses for positive + # sample selection during training + kpt_offsets.append(self.out_kpt_reg[i](reg_feat)) + kpt_vis.append(self.out_kpt_vis[i](reg_feat)) + + if self.pose_vec_channels > 0: + pose_feats.append(self.out_pose[i](reg_feat)) + else: + pose_feats.append(reg_feat) + + return cls_scores, bbox_preds, kpt_offsets, kpt_vis, pose_feats + + +class DCC(BaseModule): + """Dynamic Coordinate Classifier for One-stage Pose Estimation. + + Args: + in_channels (int): Number of input feature map channels. + num_keypoints (int): Number of keypoints for pose estimation. + feat_channels (int): Number of feature channels. + num_bins (Tuple[int, int]): Tuple representing the number of bins in + x and y directions. + spe_channels (int): Number of channels for Sine Positional Encoding. + Defaults to 128. + spe_temperature (float): Temperature for Sine Positional Encoding. + Defaults to 300.0. + gau_cfg (dict, optional): Configuration for Gated Attention Unit. 
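+
+    The classifier projects each per-instance pose feature into
+    per-keypoint features, allocates ``num_bins`` coordinate bins
+    dynamically inside the (expanded) bounding box, and predicts 1-D
+    classification heatmaps over those bins for the x and y axes;
+    keypoint coordinates are recovered as the expectation over the
+    softmaxed bins.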
+ """ + + def __init__( + self, + in_channels: int, + num_keypoints: int, + feat_channels: int, + num_bins: Tuple[int, int], + spe_channels: int = 128, + spe_temperature: float = 300.0, + gau_cfg: Optional[dict] = dict( + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + use_rel_bias=False, + pos_enc='add'), + ): + super().__init__() + + self.in_channels = in_channels + self.num_keypoints = num_keypoints + + self.feat_channels = feat_channels + self.num_bins = num_bins + self.gau_cfg = gau_cfg + + self.spe = SinePositionalEncoding( + out_channels=spe_channels, + temperature=spe_temperature, + ) + self.spe_feat_channels = spe_channels + + self._build_layers() + self._build_basic_bins() + + def _build_layers(self): + """Builds layers for the model.""" + + # GAU encoder + if self.gau_cfg is not None: + gau_cfg = self.gau_cfg.copy() + gau_cfg['in_token_dims'] = self.feat_channels + gau_cfg['out_token_dims'] = self.feat_channels + self.gau = GAUEncoder(**gau_cfg) + if gau_cfg.get('pos_enc', 'none') in ('add', 'rope'): + self.pos_enc = nn.Parameter( + torch.randn(self.num_keypoints, gau_cfg['s'])) + + # fully-connected layers to convert pose feats to keypoint feats + pose_to_kpts = [ + nn.Linear(self.in_channels, + self.feat_channels * self.num_keypoints), + nn.BatchNorm1d(self.feat_channels * self.num_keypoints) + ] + self.pose_to_kpts = nn.Sequential(*pose_to_kpts) + + # adapter layers for dynamic encodings + self.x_fc = nn.Linear(self.spe_feat_channels, self.feat_channels) + self.y_fc = nn.Linear(self.spe_feat_channels, self.feat_channels) + + # fully-connected layers to predict sigma + self.sigma_fc = nn.Sequential( + nn.Linear(self.in_channels, self.num_keypoints), nn.Sigmoid(), + Scale(0.1)) + + def _build_basic_bins(self): + """Builds basic bin coordinates for x and y.""" + self.register_buffer('y_bins', + torch.linspace(-0.5, 0.5, self.num_bins[1])) + self.register_buffer('x_bins', + torch.linspace(-0.5, 0.5, self.num_bins[0])) + + def _apply_softmax(self, x_hms, y_hms): + """Apply softmax on 1-D heatmaps. + + Args: + x_hms (Tensor): 1-D heatmap in x direction. + y_hms (Tensor): 1-D heatmap in y direction. + + Returns: + tuple: A tuple containing the normalized x and y heatmaps. + """ + + x_hms = x_hms.clamp(min=-5e4, max=5e4) + y_hms = y_hms.clamp(min=-5e4, max=5e4) + pred_x = x_hms - x_hms.max(dim=-1, keepdims=True).values.detach() + pred_y = y_hms - y_hms.max(dim=-1, keepdims=True).values.detach() + + exp_x, exp_y = pred_x.exp(), pred_y.exp() + prob_x = exp_x / (exp_x.sum(dim=-1, keepdims=True) + EPS) + prob_y = exp_y / (exp_y.sum(dim=-1, keepdims=True) + EPS) + + return prob_x, prob_y + + def _get_bin_enc(self, bbox_cs, grids): + """Calculate dynamic bin encodings for expanded bounding box. + + This function computes dynamic bin allocations and encodings based + on the expanded bounding box center-scale (bbox_cs) and grid values. + The process involves adjusting the bins according to the scale and + center of the bounding box and then applying a sinusoidal positional + encoding (spe) followed by a fully connected layer (fc) to obtain the + final x and y bin encodings. + + Args: + bbox_cs (Tensor): A tensor representing the center and scale of + bounding boxes. + grids (Tensor): A tensor representing the grid coordinates. + + Returns: + tuple: A tuple containing the encoded x and y bins. 
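+                Along each axis, a bin center is placed at
+                ``bin * scale + (center - grid)`` before being passed
+                through the sinusoidal positional encoding and the
+                per-axis fully connected layer.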
+ """ + center, scale = bbox_cs.split(2, dim=-1) + center = center - grids + + x_bins, y_bins = self.x_bins, self.y_bins + + # dynamic bin allocation + x_bins = x_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 0:1] + center[..., 0:1] + y_bins = y_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 1:2] + center[..., 1:2] + + # dynamic bin encoding + x_bins_enc = self.x_fc(self.spe(position=x_bins)) + y_bins_enc = self.y_fc(self.spe(position=y_bins)) + + return x_bins_enc, y_bins_enc + + def _pose_feats_to_heatmaps(self, pose_feats, x_bins_enc, y_bins_enc): + """Convert pose features to heatmaps using x and y bin encodings. + + This function transforms the given pose features into keypoint + features and then generates x and y heatmaps based on the x and y + bin encodings. If Gated attention unit (gau) is used, it applies it + to the keypoint features. The heatmaps are generated using matrix + multiplication of pose features and bin encodings. + + Args: + pose_feats (Tensor): The pose features tensor. + x_bins_enc (Tensor): The encoded x bins tensor. + y_bins_enc (Tensor): The encoded y bins tensor. + + Returns: + tuple: A tuple containing the x and y heatmaps. + """ + + kpt_feats = self.pose_to_kpts(pose_feats) + + kpt_feats = kpt_feats.reshape(*kpt_feats.shape[:-1], + self.num_keypoints, self.feat_channels) + + if hasattr(self, 'gau'): + kpt_feats = self.gau( + kpt_feats, pos_enc=getattr(self, 'pos_enc', None)) + + x_hms = torch.matmul(kpt_feats, + x_bins_enc.transpose(-1, -2).contiguous()) + y_hms = torch.matmul(kpt_feats, + y_bins_enc.transpose(-1, -2).contiguous()) + + return x_hms, y_hms + + def _decode_xy_heatmaps(self, x_hms, y_hms, bbox_cs): + """Decode x and y heatmaps to obtain coordinates. + + This function decodes x and y heatmaps to obtain the corresponding + coordinates. It adjusts the x and y bins based on the bounding box + center and scale, and then computes the weighted sum of these bins + with the heatmaps to derive the x and y coordinates. + + Args: + x_hms (Tensor): The normalized x heatmaps tensor. + y_hms (Tensor): The normalized y heatmaps tensor. + bbox_cs (Tensor): The bounding box center-scale tensor. + + Returns: + Tensor: A tensor of decoded x and y coordinates. + """ + center, scale = bbox_cs.split(2, dim=-1) + + x_bins, y_bins = self.x_bins, self.y_bins + + x_bins = x_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 0:1] + center[..., 0:1] + y_bins = y_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 1:2] + center[..., 1:2] + + x = (x_hms * x_bins.unsqueeze(1)).sum(dim=-1) + y = (y_hms * y_bins.unsqueeze(1)).sum(dim=-1) + + return torch.stack((x, y), dim=-1) + + def generate_target_heatmap(self, kpt_targets, bbox_cs, sigmas, areas): + """Generate target heatmaps for keypoints based on bounding box. + + This function calculates x and y bins adjusted by bounding box center + and scale. It then computes distances from keypoint targets to these + bins and normalizes these distances based on the areas and sigmas. + Finally, it uses these distances to generate heatmaps for x and y + coordinates under assumption of laplacian error. + + Args: + kpt_targets (Tensor): Keypoint targets tensor. + bbox_cs (Tensor): Bounding box center-scale tensor. + sigmas (Tensor): Learned deviation of grids. + areas (Tensor): Areas of GT instance assigned to grids. + + Returns: + tuple: A tuple containing the x and y heatmaps. 
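+                Each target value follows a Laplace-shaped profile,
+                ``exp(-|d| / (2 * sigma * sqrt(area))) / sigma``, where
+                ``d`` is the distance between the ground-truth coordinate
+                and the bin center.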
+ """ + + # calculate the error of each bin from the GT keypoint coordinates + center, scale = bbox_cs.split(2, dim=-1) + x_bins = self.x_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 0:1] + center[..., 0:1] + y_bins = self.y_bins.view(*((1,) * (scale.ndim-1)), -1) \ + * scale[..., 1:2] + center[..., 1:2] + + dist_x = torch.abs(kpt_targets.narrow(2, 0, 1) - x_bins.unsqueeze(1)) + dist_y = torch.abs(kpt_targets.narrow(2, 1, 1) - y_bins.unsqueeze(1)) + + # normalize + areas = areas.pow(0.5).clip(min=1).reshape(-1, 1, 1) + sigmas = sigmas.clip(min=1e-3).unsqueeze(2) + dist_x = dist_x / areas / sigmas + dist_y = dist_y / areas / sigmas + + hm_x = torch.exp(-dist_x / 2) / sigmas + hm_y = torch.exp(-dist_y / 2) / sigmas + + return hm_x, hm_y + + def forward_train(self, pose_feats, bbox_cs, grids): + """Forward pass for training. + + This function processes pose features during training. It computes + sigmas using a fully connected layer, generates bin encodings, + creates heatmaps from pose features, applies softmax to the heatmaps, + and then decodes the heatmaps to get pose predictions. + + Args: + pose_feats (Tensor): The pose features tensor. + bbox_cs (Tensor): The bounding box in the format of center & scale. + grids (Tensor): The grid coordinates. + + Returns: + tuple: A tuple containing pose predictions, heatmaps, and sigmas. + """ + sigmas = self.sigma_fc(pose_feats) + x_bins_enc, y_bins_enc = self._get_bin_enc(bbox_cs, grids) + x_hms, y_hms = self._pose_feats_to_heatmaps(pose_feats, x_bins_enc, + y_bins_enc) + x_hms, y_hms = self._apply_softmax(x_hms, y_hms) + pose_preds = self._decode_xy_heatmaps(x_hms, y_hms, bbox_cs) + return pose_preds, (x_hms, y_hms), sigmas + + @torch.no_grad() + def forward_test(self, pose_feats, bbox_cs, grids): + """Forward pass for testing. + + This function processes pose features during testing. It generates + bin encodings, creates heatmaps from pose features, and then decodes + the heatmaps to get pose predictions. + + Args: + pose_feats (Tensor): The pose features tensor. + bbox_cs (Tensor): The bounding box in the format of center & scale. + grids (Tensor): The grid coordinates. + + Returns: + Tensor: Pose predictions tensor. + """ + x_bins_enc, y_bins_enc = self._get_bin_enc(bbox_cs, grids) + x_hms, y_hms = self._pose_feats_to_heatmaps(pose_feats, x_bins_enc, + y_bins_enc) + x_hms, y_hms = self._apply_softmax(x_hms, y_hms) + pose_preds = self._decode_xy_heatmaps(x_hms, y_hms, bbox_cs) + return pose_preds + + def switch_to_deploy(self, test_cfg: Optional[Dict] = None): + if getattr(self, 'deploy', False): + return + + self._convert_pose_to_kpts() + if hasattr(self, 'gau'): + self._convert_gau() + self._convert_forward_test() + + self.deploy = True + + def _convert_pose_to_kpts(self): + """Merge BatchNorm layer into Fully Connected layer. + + This function merges a BatchNorm layer into the associated Fully + Connected layer to avoid dimension mismatch during ONNX exportation. It + adjusts the weights and biases of the FC layer to incorporate the BN + layer's parameters, and then replaces the original FC layer with the + updated one. 
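+
+        With ``std = sqrt(running_var + eps)``, the folded parameters are
+        ``W' = W * (gamma / std)`` and
+        ``b' = beta + (b - running_mean) * gamma / std``, where
+        ``gamma``/``beta`` are the BN weight and bias and ``W``/``b`` the
+        original FC parameters.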
+ """ + fc, bn = self.pose_to_kpts + + # Calculate adjusted weights and biases + std = (bn.running_var + bn.eps).sqrt() + weight = fc.weight * (bn.weight / std).unsqueeze(1) + bias = bn.bias + (fc.bias - bn.running_mean) * bn.weight / std + + # Update FC layer with adjusted parameters + fc.weight.data = weight.detach() + fc.bias.data = bias.detach() + self.pose_to_kpts = fc + + def _convert_gau(self): + """Reshape and merge tensors for Gated Attention Unit (GAU). + + This function pre-processes the gamma and beta tensors of the GAU and + handles the position encoding if available. It also redefines the GAU's + forward method to incorporate these pre-processed tensors, optimizing + the computation process. + """ + # Reshape gamma and beta tensors in advance + gamma_q = self.gau.gamma[0].view(1, 1, 1, self.gau.gamma.size(-1)) + gamma_k = self.gau.gamma[1].view(1, 1, 1, self.gau.gamma.size(-1)) + beta_q = self.gau.beta[0].view(1, 1, 1, self.gau.beta.size(-1)) + beta_k = self.gau.beta[1].view(1, 1, 1, self.gau.beta.size(-1)) + + # Adjust beta tensors with position encoding if available + if hasattr(self, 'pos_enc'): + pos_enc = self.pos_enc.reshape(1, 1, *self.pos_enc.shape) + beta_q = beta_q + pos_enc + beta_k = beta_k + pos_enc + + gamma_q = gamma_q.detach().cpu() + gamma_k = gamma_k.detach().cpu() + beta_q = beta_q.detach().cpu() + beta_k = beta_k.detach().cpu() + + @torch.no_grad() + def _forward(self, x, *args, **kwargs): + norm = torch.linalg.norm(x, dim=-1, keepdim=True) * self.ln.scale + x = x / norm.clamp(min=self.ln.eps) * self.ln.g + + uv = self.uv(x) + uv = self.act_fn(uv) + + u, v, base = torch.split(uv, [self.e, self.e, self.s], dim=-1) + if not torch.onnx.is_in_onnx_export(): + q = base * gamma_q.to(base) + beta_q.to(base) + k = base * gamma_k.to(base) + beta_k.to(base) + else: + q = base * gamma_q + beta_q + k = base * gamma_k + beta_k + qk = torch.matmul(q, k.transpose(-1, -2)) + + kernel = torch.square(torch.nn.functional.relu(qk / self.sqrt_s)) + x = u * torch.matmul(kernel, v) + x = self.o(x) + return x + + self.gau._forward = types.MethodType(_forward, self.gau) + + def _convert_forward_test(self): + """Simplify the forward test process. + + This function precomputes certain tensors and redefines the + forward_test method for the model. It includes steps for converting + pose features to keypoint features, performing dynamic bin encoding, + calculating 1-D heatmaps, and decoding these heatmaps to produce final + pose predictions. 
+ """ + x_bins_ = self.x_bins.view(1, 1, -1).detach().cpu() + y_bins_ = self.y_bins.view(1, 1, -1).detach().cpu() + dim_t = self.spe.dim_t.view(1, 1, 1, -1).detach().cpu() + + @torch.no_grad() + def _forward_test(self, pose_feats, bbox_cs, grids): + + # step 1: pose features -> keypoint features + kpt_feats = self.pose_to_kpts(pose_feats) + kpt_feats = kpt_feats.reshape(*kpt_feats.shape[:-1], + self.num_keypoints, + self.feat_channels) + kpt_feats = self.gau(kpt_feats) + + # step 2: dynamic bin encoding + center, scale = bbox_cs.split(2, dim=-1) + center = center - grids + + if not torch.onnx.is_in_onnx_export(): + x_bins = x_bins_.to(scale) * scale[..., 0:1] + center[..., 0:1] + y_bins = y_bins_.to(scale) * scale[..., 1:2] + center[..., 1:2] + freq_x = x_bins.unsqueeze(-1) / dim_t.to(scale) + freq_y = y_bins.unsqueeze(-1) / dim_t.to(scale) + else: + x_bins = x_bins_ * scale[..., 0:1] + center[..., 0:1] + y_bins = y_bins_ * scale[..., 1:2] + center[..., 1:2] + freq_x = x_bins.unsqueeze(-1) / dim_t + freq_y = y_bins.unsqueeze(-1) / dim_t + + spe_x = torch.cat((freq_x.cos(), freq_x.sin()), dim=-1) + spe_y = torch.cat((freq_y.cos(), freq_y.sin()), dim=-1) + + x_bins_enc = self.x_fc(spe_x).transpose(-1, -2).contiguous() + y_bins_enc = self.y_fc(spe_y).transpose(-1, -2).contiguous() + + # step 3: calculate 1-D heatmaps + x_hms = torch.matmul(kpt_feats, x_bins_enc) + y_hms = torch.matmul(kpt_feats, y_bins_enc) + x_hms, y_hms = self._apply_softmax(x_hms, y_hms) + + # step 4: decode 1-D heatmaps through integral + x = (x_hms * x_bins.unsqueeze(-2)).sum(dim=-1) + grids[..., 0:1] + y = (y_hms * y_bins.unsqueeze(-2)).sum(dim=-1) + grids[..., 1:2] + + keypoints = torch.stack((x, y), dim=-1) + + if not torch.onnx.is_in_onnx_export(): + keypoints = keypoints.squeeze(0) + return keypoints + + self.forward_test = types.MethodType(_forward_test, self) + + +@MODELS.register_module() +class RTMOHead(YOLOXPoseHead): + """One-stage coordinate classification head introduced in RTMO (2023). This + head incorporates dynamic coordinate classification and YOLO structure for + precise keypoint localization. + + Args: + num_keypoints (int): Number of keypoints to detect. + head_module_cfg (ConfigType): Configuration for the head module. + featmap_strides (Sequence[int]): Strides of feature maps. + Defaults to [16, 32]. + num_classes (int): Number of object classes, defaults to 1. + use_aux_loss (bool): Indicates whether to use auxiliary loss, + defaults to False. + proxy_target_cc (bool): Indicates whether to use keypoints predicted + by coordinate classification as the targets for proxy regression + branch. Defaults to False. + assigner (ConfigType): Configuration for positive sample assigning + module. + prior_generator (ConfigType): Configuration for prior generation. + bbox_padding (float): Padding for bounding boxes, defaults to 1.25. + overlaps_power (float): Power factor adopted by overlaps before they + are assigned as targets in classification loss. Defaults to 1.0. + dcc_cfg (Optional[ConfigType]): Configuration for dynamic coordinate + classification module. + loss_cls (Optional[ConfigType]): Configuration for classification loss. + loss_bbox (Optional[ConfigType]): Configuration for bounding box loss. + loss_oks (Optional[ConfigType]): Configuration for OKS loss. + loss_vis (Optional[ConfigType]): Configuration for visibility loss. + loss_mle (Optional[ConfigType]): Configuration for MLE loss. + loss_bbox_aux (Optional[ConfigType]): Configuration for auxiliary + bounding box loss. 
+ """ + + def __init__( + self, + num_keypoints: int, + head_module_cfg: ConfigType, + featmap_strides: Sequence[int] = [16, 32], + num_classes: int = 1, + use_aux_loss: bool = False, + proxy_target_cc: bool = False, + assigner: ConfigType = None, + prior_generator: ConfigType = None, + bbox_padding: float = 1.25, + overlaps_power: float = 1.0, + dcc_cfg: Optional[ConfigType] = None, + loss_cls: Optional[ConfigType] = None, + loss_bbox: Optional[ConfigType] = None, + loss_oks: Optional[ConfigType] = None, + loss_vis: Optional[ConfigType] = None, + loss_mle: Optional[ConfigType] = None, + loss_bbox_aux: Optional[ConfigType] = None, + ): + super().__init__( + num_keypoints=num_keypoints, + head_module_cfg=None, + featmap_strides=featmap_strides, + num_classes=num_classes, + use_aux_loss=use_aux_loss, + assigner=assigner, + prior_generator=prior_generator, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_oks=loss_oks, + loss_vis=loss_vis, + loss_bbox_aux=loss_bbox_aux, + overlaps_power=overlaps_power) + + self.bbox_padding = bbox_padding + + # override to ensure consistency + head_module_cfg['featmap_strides'] = featmap_strides + head_module_cfg['num_keypoints'] = num_keypoints + + # build modules + self.head_module = RTMOHeadModule(**head_module_cfg) + + self.proxy_target_cc = proxy_target_cc + if dcc_cfg is not None: + dcc_cfg['num_keypoints'] = num_keypoints + self.dcc = DCC(**dcc_cfg) + + # build losses + if loss_mle is not None: + self.loss_mle = MODELS.build(loss_mle) + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + + # 1. collect & reform predictions + cls_scores, bbox_preds, kpt_offsets, kpt_vis, pose_vecs = self.forward( + feats) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + flatten_priors = torch.cat(mlvl_priors) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = self._flatten_predictions(cls_scores) + flatten_bbox_preds = self._flatten_predictions(bbox_preds) + flatten_objectness = torch.ones_like( + flatten_cls_scores).detach().narrow(-1, 0, 1) * 1e4 + flatten_kpt_offsets = self._flatten_predictions(kpt_offsets) + flatten_kpt_vis = self._flatten_predictions(kpt_vis) + flatten_pose_vecs = self._flatten_predictions(pose_vecs) + flatten_bbox_decoded = self.decode_bbox(flatten_bbox_preds, + flatten_priors[..., :2], + flatten_priors[..., -1]) + flatten_kpt_decoded = self.decode_kpt_reg(flatten_kpt_offsets, + flatten_priors[..., :2], + flatten_priors[..., -1]) + + # 2. 
generate targets + targets = self._get_targets(flatten_priors, + flatten_cls_scores.detach(), + flatten_objectness.detach(), + flatten_bbox_decoded.detach(), + flatten_kpt_decoded.detach(), + flatten_kpt_vis.detach(), + batch_data_samples) + pos_masks, cls_targets, obj_targets, obj_weights, \ + bbox_targets, bbox_aux_targets, kpt_targets, kpt_aux_targets, \ + vis_targets, vis_weights, pos_areas, pos_priors, group_indices, \ + num_fg_imgs = targets + + num_pos = torch.tensor( + sum(num_fg_imgs), + dtype=torch.float, + device=flatten_cls_scores.device) + num_total_samples = max(reduce_mean(num_pos), 1.0) + + # 3. calculate loss + extra_info = dict(num_samples=num_total_samples) + losses = dict() + cls_preds_all = flatten_cls_scores.view(-1, self.num_classes) + + if num_pos > 0: + + # 3.1 bbox loss + bbox_preds = flatten_bbox_decoded.view(-1, 4)[pos_masks] + losses['loss_bbox'] = self.loss_bbox( + bbox_preds, bbox_targets) / num_total_samples + + if self.use_aux_loss: + if hasattr(self, 'loss_bbox_aux'): + bbox_preds_raw = flatten_bbox_preds.view(-1, 4)[pos_masks] + losses['loss_bbox_aux'] = self.loss_bbox_aux( + bbox_preds_raw, bbox_aux_targets) / num_total_samples + + # 3.2 keypoint visibility loss + kpt_vis_preds = flatten_kpt_vis.view(-1, + self.num_keypoints)[pos_masks] + losses['loss_vis'] = self.loss_vis(kpt_vis_preds, vis_targets, + vis_weights) + + # 3.3 keypoint loss + kpt_reg_preds = flatten_kpt_decoded.view(-1, self.num_keypoints, + 2)[pos_masks] + + if hasattr(self, 'loss_mle') and self.loss_mle.loss_weight > 0: + pose_vecs = flatten_pose_vecs.view( + -1, flatten_pose_vecs.size(-1))[pos_masks] + bbox_cs = torch.cat( + bbox_xyxy2cs(bbox_preds, self.bbox_padding), dim=1) + # 'cc' refers to 'cordinate classification' + kpt_cc_preds, pred_hms, sigmas = \ + self.dcc.forward_train(pose_vecs, + bbox_cs, + pos_priors[..., :2]) + target_hms = self.dcc.generate_target_heatmap( + kpt_targets, bbox_cs, sigmas, pos_areas) + losses['loss_mle'] = self.loss_mle(pred_hms, target_hms, + vis_targets) + + if self.proxy_target_cc: + # form the regression target using the coordinate + # classification predictions + with torch.no_grad(): + diff_cc = torch.norm(kpt_cc_preds - kpt_targets, dim=-1) + diff_reg = torch.norm(kpt_reg_preds - kpt_targets, dim=-1) + mask = (diff_reg > diff_cc).float() + kpt_weights_reg = vis_targets * mask + oks = self.assigner.oks_calculator(kpt_cc_preds, + kpt_targets, + vis_targets, pos_areas) + cls_targets = oks.unsqueeze(1) + + losses['loss_oks'] = self.loss_oks(kpt_reg_preds, + kpt_cc_preds.detach(), + kpt_weights_reg, pos_areas) + + else: + losses['loss_oks'] = self.loss_oks(kpt_reg_preds, kpt_targets, + vis_targets, pos_areas) + + # update the target for classification loss + # the target for the positive grids are set to the oks calculated + # using predictions and assigned ground truth instances + extra_info['overlaps'] = cls_targets + cls_targets = cls_targets.pow(self.overlaps_power).detach() + obj_targets[pos_masks] = cls_targets.to(obj_targets) + + # 3.4 classification loss + losses['loss_cls'] = self.loss_cls(cls_preds_all, obj_targets, + obj_weights) / num_total_samples + losses.update(extra_info) + + return losses + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. 
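# A condensed sketch of the `proxy_target_cc` branch above: the regression
# branch is supervised by the coordinate-classification (CC) predictions, but
# only at keypoints where the CC output is already closer to the ground truth
# than the regression output. All tensors are random stand-ins.
import torch

N, K = 6, 17
kpt_cc = torch.randn(N, K, 2)       # coordinate-classification predictions
kpt_reg = torch.randn(N, K, 2)      # regression-branch predictions
kpt_gt = torch.randn(N, K, 2)       # assigned ground-truth keypoints
vis = torch.randint(0, 2, (N, K)).float()

with torch.no_grad():
    diff_cc = torch.norm(kpt_cc - kpt_gt, dim=-1)
    diff_reg = torch.norm(kpt_reg - kpt_gt, dim=-1)
    # keep a keypoint only when the CC branch beats the regression branch
    proxy_weight = vis * (diff_reg > diff_cc).float()

# The OKS loss would then be evaluated roughly as
#   loss_oks(kpt_reg, kpt_cc.detach(), proxy_weight, areas)
print(proxy_weight.shape, int(proxy_weight.sum()))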
+ + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-scale features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (1, h, w) + or (K+1, h, w) if keypoint heatmaps are predicted + - displacements (Tensor): The predicted displacement fields + in shape (K*2, h, w) + """ + + cls_scores, bbox_preds, _, kpt_vis, pose_vecs = self.forward(feats) + + cfg = copy.deepcopy(test_cfg) + + batch_img_metas = [d.metainfo for d in batch_data_samples] + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full((featmap_size.numel(), ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten predictions + flatten_cls_scores = self._flatten_predictions(cls_scores).sigmoid() + flatten_bbox_preds = self._flatten_predictions(bbox_preds) + flatten_kpt_vis = self._flatten_predictions(kpt_vis).sigmoid() + flatten_pose_vecs = self._flatten_predictions(pose_vecs) + if flatten_pose_vecs is None: + flatten_pose_vecs = [None] * len(batch_img_metas) + flatten_bbox_preds = self.decode_bbox(flatten_bbox_preds, + flatten_priors, flatten_stride) + + results_list = [] + for (bboxes, scores, kpt_vis, pose_vecs, + img_meta) in zip(flatten_bbox_preds, flatten_cls_scores, + flatten_kpt_vis, flatten_pose_vecs, + batch_img_metas): + + score_thr = cfg.get('score_thr', 0.01) + + nms_pre = cfg.get('nms_pre', 100000) + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs_score, results = filter_scores_and_topk( + scores, score_thr, nms_pre, results=dict(labels=labels[:, 0])) + labels = results['labels'] + + bboxes = bboxes[keep_idxs_score] + kpt_vis = kpt_vis[keep_idxs_score] + grids = flatten_priors[keep_idxs_score] + stride = flatten_stride[keep_idxs_score] + + if bboxes.numel() > 0: + nms_thr = cfg.get('nms_thr', 1.0) + if nms_thr < 1.0: + + keep_idxs_nms = nms_torch(bboxes, scores, nms_thr) + bboxes = bboxes[keep_idxs_nms] + stride = stride[keep_idxs_nms] + labels = labels[keep_idxs_nms] + kpt_vis = kpt_vis[keep_idxs_nms] + scores = scores[keep_idxs_nms] + + pose_vecs = pose_vecs[keep_idxs_score][keep_idxs_nms] + bbox_cs = torch.cat( + bbox_xyxy2cs(bboxes, self.bbox_padding), dim=1) + grids = grids[keep_idxs_nms] + keypoints = self.dcc.forward_test(pose_vecs, bbox_cs, grids) + + else: + # empty prediction + keypoints = 
bboxes.new_zeros((0, self.num_keypoints, 2)) + + results = InstanceData( + scores=scores, + labels=labels, + bboxes=bboxes, + bbox_scores=scores, + keypoints=keypoints, + keypoint_scores=kpt_vis, + keypoints_visible=kpt_vis) + + input_size = img_meta['input_size'] + results.bboxes[:, 0::2].clamp_(0, input_size[0]) + results.bboxes[:, 1::2].clamp_(0, input_size[1]) + + results_list.append(results.numpy()) + + return results_list + + def switch_to_deploy(self, test_cfg: Optional[Dict]): + """Precompute and save the grid coordinates and strides.""" + + if getattr(self, 'deploy', False): + return + + self.deploy = True + + # grid generator + input_size = test_cfg.get('input_size', (640, 640)) + featmaps = [] + for s in self.featmap_strides: + featmaps.append( + torch.rand(1, 1, input_size[0] // s, input_size[1] // s)) + featmap_sizes = [fmap.shape[2:] for fmap in featmaps] + + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, dtype=torch.float32, device='cpu') + self.flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + self.flatten_priors.new_full((featmap_size.numel(), ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + self.flatten_stride = torch.cat(mlvl_strides) diff --git a/mmpose/models/heads/hybrid_heads/vis_head.py b/mmpose/models/heads/hybrid_heads/vis_head.py new file mode 100644 index 0000000000000000000000000000000000000000..6f808670ad2c23841d56043c98a78a3fa0ae9aaa --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/vis_head.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple, Union + +import torch +from torch import Tensor, nn + +from mmpose.models.utils.tta import flip_visibility +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + + +@MODELS.register_module() +class VisPredictHead(BaseHead): + """VisPredictHead must be used together with other heads. It can predict + keypoints coordinates of and their visibility simultaneously. In the + current version, it only supports top-down approaches. + + Args: + pose_cfg (Config): Config to construct keypoints prediction head + loss (Config): Config for visibility loss. Defaults to use + :class:`BCELoss` + use_sigmoid (bool): Whether to use sigmoid activation function + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__(self, + pose_cfg: ConfigType, + loss: ConfigType = dict( + type='BCELoss', use_target_weight=False, + use_sigmoid=True), + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = pose_cfg['in_channels'] + if pose_cfg.get('num_joints', None) is not None: + self.out_channels = pose_cfg['num_joints'] + elif pose_cfg.get('out_channels', None) is not None: + self.out_channels = pose_cfg['out_channels'] + else: + raise ValueError('VisPredictHead requires \'num_joints\' or' + ' \'out_channels\' in the pose_cfg.') + + self.loss_module = MODELS.build(loss) + + self.pose_head = MODELS.build(pose_cfg) + self.pose_cfg = pose_cfg + + self.use_sigmoid = loss.get('use_sigmoid', False) + + modules = [ + nn.AdaptiveAvgPool2d(1), + nn.Flatten(), + nn.Linear(self.in_channels, self.out_channels) + ] + if self.use_sigmoid: + modules.append(nn.Sigmoid()) + + self.vis_head = nn.Sequential(*modules) + + def vis_forward(self, feats: Tuple[Tensor]): + """Forward the vis_head. The input is multi scale feature maps and the + output is coordinates visibility. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates visibility. + """ + x = feats[-1] + while len(x.shape) < 4: + x.unsqueeze_(-1) + x = self.vis_head(x) + return x.reshape(-1, self.out_channels) + + def forward(self, feats: Tuple[Tensor]): + """Forward the network. The input is multi scale feature maps and the + output is coordinates and coordinates visibility. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: output coordinates and coordinates visibility. + """ + x_pose = self.pose_head.forward(feats) + x_vis = self.vis_forward(feats) + + return x_pose, x_vis + + def integrate(self, batch_vis: Tensor, + pose_preds: Union[Tuple, Predictions]) -> InstanceList: + """Add keypoints visibility prediction to pose prediction. + + Overwrite the original keypoint_scores. + """ + if isinstance(pose_preds, tuple): + pose_pred_instances, pose_pred_fields = pose_preds + else: + pose_pred_instances = pose_preds + pose_pred_fields = None + + batch_vis_np = to_numpy(batch_vis, unzip=True) + + assert len(pose_pred_instances) == len(batch_vis_np) + for index, _ in enumerate(pose_pred_instances): + pose_pred_instances[index].keypoints_visible = batch_vis_np[index] + + return pose_pred_instances, pose_pred_fields + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + posehead's ``test_cfg['output_heatmap']==True``, return both + pose and heatmap prediction; otherwise only return the pose + prediction. 
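# Structurally, the visibility branch above is just global average pooling
# followed by one linear layer (plus an optional sigmoid). The sketch below
# builds the same kind of branch on a dummy feature map; the channel and
# keypoint counts are arbitrary.
import torch
from torch import nn

in_channels, num_keypoints = 256, 17
vis_head = nn.Sequential(
    nn.AdaptiveAvgPool2d(1),            # (B, C, H, W) -> (B, C, 1, 1)
    nn.Flatten(),                       # -> (B, C)
    nn.Linear(in_channels, num_keypoints),
    nn.Sigmoid(),                       # per-keypoint visibility in [0, 1]
)

feat = torch.randn(2, in_channels, 8, 6)    # last feature map from the backbone
print(vis_head(feat).shape)                 # torch.Size([2, 17])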
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_visibility (np.ndarray): predicted keypoints + visibility in shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_vis = self.vis_forward(_feats) + _batch_vis_flip = flip_visibility( + self.vis_forward(_feats_flip), flip_indices=flip_indices) + batch_vis = (_batch_vis + _batch_vis_flip) * 0.5 + else: + batch_vis = self.vis_forward(feats) # (B, K, D) + + batch_vis.unsqueeze_(dim=1) # (B, N, K, D) + + if not self.use_sigmoid: + batch_vis = torch.sigmoid(batch_vis) + + batch_pose = self.pose_head.predict(feats, batch_data_samples, + test_cfg) + + return self.integrate(batch_vis, batch_pose) + + @torch.no_grad() + def vis_accuracy(self, vis_pred_outputs, vis_labels, vis_weights=None): + """Calculate visibility prediction accuracy.""" + if not self.use_sigmoid: + vis_pred_outputs = torch.sigmoid(vis_pred_outputs) + threshold = 0.5 + predictions = (vis_pred_outputs >= threshold).float() + correct = (predictions == vis_labels).float() + if vis_weights is not None: + accuracy = (correct * vis_weights).sum(dim=1) / ( + vis_weights.sum(dim=1) + 1e-6) + else: + accuracy = correct.mean(dim=1) + return accuracy.mean() + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
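# `vis_accuracy` above thresholds the (sigmoided) visibility scores at 0.5 and
# compares them with the binary labels, optionally weighting keypoints. A
# self-contained sketch with random inputs:
import torch

def vis_accuracy(pred: torch.Tensor, label: torch.Tensor,
                 weight: torch.Tensor = None) -> torch.Tensor:
    hit = ((pred >= 0.5).float() == label).float()          # per-keypoint hits
    if weight is not None:
        return ((hit * weight).sum(1) / (weight.sum(1) + 1e-6)).mean()
    return hit.mean(dim=1).mean()

pred = torch.rand(4, 17)                      # already passed through sigmoid
label = torch.randint(0, 2, (4, 17)).float()
print(vis_accuracy(pred, label))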
+ """ + vis_pred_outputs = self.vis_forward(feats) + vis_labels = [] + vis_weights = [] if self.loss_module.use_target_weight else None + for d in batch_data_samples: + vis_label = d.gt_instance_labels.keypoint_weights.float() + vis_labels.append(vis_label) + if vis_weights is not None: + vis_weights.append( + getattr(d.gt_instance_labels, 'keypoints_visible_weights', + vis_label.new_ones(vis_label.shape))) + vis_labels = torch.cat(vis_labels) + vis_weights = torch.cat(vis_weights) if vis_weights else None + + # calculate vis losses + losses = dict() + loss_vis = self.loss_module(vis_pred_outputs, vis_labels, vis_weights) + + losses.update(loss_vis=loss_vis) + + # calculate vis accuracy + acc_vis = self.vis_accuracy(vis_pred_outputs, vis_labels, vis_weights) + losses.update(acc_vis=acc_vis) + + # calculate keypoints losses + loss_kpt = self.pose_head.loss(feats, batch_data_samples) + losses.update(loss_kpt) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/hybrid_heads/yoloxpose_head.py b/mmpose/models/heads/hybrid_heads/yoloxpose_head.py new file mode 100644 index 0000000000000000000000000000000000000000..07ae63a32500aacdc66d091ead817cb0f7b3243b --- /dev/null +++ b/mmpose/models/heads/hybrid_heads/yoloxpose_head.py @@ -0,0 +1,782 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.evaluation.functional import nms_torch +from mmpose.models.utils import filter_scores_and_topk +from mmpose.registry import MODELS, TASK_UTILS +from mmpose.structures import PoseDataSample +from mmpose.utils import reduce_mean +from mmpose.utils.typing import (ConfigType, Features, OptSampleList, + Predictions, SampleList) + + +class YOLOXPoseHeadModule(BaseModule): + """YOLOXPose head module for one-stage human pose estimation. + + This module predicts classification scores, bounding boxes, keypoint + offsets and visibilities from multi-level feature maps. + + Args: + num_classes (int): Number of categories excluding the background + category. + num_keypoints (int): Number of keypoints defined for one instance. + in_channels (Union[int, Sequence]): Number of channels in the input + feature map. + feat_channels (int): Number of channels in the classification score + and objectness prediction branch. Defaults to 256. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_groups (int): Group number of group convolution layers in keypoint + regression branch. Defaults to 8. + channels_per_group (int): Number of channels for each group of group + convolution layers in keypoint regression branch. Defaults to 32. + featmap_strides (Sequence[int]): Downsample factor of each feature + map. Defaults to [8, 16, 32]. + conv_bias (bool or str): If specified as `auto`, it will be decided + by the norm_cfg. Bias of conv will be set as True if `norm_cfg` + is None, otherwise False. Defaults to "auto". + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. 
Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + num_keypoints: int, + in_channels: Union[int, Sequence], + num_classes: int = 1, + widen_factor: float = 1.0, + feat_channels: int = 256, + stacked_convs: int = 2, + featmap_strides: Sequence[int] = [8, 16, 32], + conv_bias: Union[bool, str] = 'auto', + conv_cfg: Optional[ConfigType] = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: Optional[ConfigType] = None, + ): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.feat_channels = int(feat_channels * widen_factor) + self.stacked_convs = stacked_convs + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.featmap_strides = featmap_strides + + if isinstance(in_channels, int): + in_channels = int(in_channels * widen_factor) + self.in_channels = in_channels + self.num_keypoints = num_keypoints + + self._init_layers() + + def _init_layers(self): + """Initialize heads for all level feature maps.""" + self._init_cls_branch() + self._init_reg_branch() + self._init_pose_branch() + + def _init_cls_branch(self): + """Initialize classification branch for all level feature maps.""" + self.conv_cls = nn.ModuleList() + for _ in self.featmap_strides: + stacked_convs = [] + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + stacked_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + self.conv_cls.append(nn.Sequential(*stacked_convs)) + + # output layers + self.out_cls = nn.ModuleList() + self.out_obj = nn.ModuleList() + for _ in self.featmap_strides: + self.out_cls.append( + nn.Conv2d(self.feat_channels, self.num_classes, 1)) + + def _init_reg_branch(self): + """Initialize classification branch for all level feature maps.""" + self.conv_reg = nn.ModuleList() + for _ in self.featmap_strides: + stacked_convs = [] + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + stacked_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + self.conv_reg.append(nn.Sequential(*stacked_convs)) + + # output layers + self.out_bbox = nn.ModuleList() + self.out_obj = nn.ModuleList() + for _ in self.featmap_strides: + self.out_bbox.append(nn.Conv2d(self.feat_channels, 4, 1)) + self.out_obj.append(nn.Conv2d(self.feat_channels, 1, 1)) + + def _init_pose_branch(self): + self.conv_pose = nn.ModuleList() + + for _ in self.featmap_strides: + stacked_convs = [] + for i in range(self.stacked_convs * 2): + in_chn = self.in_channels if i == 0 else self.feat_channels + stacked_convs.append( + ConvModule( + in_chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + self.conv_pose.append(nn.Sequential(*stacked_convs)) + + # output layers + self.out_kpt = nn.ModuleList() + 
self.out_kpt_vis = nn.ModuleList() + for _ in self.featmap_strides: + self.out_kpt.append( + nn.Conv2d(self.feat_channels, self.num_keypoints * 2, 1)) + self.out_kpt_vis.append( + nn.Conv2d(self.feat_channels, self.num_keypoints, 1)) + + def init_weights(self): + """Initialize weights of the head.""" + # Use prior in model initialization to improve stability + super().init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_obj in zip(self.out_cls, self.out_obj): + conv_cls.bias.data.fill_(bias_init) + conv_obj.bias.data.fill_(bias_init) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + cls_scores (List[Tensor]): Classification scores for each level. + objectnesses (List[Tensor]): Objectness scores for each level. + bbox_preds (List[Tensor]): Bounding box predictions for each level. + kpt_offsets (List[Tensor]): Keypoint offsets for each level. + kpt_vis (List[Tensor]): Keypoint visibilities for each level. + """ + + cls_scores, bbox_preds, objectnesses = [], [], [] + kpt_offsets, kpt_vis = [], [] + + for i in range(len(x)): + + cls_feat = self.conv_cls[i](x[i]) + reg_feat = self.conv_reg[i](x[i]) + pose_feat = self.conv_pose[i](x[i]) + + cls_scores.append(self.out_cls[i](cls_feat)) + objectnesses.append(self.out_obj[i](reg_feat)) + bbox_preds.append(self.out_bbox[i](reg_feat)) + kpt_offsets.append(self.out_kpt[i](pose_feat)) + kpt_vis.append(self.out_kpt_vis[i](pose_feat)) + + return cls_scores, objectnesses, bbox_preds, kpt_offsets, kpt_vis + + +@MODELS.register_module() +class YOLOXPoseHead(BaseModule): + + def __init__( + self, + num_keypoints: int, + head_module_cfg: Optional[ConfigType] = None, + featmap_strides: Sequence[int] = [8, 16, 32], + num_classes: int = 1, + use_aux_loss: bool = False, + assigner: ConfigType = None, + prior_generator: ConfigType = None, + loss_cls: Optional[ConfigType] = None, + loss_obj: Optional[ConfigType] = None, + loss_bbox: Optional[ConfigType] = None, + loss_oks: Optional[ConfigType] = None, + loss_vis: Optional[ConfigType] = None, + loss_bbox_aux: Optional[ConfigType] = None, + loss_kpt_aux: Optional[ConfigType] = None, + overlaps_power: float = 1.0, + ): + super().__init__() + + self.featmap_sizes = None + self.num_classes = num_classes + self.featmap_strides = featmap_strides + self.use_aux_loss = use_aux_loss + self.num_keypoints = num_keypoints + self.overlaps_power = overlaps_power + + self.prior_generator = TASK_UTILS.build(prior_generator) + if head_module_cfg is not None: + head_module_cfg['featmap_strides'] = featmap_strides + head_module_cfg['num_keypoints'] = num_keypoints + self.head_module = YOLOXPoseHeadModule(**head_module_cfg) + self.assigner = TASK_UTILS.build(assigner) + + # build losses + self.loss_cls = MODELS.build(loss_cls) + if loss_obj is not None: + self.loss_obj = MODELS.build(loss_obj) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_oks = MODELS.build(loss_oks) + self.loss_vis = MODELS.build(loss_vis) + if loss_bbox_aux is not None: + self.loss_bbox_aux = MODELS.build(loss_bbox_aux) + if loss_kpt_aux is not None: + self.loss_kpt_aux = MODELS.build(loss_kpt_aux) + + def forward(self, feats: Features): + assert isinstance(feats, (tuple, list)) + return self.head_module(feats) + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and 
data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + + # 1. collect & reform predictions + cls_scores, objectnesses, bbox_preds, kpt_offsets, \ + kpt_vis = self.forward(feats) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + flatten_priors = torch.cat(mlvl_priors) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = self._flatten_predictions(cls_scores) + flatten_bbox_preds = self._flatten_predictions(bbox_preds) + flatten_objectness = self._flatten_predictions(objectnesses) + flatten_kpt_offsets = self._flatten_predictions(kpt_offsets) + flatten_kpt_vis = self._flatten_predictions(kpt_vis) + flatten_bbox_decoded = self.decode_bbox(flatten_bbox_preds, + flatten_priors[..., :2], + flatten_priors[..., -1]) + flatten_kpt_decoded = self.decode_kpt_reg(flatten_kpt_offsets, + flatten_priors[..., :2], + flatten_priors[..., -1]) + + # 2. generate targets + targets = self._get_targets(flatten_priors, + flatten_cls_scores.detach(), + flatten_objectness.detach(), + flatten_bbox_decoded.detach(), + flatten_kpt_decoded.detach(), + flatten_kpt_vis.detach(), + batch_data_samples) + pos_masks, cls_targets, obj_targets, obj_weights, \ + bbox_targets, bbox_aux_targets, kpt_targets, kpt_aux_targets, \ + vis_targets, vis_weights, pos_areas, pos_priors, group_indices, \ + num_fg_imgs = targets + + num_pos = torch.tensor( + sum(num_fg_imgs), + dtype=torch.float, + device=flatten_cls_scores.device) + num_total_samples = max(reduce_mean(num_pos), 1.0) + + # 3. 
calculate loss + # 3.1 objectness loss + losses = dict() + + obj_preds = flatten_objectness.view(-1, 1) + losses['loss_obj'] = self.loss_obj(obj_preds, obj_targets, + obj_weights) / num_total_samples + + if num_pos > 0: + # 3.2 bbox loss + bbox_preds = flatten_bbox_decoded.view(-1, 4)[pos_masks] + losses['loss_bbox'] = self.loss_bbox( + bbox_preds, bbox_targets) / num_total_samples + + # 3.3 keypoint loss + kpt_preds = flatten_kpt_decoded.view(-1, self.num_keypoints, + 2)[pos_masks] + losses['loss_kpt'] = self.loss_oks(kpt_preds, kpt_targets, + vis_targets, pos_areas) + + # 3.4 keypoint visibility loss + kpt_vis_preds = flatten_kpt_vis.view(-1, + self.num_keypoints)[pos_masks] + losses['loss_vis'] = self.loss_vis(kpt_vis_preds, vis_targets, + vis_weights) + + # 3.5 classification loss + cls_preds = flatten_cls_scores.view(-1, + self.num_classes)[pos_masks] + losses['overlaps'] = cls_targets + cls_targets = cls_targets.pow(self.overlaps_power).detach() + losses['loss_cls'] = self.loss_cls(cls_preds, + cls_targets) / num_total_samples + + if self.use_aux_loss: + if hasattr(self, 'loss_bbox_aux'): + # 3.6 auxiliary bbox regression loss + bbox_preds_raw = flatten_bbox_preds.view(-1, 4)[pos_masks] + losses['loss_bbox_aux'] = self.loss_bbox_aux( + bbox_preds_raw, bbox_aux_targets) / num_total_samples + + if hasattr(self, 'loss_kpt_aux'): + # 3.7 auxiliary keypoint regression loss + kpt_preds_raw = flatten_kpt_offsets.view( + -1, self.num_keypoints, 2)[pos_masks] + kpt_weights = vis_targets / vis_targets.size(-1) + losses['loss_kpt_aux'] = self.loss_kpt_aux( + kpt_preds_raw, kpt_aux_targets, kpt_weights) + + return losses + + @torch.no_grad() + def _get_targets( + self, + priors: Tensor, + batch_cls_scores: Tensor, + batch_objectness: Tensor, + batch_decoded_bboxes: Tensor, + batch_decoded_kpts: Tensor, + batch_kpt_vis: Tensor, + batch_data_samples: SampleList, + ): + num_imgs = len(batch_data_samples) + + # use clip to avoid nan + batch_cls_scores = batch_cls_scores.clip(min=-1e4, max=1e4).sigmoid() + batch_objectness = batch_objectness.clip(min=-1e4, max=1e4).sigmoid() + batch_kpt_vis = batch_kpt_vis.clip(min=-1e4, max=1e4).sigmoid() + batch_cls_scores[torch.isnan(batch_cls_scores)] = 0 + batch_objectness[torch.isnan(batch_objectness)] = 0 + + targets_each = [] + for i in range(num_imgs): + target = self._get_targets_single(priors, batch_cls_scores[i], + batch_objectness[i], + batch_decoded_bboxes[i], + batch_decoded_kpts[i], + batch_kpt_vis[i], + batch_data_samples[i]) + targets_each.append(target) + + targets = list(zip(*targets_each)) + for i, target in enumerate(targets): + if torch.is_tensor(target[0]): + target = tuple(filter(lambda x: x.size(0) > 0, target)) + if len(target) > 0: + targets[i] = torch.cat(target) + + foreground_masks, cls_targets, obj_targets, obj_weights, \ + bbox_targets, kpt_targets, vis_targets, vis_weights, pos_areas, \ + pos_priors, group_indices, num_pos_per_img = targets + + # post-processing for targets + if self.use_aux_loss: + bbox_cxcy = (bbox_targets[:, :2] + bbox_targets[:, 2:]) / 2.0 + bbox_wh = bbox_targets[:, 2:] - bbox_targets[:, :2] + bbox_aux_targets = torch.cat([ + (bbox_cxcy - pos_priors[:, :2]) / pos_priors[:, 2:], + torch.log(bbox_wh / pos_priors[:, 2:] + 1e-8) + ], + dim=-1) + + kpt_aux_targets = (kpt_targets - pos_priors[:, None, :2]) \ + / pos_priors[:, None, 2:] + else: + bbox_aux_targets, kpt_aux_targets = None, None + + return (foreground_masks, cls_targets, obj_targets, obj_weights, + bbox_targets, bbox_aux_targets, kpt_targets, 
kpt_aux_targets, + vis_targets, vis_weights, pos_areas, pos_priors, group_indices, + num_pos_per_img) + + @torch.no_grad() + def _get_targets_single( + self, + priors: Tensor, + cls_scores: Tensor, + objectness: Tensor, + decoded_bboxes: Tensor, + decoded_kpts: Tensor, + kpt_vis: Tensor, + data_sample: PoseDataSample, + ) -> tuple: + """Compute classification, bbox, keypoints and objectness targets for + priors in a single image. + + Args: + priors (Tensor): All priors of one image, a 2D-Tensor with shape + [num_priors, 4] in [cx, xy, stride_w, stride_y] format. + cls_scores (Tensor): Classification predictions of one image, + a 2D-Tensor with shape [num_priors, num_classes] + objectness (Tensor): Objectness predictions of one image, + a 1D-Tensor with shape [num_priors] + decoded_bboxes (Tensor): Decoded bboxes predictions of one image, + a 2D-Tensor with shape [num_priors, 4] in xyxy format. + decoded_kpts (Tensor): Decoded keypoints predictions of one image, + a 3D-Tensor with shape [num_priors, num_keypoints, 2]. + kpt_vis (Tensor): Keypoints visibility predictions of one image, + a 2D-Tensor with shape [num_priors, num_keypoints]. + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It should includes ``bboxes`` and ``labels`` + attributes. + data_sample (PoseDataSample): Data sample that contains the ground + truth annotations for current image. + + Returns: + tuple: A tuple containing various target tensors for training: + - foreground_mask (Tensor): Binary mask indicating foreground + priors. + - cls_target (Tensor): Classification targets. + - obj_target (Tensor): Objectness targets. + - obj_weight (Tensor): Weights for objectness targets. + - bbox_target (Tensor): BBox targets. + - kpt_target (Tensor): Keypoints targets. + - vis_target (Tensor): Visibility targets for keypoints. + - vis_weight (Tensor): Weights for keypoints visibility + targets. + - pos_areas (Tensor): Areas of positive samples. + - pos_priors (Tensor): Priors corresponding to positive + samples. + - group_index (List[Tensor]): Indices of groups for positive + samples. + - num_pos_per_img (int): Number of positive samples. 
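# The auxiliary bbox targets above are the inverse of `decode_bbox` defined
# further down: center offsets are expressed in units of the prior's stride and
# width/height in log space. A round-trip on a toy box shows the two are
# consistent (up to the 1e-8 stabilizer); all numbers are arbitrary.
import torch

prior_xy = torch.tensor([160.0, 96.0])     # grid-cell center
stride = torch.tensor([16.0, 16.0])        # stride of this pyramid level

bbox = torch.tensor([150.0, 80.0, 190.0, 140.0])   # (tl_x, tl_y, br_x, br_y)
cxcy = (bbox[:2] + bbox[2:]) / 2.0
wh = bbox[2:] - bbox[:2]

# encode: the target the raw bbox branch is trained against
target = torch.cat([(cxcy - prior_xy) / stride, torch.log(wh / stride + 1e-8)])

# decode: what `decode_bbox` does with such a prediction
xys = target[:2] * stride + prior_xy
whs = target[2:].exp() * stride
decoded = torch.cat([xys - whs / 2, xys + whs / 2])
print(torch.allclose(decoded, bbox, atol=1e-4))    # True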
+ """ + # TODO: change the shape of objectness to [num_priors] + num_priors = priors.size(0) + gt_instances = data_sample.gt_instance_labels + gt_fields = data_sample.get('gt_fields', dict()) + num_gts = len(gt_instances) + + # No target + if num_gts == 0: + cls_target = cls_scores.new_zeros((0, self.num_classes)) + bbox_target = cls_scores.new_zeros((0, 4)) + obj_target = cls_scores.new_zeros((num_priors, 1)) + obj_weight = cls_scores.new_ones((num_priors, 1)) + kpt_target = cls_scores.new_zeros((0, self.num_keypoints, 2)) + vis_target = cls_scores.new_zeros((0, self.num_keypoints)) + vis_weight = cls_scores.new_zeros((0, self.num_keypoints)) + pos_areas = cls_scores.new_zeros((0, )) + pos_priors = priors[:0] + foreground_mask = cls_scores.new_zeros(num_priors).bool() + return (foreground_mask, cls_target, obj_target, obj_weight, + bbox_target, kpt_target, vis_target, vis_weight, pos_areas, + pos_priors, [], 0) + + # assign positive samples + scores = cls_scores * objectness + pred_instances = InstanceData( + bboxes=decoded_bboxes, + scores=scores.sqrt_(), + priors=priors, + keypoints=decoded_kpts, + keypoints_visible=kpt_vis, + ) + assign_result = self.assigner.assign( + pred_instances=pred_instances, gt_instances=gt_instances) + + # sampling + pos_inds = torch.nonzero( + assign_result['gt_inds'] > 0, as_tuple=False).squeeze(-1).unique() + num_pos_per_img = pos_inds.size(0) + pos_gt_labels = assign_result['labels'][pos_inds] + pos_assigned_gt_inds = assign_result['gt_inds'][pos_inds] - 1 + + # bbox target + bbox_target = gt_instances.bboxes[pos_assigned_gt_inds.long()] + + # cls target + max_overlaps = assign_result['max_overlaps'][pos_inds] + cls_target = F.one_hot(pos_gt_labels, + self.num_classes) * max_overlaps.unsqueeze(-1) + + # pose targets + kpt_target = gt_instances.keypoints[pos_assigned_gt_inds] + vis_target = gt_instances.keypoints_visible[pos_assigned_gt_inds] + if 'keypoints_visible_weights' in gt_instances: + vis_weight = gt_instances.keypoints_visible_weights[ + pos_assigned_gt_inds] + else: + vis_weight = vis_target.new_ones(vis_target.shape) + pos_areas = gt_instances.areas[pos_assigned_gt_inds] + + # obj target + obj_target = torch.zeros_like(objectness) + obj_target[pos_inds] = 1 + + invalid_mask = gt_fields.get('heatmap_mask', None) + if invalid_mask is not None and (invalid_mask != 0.0).any(): + # ignore the tokens that predict the unlabled instances + pred_vis = (kpt_vis.unsqueeze(-1) > 0.3).float() + mean_kpts = (decoded_kpts * pred_vis).sum(dim=1) / pred_vis.sum( + dim=1).clamp(min=1e-8) + mean_kpts = mean_kpts.reshape(1, -1, 1, 2) + wh = invalid_mask.shape[-1] + grids = mean_kpts / (wh - 1) * 2 - 1 + mask = invalid_mask.unsqueeze(0).float() + weight = F.grid_sample( + mask, grids, mode='bilinear', padding_mode='zeros') + obj_weight = 1.0 - weight.reshape(num_priors, 1) + else: + obj_weight = obj_target.new_ones(obj_target.shape) + + # misc + foreground_mask = torch.zeros_like(objectness.squeeze()).to(torch.bool) + foreground_mask[pos_inds] = 1 + pos_priors = priors[pos_inds] + group_index = [ + torch.where(pos_assigned_gt_inds == num)[0] + for num in torch.unique(pos_assigned_gt_inds) + ] + + return (foreground_mask, cls_target, obj_target, obj_weight, + bbox_target, kpt_target, vis_target, vis_weight, pos_areas, + pos_priors, group_index, num_pos_per_img) + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. 
+ + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-scale features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (1, h, w) + or (K+1, h, w) if keypoint heatmaps are predicted + - displacements (Tensor): The predicted displacement fields + in shape (K*2, h, w) + """ + + cls_scores, objectnesses, bbox_preds, kpt_offsets, \ + kpt_vis = self.forward(feats) + + cfg = copy.deepcopy(test_cfg) + + batch_img_metas = [d.metainfo for d in batch_data_samples] + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full((featmap_size.numel(), ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = self._flatten_predictions(cls_scores).sigmoid() + flatten_bbox_preds = self._flatten_predictions(bbox_preds) + flatten_objectness = self._flatten_predictions(objectnesses).sigmoid() + flatten_kpt_offsets = self._flatten_predictions(kpt_offsets) + flatten_kpt_vis = self._flatten_predictions(kpt_vis).sigmoid() + flatten_bbox_preds = self.decode_bbox(flatten_bbox_preds, + flatten_priors, flatten_stride) + flatten_kpt_reg = self.decode_kpt_reg(flatten_kpt_offsets, + flatten_priors, flatten_stride) + + results_list = [] + for (bboxes, scores, objectness, kpt_reg, kpt_vis, + img_meta) in zip(flatten_bbox_preds, flatten_cls_scores, + flatten_objectness, flatten_kpt_reg, + flatten_kpt_vis, batch_img_metas): + + score_thr = cfg.get('score_thr', 0.01) + scores *= objectness + + nms_pre = cfg.get('nms_pre', 100000) + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs_score, results = filter_scores_and_topk( + scores, score_thr, nms_pre, results=dict(labels=labels[:, 0])) + labels = results['labels'] + + bboxes = bboxes[keep_idxs_score] + kpt_vis = kpt_vis[keep_idxs_score] + stride = flatten_stride[keep_idxs_score] + keypoints = kpt_reg[keep_idxs_score] + + if bboxes.numel() > 0: + nms_thr = cfg.get('nms_thr', 1.0) + if nms_thr < 1.0: + keep_idxs_nms = nms_torch(bboxes, scores, nms_thr) + bboxes = bboxes[keep_idxs_nms] + stride = stride[keep_idxs_nms] + labels = labels[keep_idxs_nms] + kpt_vis = kpt_vis[keep_idxs_nms] + keypoints = keypoints[keep_idxs_nms] + scores = scores[keep_idxs_nms] + + results = InstanceData( + scores=scores, + 
labels=labels, + bboxes=bboxes, + bbox_scores=scores, + keypoints=keypoints, + keypoint_scores=kpt_vis, + keypoints_visible=kpt_vis) + + input_size = img_meta['input_size'] + results.bboxes[:, 0::2].clamp_(0, input_size[0]) + results.bboxes[:, 1::2].clamp_(0, input_size[1]) + + results_list.append(results.numpy()) + + return results_list + + def decode_bbox(self, pred_bboxes: torch.Tensor, priors: torch.Tensor, + stride: Union[torch.Tensor, int]) -> torch.Tensor: + """Decode regression results (delta_x, delta_y, log_w, log_h) to + bounding boxes (tl_x, tl_y, br_x, br_y). + + Note: + - batch size: B + - token number: N + + Args: + pred_bboxes (torch.Tensor): Encoded boxes with shape (B, N, 4), + representing (delta_x, delta_y, log_w, log_h) for each box. + priors (torch.Tensor): Anchors coordinates, with shape (N, 2). + stride (torch.Tensor | int): Strides of the bboxes. It can be a + single value if the same stride applies to all boxes, or it + can be a tensor of shape (N, ) if different strides are used + for each box. + + Returns: + torch.Tensor: Decoded bounding boxes with shape (N, 4), + representing (tl_x, tl_y, br_x, br_y) for each box. + """ + stride = stride.view(1, stride.size(0), 1) + priors = priors.view(1, priors.size(0), 2) + + xys = (pred_bboxes[..., :2] * stride) + priors + whs = pred_bboxes[..., 2:].exp() * stride + + # Calculate bounding box corners + tl_x = xys[..., 0] - whs[..., 0] / 2 + tl_y = xys[..., 1] - whs[..., 1] / 2 + br_x = xys[..., 0] + whs[..., 0] / 2 + br_y = xys[..., 1] + whs[..., 1] / 2 + + decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) + return decoded_bboxes + + def decode_kpt_reg(self, pred_kpt_offsets: torch.Tensor, + priors: torch.Tensor, + stride: torch.Tensor) -> torch.Tensor: + """Decode regression results (delta_x, delta_y) to keypoints + coordinates (x, y). + + Args: + pred_kpt_offsets (torch.Tensor): Encoded keypoints offsets with + shape (batch_size, num_anchors, num_keypoints, 2). + priors (torch.Tensor): Anchors coordinates with shape + (num_anchors, 2). + stride (torch.Tensor): Strides of the anchors. + + Returns: + torch.Tensor: Decoded keypoints coordinates with shape + (batch_size, num_boxes, num_keypoints, 2). + """ + stride = stride.view(1, stride.size(0), 1, 1) + priors = priors.view(1, priors.size(0), 1, 2) + pred_kpt_offsets = pred_kpt_offsets.reshape( + *pred_kpt_offsets.shape[:-1], self.num_keypoints, 2) + + decoded_kpts = pred_kpt_offsets * stride + priors + return decoded_kpts + + def _flatten_predictions(self, preds: List[Tensor]): + """Flattens the predictions from a list of tensors to a single + tensor.""" + if len(preds) == 0: + return None + + preds = [x.permute(0, 2, 3, 1).flatten(1, 2) for x in preds] + return torch.cat(preds, dim=1) diff --git a/mmpose/models/heads/regression_heads/__init__.py b/mmpose/models/heads/regression_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..729d193b51981b9819290a3787f8292c72bc16d4 --- /dev/null +++ b/mmpose/models/heads/regression_heads/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
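# `decode_kpt_reg` above maps per-prior keypoint offsets back to image
# coordinates by scaling with the prior's stride and adding the prior center.
# A toy example with one prior and three keypoints (arbitrary numbers):
import torch

prior = torch.tensor([160.0, 96.0])     # (x, y) of the grid cell
stride = torch.tensor(16.0)
offsets = torch.tensor([[0.0, 0.0], [1.5, -0.5], [-2.0, 3.0]])   # (K, 2)

keypoints = offsets * stride + prior    # (K, 2) image-space coordinates
print(keypoints)
# tensor([[160.,  96.], [184.,  88.], [128., 144.]])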
+from .dsnt_head import DSNTHead +from .integral_regression_head import IntegralRegressionHead +from .motion_regression_head import MotionRegressionHead +from .regression_head import RegressionHead +from .rle_head import RLEHead +from .temporal_regression_head import TemporalRegressionHead +from .trajectory_regression_head import TrajectoryRegressionHead + +__all__ = [ + 'RegressionHead', 'IntegralRegressionHead', 'DSNTHead', 'RLEHead', + 'TemporalRegressionHead', 'TrajectoryRegressionHead', + 'MotionRegressionHead' +] diff --git a/mmpose/models/heads/regression_heads/dsnt_head.py b/mmpose/models/heads/regression_heads/dsnt_head.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd49e385db31c996de086419285e2f5fa7748b3 --- /dev/null +++ b/mmpose/models/heads/regression_heads/dsnt_head.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from mmengine.logging import MessageHub +from torch import Tensor + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import ConfigType, OptConfigType, OptSampleList +from .integral_regression_head import IntegralRegressionHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class DSNTHead(IntegralRegressionHead): + """Top-down integral regression head introduced in `DSNT`_ by Nibali et + al(2018). The head contains a differentiable spatial to numerical transform + (DSNT) layer that do soft-argmax operation on the predicted heatmaps to + regress the coordinates. + + This head is used for algorithms that require supervision of heatmaps + in `DSNT` approach. + + Args: + in_channels (int | sequence[int]): Number of input channels + in_featuremap_size (int | sequence[int]): Size of input feature map + num_joints (int): Number of joints + lambda_t (int): Discard heatmap-based loss when current + epoch > lambda_t. Defaults to -1. + debias (bool): Whether to remove the bias of Integral Pose Regression. + see `Removing the Bias of Integral Pose Regression`_ by Gu et al + (2021). Defaults to ``False``. + beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config for keypoint loss. Defaults to use + :class:`DSNTLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`DSNT`: https://arxiv.org/abs/1801.07372 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + in_featuremap_size: Tuple[int, int], + num_joints: int, + lambda_t: int = -1, + debias: bool = False, + beta: float = 1.0, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + super().__init__( + in_channels=in_channels, + in_featuremap_size=in_featuremap_size, + num_joints=num_joints, + debias=debias, + beta=beta, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer, + loss=loss, + decoder=decoder, + init_cfg=init_cfg) + + self.lambda_t = lambda_t + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_coords, pred_heatmaps = self.forward(inputs) + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + + input_list = [pred_coords, pred_heatmaps] + target_list = [keypoint_labels, gt_heatmaps] + # calculate losses + losses = dict() + + loss_list = self.loss_module(input_list, target_list, keypoint_weights) + + loss = loss_list[0] + loss_list[1] + + if self.lambda_t > 0: + mh = MessageHub.get_current_instance() + cur_epoch = mh.get_info('epoch') + if cur_epoch >= self.lambda_t: + loss = loss_list[0] + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/regression_heads/integral_regression_head.py b/mmpose/models/heads/regression_heads/integral_regression_head.py new file mode 100644 index 0000000000000000000000000000000000000000..9046d94ad4318a19a3037f839ee054a445c80c68 --- /dev/null +++ b/mmpose/models/heads/regression_heads/integral_regression_head.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates, flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from .. 
import HeatmapHead +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class IntegralRegressionHead(BaseHead): + """Top-down integral regression head introduced in `IPR`_ by Xiao et + al(2018). The head contains a differentiable spatial to numerical transform + (DSNT) layer that do soft-argmax operation on the predicted heatmaps to + regress the coordinates. + + This head is used for algorithms that only supervise the coordinates. + + Args: + in_channels (int | sequence[int]): Number of input channels + in_featuremap_size (int | sequence[int]): Size of input feature map + num_joints (int): Number of joints + debias (bool): Whether to remove the bias of Integral Pose Regression. + see `Removing the Bias of Integral Pose Regression`_ by Gu et al + (2021). Defaults to ``False``. + beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`IPR`: https://arxiv.org/abs/1711.08229 + .. 
_`Debias`: + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + in_featuremap_size: Tuple[int, int], + num_joints: int, + debias: bool = False, + beta: float = 1.0, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='SmoothL1Loss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.debias = debias + self.beta = beta + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 + if num_deconv != 0: + + self.heatmap_size = tuple( + [s * (2**num_deconv) for s in in_featuremap_size]) + + # deconv layers + 1x1 conv + self.simplebaseline_head = HeatmapHead( + in_channels=in_channels, + out_channels=num_joints, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer) + + if final_layer is not None: + in_channels = num_joints + else: + in_channels = deconv_out_channels[-1] + + else: + self.simplebaseline_head = None + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=num_joints, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = None + + self.heatmap_size = in_featuremap_size + + if isinstance(in_channels, list): + raise ValueError( + f'{self.__class__.__name__} does not support selecting ' + 'multiple input features.') + + W, H = self.heatmap_size + self.linspace_x = torch.arange(0.0, 1.0 * W, 1).reshape(1, 1, 1, W) / W + self.linspace_y = torch.arange(0.0, 1.0 * H, 1).reshape(1, 1, H, 1) / H + + self.linspace_x = nn.Parameter(self.linspace_x, requires_grad=False) + self.linspace_y = nn.Parameter(self.linspace_y, requires_grad=False) + + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _linear_expectation(self, heatmaps: Tensor, + linspace: Tensor) -> Tensor: + """Calculate linear expectation.""" + + B, N, _, _ = heatmaps.shape + heatmaps = heatmaps.mul(linspace).reshape(B, N, -1) + expectation = torch.sum(heatmaps, dim=2, keepdim=True) + + return expectation + + def _flat_softmax(self, featmaps: Tensor) -> Tensor: + """Use Softmax to normalize the featmaps in depthwise.""" + + _, N, H, W = featmaps.shape + + featmaps = featmaps.reshape(-1, N, H * W) + heatmaps = F.softmax(featmaps, dim=2) + + return heatmaps.reshape(-1, N, H, W) + + def forward(self, feats: Tuple[Tensor]) -> Union[Tensor, Tuple[Tensor]]: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). 
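+
+ Example (a rough shape sketch with illustrative sizes, assuming the
+ default deconv settings; the numbers are hypothetical and not taken
+ from any real config)::
+
+ >>> head = IntegralRegressionHead(
+ ...     in_channels=32, in_featuremap_size=(6, 8), num_joints=17)
+ >>> feats = (torch.rand(2, 32, 8, 6),)
+ >>> coords, heatmaps = head.forward(feats)
+ >>> # coords: (2, 17, 2), soft-argmax expectations normalized to [0, 1)
+ >>> # heatmaps: (2, 17, 64, 48), softmax-normalized per keypoint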
+ """ + if self.simplebaseline_head is None: + feats = feats[-1] + if self.final_layer is not None: + feats = self.final_layer(feats) + else: + feats = self.simplebaseline_head(feats) + + heatmaps = self._flat_softmax(feats * self.beta) + + pred_x = self._linear_expectation(heatmaps, self.linspace_x) + pred_y = self._linear_expectation(heatmaps, self.linspace_y) + + if self.debias: + B, N, H, W = feats.shape + C = feats.reshape(B, N, H * W).exp().sum(dim=2).reshape(B, N, 1) + pred_x = C / (C - 1) * (pred_x - 1 / (2 * C)) + pred_y = C / (C - 1) * (pred_y - 1 / (2 * C)) + + coords = torch.cat([pred_x, pred_y], dim=-1) + return coords, heatmaps + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + + _batch_coords, _batch_heatmaps = self.forward(_feats) + + _batch_coords_flip, _batch_heatmaps_flip = self.forward( + _feats_flip) + _batch_coords_flip = flip_coordinates( + _batch_coords_flip, + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + _batch_heatmaps_flip = flip_heatmaps( + _batch_heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + batch_coords, batch_heatmaps = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_coords, _ = self.forward(inputs) + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + + # TODO: multi-loss calculation + loss 
= self.loss_module(pred_coords, keypoint_labels, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to load weights of deconv layers from + :class:`HeatmapHead` into `simplebaseline_head`. + + The hook will be automatically registered during initialization. + """ + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + + k_new = _k + k_parts = k.split('.') + if self.simplebaseline_head is not None: + if k_parts[0] == 'conv_layers': + k_new = ( + prefix + 'simplebaseline_head.deconv_layers.' + + '.'.join(k_parts[1:])) + elif k_parts[0] == 'final_layer': + k_new = prefix + 'simplebaseline_head.' + k + + state_dict[k_new] = v diff --git a/mmpose/models/heads/regression_heads/motion_regression_head.py b/mmpose/models/heads/regression_heads/motion_regression_head.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad94973459156f6f44eadac02656bbf6c8a39b8 --- /dev/null +++ b/mmpose/models/heads/regression_heads/motion_regression_head.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from typing import Tuple + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_mpjpe +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + + +@MODELS.register_module() +class MotionRegressionHead(BaseHead): + """Regression head of `MotionBERT`_ by Zhu et al (2022). + + Args: + in_channels (int): Number of input channels. Default: 256. + out_channels (int): Number of output channels. Default: 3. + embedding_size (int): Number of embedding channels. Default: 512. + loss (Config): Config for keypoint loss. Defaults to use + :class:`MSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`MotionBERT`: https://arxiv.org/abs/2210.06551 + """ + + _version = 2 + + def __init__(self, + in_channels: int = 256, + out_channels: int = 3, + embedding_size: int = 512, + loss: ConfigType = dict( + type='MSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.pre_logits = nn.Sequential( + OrderedDict([('fc', nn.Linear(in_channels, embedding_size)), + ('act', nn.Tanh())])) + self.fc = nn.Linear( + embedding_size, + out_channels) if embedding_size > 0 else nn.Identity() + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: Output coordinates (and sigmas[optional]). + """ + x = feats # (B, F, K, in_channels) + x = self.pre_logits(x) # (B, F, K, embedding_size) + x = self.fc(x) # (B, F, K, out_channels) + + return x + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs. + + Returns: + preds (sequence[InstanceData]): Prediction results. + Each contains the following fields: + + - keypoints: Predicted keypoints of shape (B, N, K, D). + - keypoint_scores: Scores of predicted keypoints of shape + (B, N, K). + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_coords = self.forward(_feats) + _batch_coords_flip = torch.stack([ + flip_coordinates( + _batch_coord_flip, + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=(1, 1)) + for _batch_coord_flip in self.forward(_feats_flip) + ], + dim=0) + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) + + # Restore global position with camera_param and factor + camera_param = batch_data_samples[0].metainfo.get('camera_param', None) + if camera_param is not None: + w = torch.stack([ + torch.from_numpy(np.array([b.metainfo['camera_param']['w']])) + for b in batch_data_samples + ]) + h = torch.stack([ + torch.from_numpy(np.array([b.metainfo['camera_param']['h']])) + for b in batch_data_samples + ]) + else: + w = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples + ]) + h = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples + ]) + + factor = batch_data_samples[0].metainfo.get('factor', None) + if factor is not None: + factor = torch.stack([ + torch.from_numpy(b.metainfo['factor']) + for b in batch_data_samples + ]) + else: + factor = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples + ]) + + preds = self.decode((batch_coords, w, h, factor)) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + 
lifting_target_label = torch.stack([ + d.gt_instance_labels.lifting_target_label + for d in batch_data_samples + ]) + lifting_target_weight = torch.stack([ + d.gt_instance_labels.lifting_target_weight + for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, lifting_target_label, + lifting_target_weight.unsqueeze(-1)) + + losses.update(loss_pose3d=loss) + + # calculate accuracy + mpjpe_err = keypoint_mpjpe( + pred=to_numpy(pred_outputs), + gt=to_numpy(lifting_target_label), + mask=to_numpy(lifting_target_weight) > 0) + + mpjpe_pose = torch.tensor( + mpjpe_err, device=lifting_target_label.device) + losses.update(mpjpe=mpjpe_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='TruncNormal', layer=['Linear'], std=0.02)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/regression_head.py b/mmpose/models/heads/regression_heads/regression_head.py new file mode 100644 index 0000000000000000000000000000000000000000..8ff73aa6ef1bed93e8985d9be20f3c94355d8c21 --- /dev/null +++ b/mmpose/models/heads/regression_heads/regression_head.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RegressionHead(BaseHead): + """Top-down regression head introduced in `Deeppose`_ by Toshev et al + (2014). The head is composed of fully-connected layers to predict the + coordinates directly. + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Deeppose`: https://arxiv.org/abs/1312.4659 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='SmoothL1Loss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.fc = nn.Linear(in_channels, self.num_joints * 2) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). 
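+
+ Example (a rough shape sketch; it assumes the backbone output has
+ already been globally pooled, e.g. by a ``GlobalAveragePooling``
+ neck, so the last feature flattens to ``(B, in_channels)``; the
+ sizes are illustrative)::
+
+ >>> head = RegressionHead(in_channels=2048, num_joints=17)
+ >>> feats = (torch.rand(2, 2048, 1, 1),)
+ >>> coords = head.forward(feats)
+ >>> # coords: (2, 17, 2), one (x, y) pair per joint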
+ """ + x = feats[-1] + + x = torch.flatten(x, 1) + x = self.fc(x) + + return x.reshape(-1, self.num_joints, 2) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + + _batch_coords = self.forward(_feats) + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/rle_head.py b/mmpose/models/heads/regression_heads/rle_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ef696dffa6b53c9c981879732416a21fa8f45349 --- /dev/null +++ b/mmpose/models/heads/regression_heads/rle_head.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RLEHead(BaseHead): + """Top-down regression head introduced in `RLE`_ by Li et al(2021). The + head is composed of fully-connected layers to predict the coordinates and + sigma(the variance of the coordinates) together. + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`RLELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. 
Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`RLE`: https://arxiv.org/abs/2107.11291 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='RLELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.fc = nn.Linear(in_channels, self.num_joints * 4) + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). + """ + x = feats[-1] + + x = torch.flatten(x, 1) + x = self.fc(x) + + return x.reshape(-1, self.num_joints, 4) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + + _feats, _feats_flip = feats + + _batch_coords = self.forward(_feats) + _batch_coords[..., 2:] = _batch_coords[..., 2:].sigmoid() + + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + _batch_coords_flip[..., 2:] = _batch_coords_flip[..., 2:].sigmoid() + + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + batch_coords[..., 2:] = batch_coords[..., 2:].sigmoid() + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + pred_coords = pred_outputs[:, :, :2] + pred_sigma = pred_outputs[:, :, 2:4] + + # calculate losses + losses = dict() + loss = self.loss_module(pred_coords, pred_sigma, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + def 
_load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`DeepposeRegressionHead` (before MMPose v1.0.0) to a + compatible format of :class:`RegressionHead`. + + The hook will be automatically registered during initialization. + """ + + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + # In old version, "loss" includes the instances of loss, + # now it should be renamed "loss_module" + k_parts = k.split('.') + if k_parts[0] == 'loss': + # loss.xxx -> loss_module.xxx + k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) + else: + k_new = _k + + state_dict[k_new] = v + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/temporal_regression_head.py b/mmpose/models/heads/regression_heads/temporal_regression_head.py new file mode 100644 index 0000000000000000000000000000000000000000..902be8099ef6d8643f78542dc4949eea5004816b --- /dev/null +++ b/mmpose/models/heads/regression_heads/temporal_regression_head.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_mpjpe +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class TemporalRegressionHead(BaseHead): + """Temporal Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`VideoPose3D`: https://arxiv.org/abs/1811.11742 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='MSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: Output coordinates (and sigmas[optional]). 
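+
+ Example (a rough shape sketch; it assumes the temporal encoder has
+ already reduced the sequence to a single frame, so ``feats[-1]`` has
+ shape ``(B, in_channels, 1)``; the sizes are illustrative)::
+
+ >>> head = TemporalRegressionHead(in_channels=1024, num_joints=17)
+ >>> feats = (torch.rand(2, 1024, 1),)
+ >>> coords = head.forward(feats)
+ >>> # coords: (2, 17, 3), one 3D coordinate triplet per joint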
+ """ + x = feats[-1] + + x = self.conv(x) + + return x.reshape(-1, self.num_joints, 3) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs. + + Returns: + preds (sequence[InstanceData]): Prediction results. + Each contains the following fields: + + - keypoints: Predicted keypoints of shape (B, N, K, D). + - keypoint_scores: Scores of predicted keypoints of shape + (B, N, K). + """ + + batch_coords = self.forward(feats) # (B, K, D) + + # Restore global position with target_root + target_root = batch_data_samples[0].metainfo.get('target_root', None) + if target_root is not None: + target_root = torch.stack([ + torch.from_numpy(b.metainfo['target_root']) + for b in batch_data_samples + ]) + else: + target_root = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples + ]) + + preds = self.decode((batch_coords, target_root)) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + lifting_target_label = torch.cat([ + d.gt_instance_labels.lifting_target_label + for d in batch_data_samples + ]) + lifting_target_weight = torch.cat([ + d.gt_instance_labels.lifting_target_weight + for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, lifting_target_label, + lifting_target_weight.unsqueeze(-1)) + + losses.update(loss_pose3d=loss) + + # calculate accuracy + mpjpe_err = keypoint_mpjpe( + pred=to_numpy(pred_outputs), + gt=to_numpy(lifting_target_label), + mask=to_numpy(lifting_target_weight) > 0) + + mpjpe_pose = torch.tensor( + mpjpe_err, device=lifting_target_label.device) + losses.update(mpjpe=mpjpe_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/trajectory_regression_head.py b/mmpose/models/heads/regression_heads/trajectory_regression_head.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d02f2ce37061a0c6cd6abd0e188937dd405c21 --- /dev/null +++ b/mmpose/models/heads/regression_heads/trajectory_regression_head.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_mpjpe +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class TrajectoryRegressionHead(BaseHead): + """Trajectory Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for trajectory loss. Defaults to use + :class:`MPJPELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`VideoPose3D`: https://arxiv.org/abs/1811.11742 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='MPJPELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). + """ + x = feats[-1] + + x = self.conv(x) + + return x.reshape(-1, self.num_joints, 3) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs. + + Returns: + preds (sequence[InstanceData]): Prediction results. + Each contains the following fields: + + - keypoints: Predicted keypoints of shape (B, N, K, D). + - keypoint_scores: Scores of predicted keypoints of shape + (B, N, K). + """ + + batch_coords = self.forward(feats) # (B, K, D) + + # Restore global position with target_root + target_root = batch_data_samples[0].metainfo.get('target_root', None) + if target_root is not None: + target_root = torch.stack([ + torch.from_numpy(b.metainfo['target_root']) + for b in batch_data_samples + ]) + else: + target_root = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples + ]) + + preds = self.decode((batch_coords, target_root)) + + return preds + + def loss(self, + inputs: Union[Tensor, Tuple[Tensor]], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + lifting_target_label = torch.cat([ + d.gt_instance_labels.lifting_target_label + for d in batch_data_samples + ]) + trajectory_weights = torch.cat([ + d.gt_instance_labels.trajectory_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, lifting_target_label, + trajectory_weights.unsqueeze(-1)) + + losses.update(loss_traj=loss) + + # calculate accuracy + mpjpe_err = keypoint_mpjpe( + pred=to_numpy(pred_outputs), + gt=to_numpy(lifting_target_label), + mask=to_numpy(trajectory_weights) > 0) + + mpjpe_traj = torch.tensor( + mpjpe_err, device=lifting_target_label.device) + losses.update(mpjpe_traj=mpjpe_traj) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/transformer_heads/__init__.py b/mmpose/models/heads/transformer_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb16484ff8b441db1f32a721cd8f0d410234289e --- /dev/null +++ b/mmpose/models/heads/transformer_heads/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .edpose_head import EDPoseHead +from .transformers import (FFN, DeformableDetrTransformerDecoder, + DeformableDetrTransformerDecoderLayer, + DeformableDetrTransformerEncoder, + DeformableDetrTransformerEncoderLayer, + DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer, + PositionEmbeddingSineHW) + +__all__ = [ + 'EDPoseHead', 'DetrTransformerEncoder', 'DetrTransformerDecoder', + 'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer', + 'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder', + 'DeformableDetrTransformerEncoderLayer', + 'DeformableDetrTransformerDecoderLayer', 'PositionEmbeddingSineHW', 'FFN' +] diff --git a/mmpose/models/heads/transformer_heads/base_transformer_head.py b/mmpose/models/heads/transformer_heads/base_transformer_head.py new file mode 100644 index 0000000000000000000000000000000000000000..96855e186d8874a712f38d0ef6a604ace5d34b7f --- /dev/null +++ b/mmpose/models/heads/transformer_heads/base_transformer_head.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import abstractmethod +from typing import Dict, Tuple + +import torch +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import (Features, OptConfigType, OptMultiConfig, + OptSampleList, Predictions) +from ..base_head import BaseHead + + +@MODELS.register_module() +class TransformerHead(BaseHead): + r"""Implementation of `Deformable DETR: Deformable Transformers for + End-to-End Object Detection `_ + + Code is modified from the `official github repo + `_. + + Args: + encoder (ConfigDict, optional): Config of the + Transformer encoder. Defaults to None. + decoder (ConfigDict, optional): Config of the + Transformer decoder. Defaults to None. + out_head (ConfigDict, optional): Config for the + bounding final out head module. Defaults to None. + positional_encoding (ConfigDict, optional): Config for + transformer position encoding. Defaults to None. + num_queries (int): Number of query in Transformer. + loss (ConfigDict, optional): Config for loss functions. + Defaults to None. + init_cfg (ConfigDict, optional): Config to control the initialization. 
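+
+ The base ``forward()`` simply chains the three abstract stages that
+ subclasses implement, ``forward_encoder()`` -> ``forward_decoder()``
+ -> ``forward_out_head()``, where each stage returns a dict whose
+ items are passed on as keyword arguments to the next stage.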
+ """ + + def __init__(self, + encoder: OptConfigType = None, + decoder: OptConfigType = None, + out_head: OptConfigType = None, + positional_encoding: OptConfigType = None, + num_queries: int = 100, + loss: OptConfigType = None, + init_cfg: OptMultiConfig = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.encoder_cfg = encoder + self.decoder_cfg = decoder + self.out_head_cfg = out_head + self.positional_encoding_cfg = positional_encoding + self.num_queries = num_queries + + def forward(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList = None) -> Dict: + """Forward the network.""" + encoder_outputs_dict = self.forward_encoder(feats, batch_data_samples) + + decoder_outputs_dict = self.forward_decoder(**encoder_outputs_dict) + + head_outputs_dict = self.forward_out_head(batch_data_samples, + **decoder_outputs_dict) + return head_outputs_dict + + @abstractmethod + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from features.""" + pass + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + pass + + @abstractmethod + def forward_encoder(self, feat: Tensor, feat_mask: Tensor, + feat_pos: Tensor, **kwargs) -> Dict: + pass + + @abstractmethod + def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, + **kwargs) -> Dict: + pass + + @abstractmethod + def forward_out_head(self, query: Tensor, query_pos: Tensor, + memory: Tensor, **kwargs) -> Dict: + pass + + @staticmethod + def get_valid_ratio(mask: Tensor) -> Tensor: + """Get the valid radios of feature map in a level. + + .. code:: text + + |---> valid_W <---| + ---+-----------------+-----+--- + A | | | A + | | | | | + | | | | | + valid_H | | | | + | | | | H + | | | | | + V | | | | + ---+-----------------+ | | + | | V + +-----------------------+--- + |---------> W <---------| + + The valid_ratios are defined as: + r_h = valid_H / H, r_w = valid_W / W + They are the factors to re-normalize the relative coordinates of the + image to the relative coordinates of the current level feature map. + + Args: + mask (Tensor): Binary mask of a feature map, has shape (bs, H, W). + + Returns: + Tensor: valid ratios [r_w, r_h] of a feature map, has shape (1, 2). 
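+
+ For instance, with a hypothetical padding mask of shape (1, 4, 6)
+ whose valid (unpadded) region covers 3 rows and 4 columns, the
+ returned ratios would be [4 / 6, 3 / 4], i.e. roughly [0.67, 0.75].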
+ """ + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio diff --git a/mmpose/models/heads/transformer_heads/edpose_head.py b/mmpose/models/heads/transformer_heads/edpose_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d864f8fadd9882c16a3b81c55ca319def8d5d3aa --- /dev/null +++ b/mmpose/models/heads/transformer_heads/edpose_head.py @@ -0,0 +1,1346 @@ +# ---------------------------------------------------------------------------- +# Adapted from https://github.com/IDEA-Research/ED-Pose/ \ +# tree/master/models/edpose +# Original licence: IDEA License 1.0 +# ---------------------------------------------------------------------------- + +import copy +import math +from typing import Dict, List, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.ops import MultiScaleDeformableAttention +from mmengine.model import BaseModule, ModuleList, constant_init +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmpose.models.utils import inverse_sigmoid +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from .base_transformer_head import TransformerHead +from .transformers.deformable_detr_layers import ( + DeformableDetrTransformerDecoderLayer, DeformableDetrTransformerEncoder) +from .transformers.utils import FFN, PositionEmbeddingSineHW + + +class EDPoseDecoder(BaseModule): + """Transformer decoder of EDPose: `Explicit Box Detection Unifies End-to- + End Multi-Person Pose Estimation. + + Args: + layer_cfg (ConfigDict): the config of each encoder + layer. All the layers will share the same config. + num_layers (int): Number of decoder layers. + return_intermediate (bool, optional): Whether to return outputs of + intermediate layers. Defaults to `True`. + embed_dims (int): Dims of embed. + query_dim (int): Dims of queries. + num_feature_levels (int): Number of feature levels. + num_box_decoder_layers (int): Number of box decoder layers. + num_keypoints (int): Number of datasets' body keypoints. + num_dn (int): Number of denosing points. + num_group (int): Number of decoder layers. 
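+
+ As a rough summary of the decoding scheme implemented below: the
+ first ``num_box_decoder_layers`` layers refine human box queries
+ only; after that, the top ``num_group`` human queries are each
+ expanded into ``num_keypoints + 1`` queries (one box query plus one
+ query per keypoint), and the remaining layers refine boxes and
+ keypoints jointly, while the ``num_dn`` denoising queries are kept
+ in a separate prefix during training.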
+ """ + + def __init__(self, + layer_cfg, + num_layers, + return_intermediate, + embed_dims: int = 256, + query_dim=4, + num_feature_levels=1, + num_box_decoder_layers=2, + num_keypoints=17, + num_dn=100, + num_group=100): + super().__init__() + + self.layer_cfg = layer_cfg + self.num_layers = num_layers + self.embed_dims = embed_dims + + assert return_intermediate, 'support return_intermediate only' + self.return_intermediate = return_intermediate + + assert query_dim in [ + 2, 4 + ], 'query_dim should be 2/4 but {}'.format(query_dim) + self.query_dim = query_dim + + self.num_feature_levels = num_feature_levels + + self.layers = ModuleList([ + DeformableDetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.norm = nn.LayerNorm(self.embed_dims) + + self.ref_point_head = FFN(self.query_dim // 2 * self.embed_dims, + self.embed_dims, self.embed_dims, 2) + + self.num_keypoints = num_keypoints + self.query_scale = None + self.bbox_embed = None + self.class_embed = None + self.pose_embed = None + self.pose_hw_embed = None + self.num_box_decoder_layers = num_box_decoder_layers + self.box_pred_damping = None + self.num_group = num_group + self.rm_detach = None + self.num_dn = num_dn + self.hw = nn.Embedding(self.num_keypoints, 2) + self.keypoint_embed = nn.Embedding(self.num_keypoints, embed_dims) + self.kpt_index = [ + x for x in range(self.num_group * (self.num_keypoints + 1)) + if x % (self.num_keypoints + 1) != 0 + ] + + def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor, + reference_points: Tensor, spatial_shapes: Tensor, + level_start_index: Tensor, valid_ratios: Tensor, + humandet_attn_mask: Tensor, human2pose_attn_mask: Tensor, + **kwargs) -> Tuple[Tensor]: + """Forward function of decoder + Args: + query (Tensor): The input queries, has shape (bs, num_queries, + dim). + value (Tensor): The input values, has shape (bs, num_value, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has + shape (bs, num_queries, 2) with the last dimension arranged + as (cx, cy). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reg_branches: (obj:`nn.ModuleList`, optional): Used for refining + the regression results. + + Returns: + Tuple[Tuple[Tensor]]: Outputs of Deformable Transformer Decoder. + + - output (Tuple[Tensor]): Output embeddings of the last decoder, + each has shape (num_decoder_layers, num_queries, bs, embed_dims) + - reference_points (Tensor): The reference of the last decoder + layer, each has shape (num_decoder_layers, bs, num_queries, 4). 
+ The coordinates are arranged as (cx, cy, w, h) + """ + output = query + attn_mask = humandet_attn_mask + intermediate = [] + intermediate_reference_points = [reference_points] + effect_num_dn = self.num_dn if self.training else 0 + inter_select_number = self.num_group + for layer_id, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[None, :] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * \ + valid_ratios[None, :] + + query_sine_embed = self.get_proposal_pos_embed( + reference_points_input[:, :, 0, :]) # nq, bs, 256*2 + query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256 + + output = layer( + output.transpose(0, 1), + query_pos=query_pos.transpose(0, 1), + value=value.transpose(0, 1), + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points_input.transpose( + 0, 1).contiguous(), + self_attn_mask=attn_mask, + **kwargs) + output = output.transpose(0, 1) + intermediate.append(self.norm(output)) + + # human update + if layer_id < self.num_box_decoder_layers: + delta_unsig = self.bbox_embed[layer_id](output) + new_reference_points = delta_unsig + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + + # query expansion + if layer_id == self.num_box_decoder_layers - 1: + dn_output = output[:effect_num_dn] + dn_new_reference_points = new_reference_points[:effect_num_dn] + class_unselected = self.class_embed[layer_id]( + output)[effect_num_dn:] + topk_proposals = torch.topk( + class_unselected.max(-1)[0], inter_select_number, dim=0)[1] + new_reference_points_for_box = torch.gather( + new_reference_points[effect_num_dn:], 0, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + new_output_for_box = torch.gather( + output[effect_num_dn:], 0, + topk_proposals.unsqueeze(-1).repeat(1, 1, self.embed_dims)) + bs = new_output_for_box.shape[1] + new_output_for_keypoint = new_output_for_box[:, None, :, :] \ + + self.keypoint_embed.weight[None, :, None, :] + if self.num_keypoints == 17: + delta_xy = self.pose_embed[-1](new_output_for_keypoint)[ + ..., :2] + else: + delta_xy = self.pose_embed[0](new_output_for_keypoint)[ + ..., :2] + keypoint_xy = (inverse_sigmoid( + new_reference_points_for_box[..., :2][:, None]) + + delta_xy).sigmoid() + num_queries, _, bs, _ = keypoint_xy.shape + keypoint_wh_weight = self.hw.weight.unsqueeze(0).unsqueeze( + -2).repeat(num_queries, 1, bs, 1).sigmoid() + keypoint_wh = keypoint_wh_weight * \ + new_reference_points_for_box[..., 2:][:, None] + new_reference_points_for_keypoint = torch.cat( + (keypoint_xy, keypoint_wh), dim=-1) + new_reference_points = torch.cat( + (new_reference_points_for_box.unsqueeze(1), + new_reference_points_for_keypoint), + dim=1).flatten(0, 1) + output = torch.cat( + (new_output_for_box.unsqueeze(1), new_output_for_keypoint), + dim=1).flatten(0, 1) + new_reference_points = torch.cat( + (dn_new_reference_points, new_reference_points), dim=0) + output = torch.cat((dn_output, output), dim=0) + attn_mask = human2pose_attn_mask + + # human-to-keypoints update + if layer_id >= self.num_box_decoder_layers: + effect_num_dn = self.num_dn if self.training else 0 + inter_select_number = self.num_group + ref_before_sigmoid = inverse_sigmoid(reference_points) + output_bbox_dn = output[:effect_num_dn] + 
output_bbox_norm = output[effect_num_dn:][0::( + self.num_keypoints + 1)] + ref_before_sigmoid_bbox_dn = \ + ref_before_sigmoid[:effect_num_dn] + ref_before_sigmoid_bbox_norm = \ + ref_before_sigmoid[effect_num_dn:][0::( + self.num_keypoints + 1)] + delta_unsig_dn = self.bbox_embed[layer_id](output_bbox_dn) + delta_unsig_norm = self.bbox_embed[layer_id](output_bbox_norm) + outputs_unsig_dn = delta_unsig_dn + ref_before_sigmoid_bbox_dn + outputs_unsig_norm = delta_unsig_norm + \ + ref_before_sigmoid_bbox_norm + new_reference_points_for_box_dn = outputs_unsig_dn.sigmoid() + new_reference_points_for_box_norm = outputs_unsig_norm.sigmoid( + ) + output_kpt = output[effect_num_dn:].index_select( + 0, torch.tensor(self.kpt_index, device=output.device)) + delta_xy_unsig = self.pose_embed[layer_id - + self.num_box_decoder_layers]( + output_kpt) + outputs_unsig = ref_before_sigmoid[ + effect_num_dn:].index_select( + 0, torch.tensor(self.kpt_index, + device=output.device)).clone() + delta_hw_unsig = self.pose_hw_embed[ + layer_id - self.num_box_decoder_layers]( + output_kpt) + outputs_unsig[..., :2] += delta_xy_unsig[..., :2] + outputs_unsig[..., 2:] += delta_hw_unsig + new_reference_points_for_keypoint = outputs_unsig.sigmoid() + bs = new_reference_points_for_box_norm.shape[1] + new_reference_points_norm = torch.cat( + (new_reference_points_for_box_norm.unsqueeze(1), + new_reference_points_for_keypoint.view( + -1, self.num_keypoints, bs, 4)), + dim=1).flatten(0, 1) + new_reference_points = torch.cat( + (new_reference_points_for_box_dn, + new_reference_points_norm), + dim=0) + + reference_points = new_reference_points.detach() + intermediate_reference_points.append(reference_points) + + decoder_outputs = [itm_out.transpose(0, 1) for itm_out in intermediate] + reference_points = [ + itm_refpoint.transpose(0, 1) + for itm_refpoint in intermediate_reference_points + ] + + return decoder_outputs, reference_points + + @staticmethod + def get_proposal_pos_embed(pos_tensor: Tensor, + temperature: int = 10000, + num_pos_feats: int = 128) -> Tensor: + """Get the position embedding of the proposal. + + Args: + pos_tensor (Tensor): Not normalized proposals, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h). + temperature (int, optional): The temperature used for scaling the + position embedding. Defaults to 10000. + num_pos_feats (int, optional): The feature dimension for each + position along x, y, w, and h-axis. Note the final returned + dimension for each position is 4 times of num_pos_feats. + Default to 128. 
+ + Returns: + Tensor: The position embedding of proposal, has shape + (bs, num_queries, num_pos_feats * 4), with the last dimension + arranged as (cx, cy, w, h) + """ + + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=pos_tensor.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + x_embed = pos_tensor[:, :, 0] * scale + y_embed = pos_tensor[:, :, 1] * scale + pos_x = x_embed[:, :, None] / dim_t + pos_y = y_embed[:, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), + dim=3).flatten(2) + pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), + dim=3).flatten(2) + if pos_tensor.size(-1) == 2: + pos = torch.cat((pos_y, pos_x), dim=2) + elif pos_tensor.size(-1) == 4: + w_embed = pos_tensor[:, :, 2] * scale + pos_w = w_embed[:, :, None] / dim_t + pos_w = torch.stack( + (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), + dim=3).flatten(2) + + h_embed = pos_tensor[:, :, 3] * scale + pos_h = h_embed[:, :, None] / dim_t + pos_h = torch.stack( + (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), + dim=3).flatten(2) + + pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) + else: + raise ValueError('Unknown pos_tensor shape(-1):{}'.format( + pos_tensor.size(-1))) + return pos + + +class EDPoseOutHead(BaseModule): + """Final Head of EDPose: `Explicit Box Detection Unifies End-to-End Multi- + Person Pose Estimation. + + Args: + num_classes (int): The number of classes. + num_keypoints (int): The number of datasets' body keypoints. + num_queries (int): The number of queries. + cls_no_bias (bool): Weather add the bias to class embed. + embed_dims (int): The dims of embed. + as_two_stage (bool, optional): Whether to generate the proposal + from the outputs of encoder. Defaults to `False`. + refine_queries_num (int): The number of refines queries after + decoders. + num_box_decoder_layers (int): The number of bbox decoder layer. + num_group (int): The number of groups. + num_pred_layer (int): The number of the prediction layers. + Defaults to 6. + dec_pred_class_embed_share (bool): Whether to share parameters + for all the class prediction layers. Defaults to `False`. + dec_pred_bbox_embed_share (bool): Whether to share parameters + for all the bbox prediction layers. Defaults to `False`. + dec_pred_pose_embed_share (bool): Whether to share parameters + for all the pose prediction layers. Defaults to `False`. 
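+
+ Note that ``forward()`` collects class logits, boxes and keypoints
+ from every decoder layer, optionally applies denoising
+ post-processing, and returns only the last decoder layer's
+ ``(pred_logits, pred_boxes, pred_keypoints)``.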
+ """ + + def __init__(self, + num_classes, + num_keypoints: int = 17, + num_queries: int = 900, + cls_no_bias: bool = False, + embed_dims: int = 256, + as_two_stage: bool = False, + refine_queries_num: int = 100, + num_box_decoder_layers: int = 2, + num_group: int = 100, + num_pred_layer: int = 6, + dec_pred_class_embed_share: bool = False, + dec_pred_bbox_embed_share: bool = False, + dec_pred_pose_embed_share: bool = False, + **kwargs): + super().__init__() + self.embed_dims = embed_dims + self.as_two_stage = as_two_stage + self.num_classes = num_classes + self.refine_queries_num = refine_queries_num + self.num_box_decoder_layers = num_box_decoder_layers + self.num_keypoints = num_keypoints + self.num_queries = num_queries + + # prepare pred layers + self.dec_pred_class_embed_share = dec_pred_class_embed_share + self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share + self.dec_pred_pose_embed_share = dec_pred_pose_embed_share + # prepare class & box embed + _class_embed = nn.Linear( + self.embed_dims, self.num_classes, bias=(not cls_no_bias)) + if not cls_no_bias: + prior_prob = 0.01 + bias_value = -math.log((1 - prior_prob) / prior_prob) + _class_embed.bias.data = torch.ones(self.num_classes) * bias_value + + _bbox_embed = FFN(self.embed_dims, self.embed_dims, 4, 3) + _pose_embed = FFN(self.embed_dims, self.embed_dims, 2, 3) + _pose_hw_embed = FFN(self.embed_dims, self.embed_dims, 2, 3) + + self.num_group = num_group + if dec_pred_bbox_embed_share: + box_embed_layerlist = [_bbox_embed for i in range(num_pred_layer)] + else: + box_embed_layerlist = [ + copy.deepcopy(_bbox_embed) for i in range(num_pred_layer) + ] + if dec_pred_class_embed_share: + class_embed_layerlist = [ + _class_embed for i in range(num_pred_layer) + ] + else: + class_embed_layerlist = [ + copy.deepcopy(_class_embed) for i in range(num_pred_layer) + ] + + if num_keypoints == 17: + if dec_pred_pose_embed_share: + pose_embed_layerlist = [ + _pose_embed + for i in range(num_pred_layer - num_box_decoder_layers + 1) + ] + else: + pose_embed_layerlist = [ + copy.deepcopy(_pose_embed) + for i in range(num_pred_layer - num_box_decoder_layers + 1) + ] + else: + if dec_pred_pose_embed_share: + pose_embed_layerlist = [ + _pose_embed + for i in range(num_pred_layer - num_box_decoder_layers) + ] + else: + pose_embed_layerlist = [ + copy.deepcopy(_pose_embed) + for i in range(num_pred_layer - num_box_decoder_layers) + ] + + pose_hw_embed_layerlist = [ + _pose_hw_embed + for i in range(num_pred_layer - num_box_decoder_layers) + ] + self.bbox_embed = nn.ModuleList(box_embed_layerlist) + self.class_embed = nn.ModuleList(class_embed_layerlist) + self.pose_embed = nn.ModuleList(pose_embed_layerlist) + self.pose_hw_embed = nn.ModuleList(pose_hw_embed_layerlist) + + def init_weights(self) -> None: + """Initialize weights of the Deformable DETR head.""" + + for m in self.bbox_embed: + constant_init(m[-1], 0, bias=0) + for m in self.pose_embed: + constant_init(m[-1], 0, bias=0) + + def forward(self, hidden_states: List[Tensor], references: List[Tensor], + mask_dict: Dict, hidden_states_enc: Tensor, + referens_enc: Tensor, batch_data_samples) -> Dict: + """Forward function. + + Args: + hidden_states (Tensor): Hidden states output from each decoder + layer, has shape (num_decoder_layers, bs, num_queries, dim). + references (list[Tensor]): List of the reference from the decoder. + + Returns: + tuple[Tensor]: results of head containing the following tensor. 
+ + - pred_logits (Tensor): Outputs from the + classification head, the socres of every bboxes. + - pred_boxes (Tensor): The output boxes. + - pred_keypoints (Tensor): The output keypoints. + """ + # update human boxes + effec_dn_num = self.refine_queries_num if self.training else 0 + outputs_coord_list = [] + outputs_class = [] + for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_cls_embed, + layer_hs) in enumerate( + zip(references[:-1], self.bbox_embed, + self.class_embed, hidden_states)): + if dec_lid < self.num_box_decoder_layers: + layer_delta_unsig = layer_bbox_embed(layer_hs) + layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid( + layer_ref_sig) + layer_outputs_unsig = layer_outputs_unsig.sigmoid() + layer_cls = layer_cls_embed(layer_hs) + outputs_coord_list.append(layer_outputs_unsig) + outputs_class.append(layer_cls) + else: + layer_hs_bbox_dn = layer_hs[:, :effec_dn_num, :] + layer_hs_bbox_norm = \ + layer_hs[:, effec_dn_num:, :][:, 0::( + self.num_keypoints + 1), :] + bs = layer_ref_sig.shape[0] + ref_before_sigmoid_bbox_dn = \ + layer_ref_sig[:, : effec_dn_num, :] + ref_before_sigmoid_bbox_norm = \ + layer_ref_sig[:, effec_dn_num:, :][:, 0::( + self.num_keypoints + 1), :] + layer_delta_unsig_dn = layer_bbox_embed(layer_hs_bbox_dn) + layer_delta_unsig_norm = layer_bbox_embed(layer_hs_bbox_norm) + layer_outputs_unsig_dn = layer_delta_unsig_dn + \ + inverse_sigmoid(ref_before_sigmoid_bbox_dn) + layer_outputs_unsig_dn = layer_outputs_unsig_dn.sigmoid() + layer_outputs_unsig_norm = layer_delta_unsig_norm + \ + inverse_sigmoid(ref_before_sigmoid_bbox_norm) + layer_outputs_unsig_norm = layer_outputs_unsig_norm.sigmoid() + layer_outputs_unsig = torch.cat( + (layer_outputs_unsig_dn, layer_outputs_unsig_norm), dim=1) + layer_cls_dn = layer_cls_embed(layer_hs_bbox_dn) + layer_cls_norm = layer_cls_embed(layer_hs_bbox_norm) + layer_cls = torch.cat((layer_cls_dn, layer_cls_norm), dim=1) + outputs_class.append(layer_cls) + outputs_coord_list.append(layer_outputs_unsig) + + # update keypoints boxes + outputs_keypoints_list = [] + kpt_index = [ + x for x in range(self.num_group * (self.num_keypoints + 1)) + if x % (self.num_keypoints + 1) != 0 + ] + for dec_lid, (layer_ref_sig, layer_hs) in enumerate( + zip(references[:-1], hidden_states)): + if dec_lid < self.num_box_decoder_layers: + assert isinstance(layer_hs, torch.Tensor) + bs = layer_hs.shape[0] + layer_res = layer_hs.new_zeros( + (bs, self.num_queries, self.num_keypoints * 3)) + outputs_keypoints_list.append(layer_res) + else: + bs = layer_ref_sig.shape[0] + layer_hs_kpt = \ + layer_hs[:, effec_dn_num:, :].index_select( + 1, torch.tensor(kpt_index, device=layer_hs.device)) + delta_xy_unsig = self.pose_embed[dec_lid - + self.num_box_decoder_layers]( + layer_hs_kpt) + layer_ref_sig_kpt = \ + layer_ref_sig[:, effec_dn_num:, :].index_select( + 1, torch.tensor(kpt_index, device=layer_hs.device)) + layer_outputs_unsig_keypoints = delta_xy_unsig + \ + inverse_sigmoid(layer_ref_sig_kpt[..., :2]) + vis_xy_unsig = torch.ones_like( + layer_outputs_unsig_keypoints, + device=layer_outputs_unsig_keypoints.device) + xyv = torch.cat((layer_outputs_unsig_keypoints, + vis_xy_unsig[:, :, 0].unsqueeze(-1)), + dim=-1) + xyv = xyv.sigmoid() + layer_res = xyv.reshape( + (bs, self.num_group, self.num_keypoints, 3)).flatten(2, 3) + layer_res = self.keypoint_xyzxyz_to_xyxyzz(layer_res) + outputs_keypoints_list.append(layer_res) + + dn_mask_dict = mask_dict + if self.refine_queries_num > 0 and dn_mask_dict is not None: + outputs_class, 
outputs_coord_list, outputs_keypoints_list = \ + self.dn_post_process2( + outputs_class, outputs_coord_list, + outputs_keypoints_list, dn_mask_dict + ) + + for _out_class, _out_bbox, _out_keypoint in zip( + outputs_class, outputs_coord_list, outputs_keypoints_list): + assert _out_class.shape[1] == \ + _out_bbox.shape[1] == _out_keypoint.shape[1] + + return outputs_class[-1], outputs_coord_list[ + -1], outputs_keypoints_list[-1] + + def keypoint_xyzxyz_to_xyxyzz(self, keypoints: torch.Tensor): + """ + Args: + keypoints (torch.Tensor): ..., 51 + """ + res = torch.zeros_like(keypoints) + num_points = keypoints.shape[-1] // 3 + res[..., 0:2 * num_points:2] = keypoints[..., 0::3] + res[..., 1:2 * num_points:2] = keypoints[..., 1::3] + res[..., 2 * num_points:] = keypoints[..., 2::3] + return res + + +@MODELS.register_module() +class EDPoseHead(TransformerHead): + """Head introduced in `Explicit Box Detection Unifies End-to-End Multi- + Person Pose Estimation`_ by J Yang1 et al (2023). The head is composed of + Encoder, Decoder and Out_head. + + Code is modified from the `official github repo + `_. + + More details can be found in the `paper + `_ . + + Args: + num_queries (int): Number of query in Transformer. + num_feature_levels (int): Number of feature levels. Defaults to 4. + num_keypoints (int): Number of keypoints. Defaults to 4. + as_two_stage (bool, optional): Whether to generate the proposal + from the outputs of encoder. Defaults to `False`. + encoder (:obj:`ConfigDict` or dict, optional): Config of the + Transformer encoder. Defaults to None. + decoder (:obj:`ConfigDict` or dict, optional): Config of the + Transformer decoder. Defaults to None. + out_head (:obj:`ConfigDict` or dict, optional): Config for the + bounding final out head module. Defaults to None. + positional_encoding (:obj:`ConfigDict` or dict): Config for + transformer position encoding. Defaults None. + denosing_cfg (:obj:`ConfigDict` or dict, optional): Config of the + human query denoising training strategy. + data_decoder (:obj:`ConfigDict` or dict, optional): Config of the + data decoder which transform the results from output space to + input space. + dec_pred_class_embed_share (bool): Whether to share the class embed + layer. Default False. + dec_pred_bbox_embed_share (bool): Whether to share the bbox embed + layer. Default False. + refine_queries_num (int): Number of refined human content queries + and their position queries . + two_stage_keep_all_tokens (bool): Whether to keep all tokens. 
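+
+    Example (illustrative; ``encoder_cfg``, ``decoder_cfg``, ``out_head_cfg``,
+    ``pos_cfg``, ``dn_cfg`` and ``codec_cfg`` are assumed placeholder
+    sub-configs, abbreviated here)::
+
+        >>> head = EDPoseHead(
+        ...     num_queries=900,
+        ...     num_feature_levels=4,
+        ...     num_keypoints=17,
+        ...     as_two_stage=True,
+        ...     encoder=encoder_cfg,        # deformable encoder settings
+        ...     decoder=decoder_cfg,        # EDPoseDecoder settings
+        ...     out_head=out_head_cfg,      # EDPoseOutHead settings
+        ...     positional_encoding=pos_cfg,
+        ...     denosing_cfg=dn_cfg,
+        ...     data_decoder=codec_cfg)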
+ """ + + def __init__(self, + num_queries: int = 100, + num_feature_levels: int = 4, + num_keypoints: int = 17, + as_two_stage: bool = False, + encoder: OptConfigType = None, + decoder: OptConfigType = None, + out_head: OptConfigType = None, + positional_encoding: OptConfigType = None, + data_decoder: OptConfigType = None, + denosing_cfg: OptConfigType = None, + dec_pred_class_embed_share: bool = False, + dec_pred_bbox_embed_share: bool = False, + refine_queries_num: int = 100, + two_stage_keep_all_tokens: bool = False) -> None: + + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.refine_queries_num = refine_queries_num + self.dec_pred_class_embed_share = dec_pred_class_embed_share + self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share + self.two_stage_keep_all_tokens = two_stage_keep_all_tokens + self.num_heads = decoder['layer_cfg']['self_attn_cfg']['num_heads'] + self.num_group = decoder['num_group'] + self.num_keypoints = num_keypoints + self.denosing_cfg = denosing_cfg + if data_decoder is not None: + self.data_decoder = KEYPOINT_CODECS.build(data_decoder) + else: + self.data_decoder = None + + super().__init__( + encoder=encoder, + decoder=decoder, + out_head=out_head, + positional_encoding=positional_encoding, + num_queries=num_queries) + + self.positional_encoding = PositionEmbeddingSineHW( + **self.positional_encoding_cfg) + self.encoder = DeformableDetrTransformerEncoder(**self.encoder_cfg) + self.decoder = EDPoseDecoder( + num_keypoints=num_keypoints, **self.decoder_cfg) + self.out_head = EDPoseOutHead( + num_keypoints=num_keypoints, + as_two_stage=as_two_stage, + refine_queries_num=refine_queries_num, + **self.out_head_cfg, + **self.decoder_cfg) + + self.embed_dims = self.encoder.embed_dims + self.label_enc = nn.Embedding( + self.denosing_cfg['dn_labelbook_size'] + 1, self.embed_dims) + + if not self.as_two_stage: + self.query_embedding = nn.Embedding(self.num_queries, + self.embed_dims) + self.refpoint_embedding = nn.Embedding(self.num_queries, 4) + + self.level_embed = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + self.decoder.bbox_embed = self.out_head.bbox_embed + self.decoder.pose_embed = self.out_head.pose_embed + self.decoder.pose_hw_embed = self.out_head.pose_hw_embed + self.decoder.class_embed = self.out_head.class_embed + + if self.as_two_stage: + self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims) + self.memory_trans_norm = nn.LayerNorm(self.embed_dims) + if dec_pred_class_embed_share and dec_pred_bbox_embed_share: + self.enc_out_bbox_embed = self.out_head.bbox_embed[0] + else: + self.enc_out_bbox_embed = copy.deepcopy( + self.out_head.bbox_embed[0]) + + if dec_pred_class_embed_share and dec_pred_bbox_embed_share: + self.enc_out_class_embed = self.out_head.class_embed[0] + else: + self.enc_out_class_embed = copy.deepcopy( + self.out_head.class_embed[0]) + + def init_weights(self) -> None: + """Initialize weights for Transformer and other components.""" + super().init_weights() + for coder in self.encoder, self.decoder: + for p in coder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + m.init_weights() + if self.as_two_stage: + nn.init.xavier_uniform_(self.memory_trans_fc.weight) + + nn.init.normal_(self.level_embed) + + def pre_transformer(self, + img_feats: Tuple[Tensor], + batch_data_samples: OptSampleList = None + ) -> Tuple[Dict]: + """Process image features before feeding them to the 
transformer. + + Args: + img_feats (tuple[Tensor]): Multi-level features that may have + different resolutions, output from neck. Each feature has + shape (bs, dim, h_lvl, w_lvl), where 'lvl' means 'layer'. + batch_data_samples (list[:obj:`DetDataSample`], optional): The + batch data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + Defaults to None. + + Returns: + tuple[dict]: The first dict contains the inputs of encoder and the + second dict contains the inputs of decoder. + + - encoder_inputs_dict (dict): The keyword args dictionary of + `self.encoder()`. + - decoder_inputs_dict (dict): The keyword args dictionary of + `self.forward_decoder()`, which includes 'memory_mask'. + """ + batch_size = img_feats[0].size(0) + # construct binary masks for the transformer. + assert batch_data_samples is not None + batch_input_shape = batch_data_samples[0].batch_input_shape + img_shape_list = [sample.img_shape for sample in batch_data_samples] + input_img_h, input_img_w = batch_input_shape + masks = img_feats[0].new_ones((batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w = img_shape_list[img_id] + masks[img_id, :img_h, :img_w] = 0 + # NOTE following the official DETR repo, non-zero values representing + # ignored positions, while zero values means valid positions. + + mlvl_masks = [] + mlvl_pos_embeds = [] + for feat in img_feats: + mlvl_masks.append( + F.interpolate(masks[None], + size=feat.shape[-2:]).to(torch.bool).squeeze(0)) + mlvl_pos_embeds.append(self.positional_encoding(mlvl_masks[-1])) + + feat_flatten = [] + lvl_pos_embed_flatten = [] + mask_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(img_feats, mlvl_masks, mlvl_pos_embeds)): + batch_size, c, h, w = feat.shape + # [bs, c, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl, c] + feat = feat.view(batch_size, c, -1).permute(0, 2, 1) + pos_embed = pos_embed.view(batch_size, c, -1).permute(0, 2, 1) + lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) + # [bs, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl] + mask = mask.flatten(1) + spatial_shape = (h, w) + + feat_flatten.append(feat) + lvl_pos_embed_flatten.append(lvl_pos_embed) + mask_flatten.append(mask) + spatial_shapes.append(spatial_shape) + + # (bs, num_feat_points, dim) + feat_flatten = torch.cat(feat_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + # (bs, num_feat_points), where num_feat_points = sum_lvl(h_lvl*w_lvl) + mask_flatten = torch.cat(mask_flatten, 1) + + spatial_shapes = torch.as_tensor( # (num_level, 2) + spatial_shapes, + dtype=torch.long, + device=feat_flatten.device) + level_start_index = torch.cat(( + spatial_shapes.new_zeros((1, )), # (num_level) + spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( # (bs, num_level, 2) + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + if self.refine_queries_num > 0 or batch_data_samples is not None: + input_query_label, input_query_bbox, humandet_attn_mask, \ + human2pose_attn_mask, mask_dict =\ + self.prepare_for_denosing( + batch_data_samples, + device=img_feats[0].device) + else: + assert batch_data_samples is None + input_query_bbox = input_query_label = \ + humandet_attn_mask = human2pose_attn_mask = mask_dict = None + + encoder_inputs_dict = dict( + query=feat_flatten, + query_pos=lvl_pos_embed_flatten, + key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios) + 
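+        # Illustrative bookkeeping (toy sizes, assumed rather than computed
+        # here): with two feature levels of shape (h, w) = (2, 3) and (1, 2),
+        #   spatial_shapes    -> tensor([[2, 3], [1, 2]])
+        #   level_start_index -> tensor([0, 6])   # 0, then cumsum of h*w
+        #   feat_flatten      -> shape (bs, 6 + 2, dim)
+        # valid_ratios, of shape (bs, num_levels, 2), stores the un-padded
+        # fraction of each level and is later used to rescale reference
+        # points.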
decoder_inputs_dict = dict( + memory_mask=mask_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + humandet_attn_mask=humandet_attn_mask, + human2pose_attn_mask=human2pose_attn_mask, + input_query_bbox=input_query_bbox, + input_query_label=input_query_label, + mask_dict=mask_dict) + return encoder_inputs_dict, decoder_inputs_dict + + def forward_encoder(self, + img_feats: Tuple[Tensor], + batch_data_samples: OptSampleList = None) -> Dict: + """Forward with Transformer encoder. + + The forward procedure is defined as: + 'pre_transformer' -> 'encoder' + + Args: + img_feats (tuple[Tensor]): Multi-level features that may have + different resolutions, output from neck. Each feature has + shape (bs, dim, h_lvl, w_lvl), where 'lvl' means 'layer'. + batch_data_samples (list[:obj:`DetDataSample`], optional): The + batch data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + Defaults to None. + + Returns: + dict: The dictionary of encoder outputs, which includes the + `memory` of the encoder output. + """ + encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer( + img_feats, batch_data_samples) + + memory = self.encoder(**encoder_inputs_dict) + encoder_outputs_dict = dict(memory=memory, **decoder_inputs_dict) + return encoder_outputs_dict + + def pre_decoder(self, memory: Tensor, memory_mask: Tensor, + spatial_shapes: Tensor, input_query_bbox: Tensor, + input_query_label: Tensor) -> Tuple[Dict, Dict]: + """Prepare intermediate variables before entering Transformer decoder, + such as `query` and `reference_points`. + + Args: + memory (Tensor): The output embeddings of the Transformer encoder, + has shape (bs, num_feat_points, dim). + memory_mask (Tensor): ByteTensor, the padding mask of the memory, + has shape (bs, num_feat_points). It will only be used when + `as_two_stage` is `True`. + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + It will only be used when `as_two_stage` is `True`. + input_query_bbox (Tensor): Denosing bbox query for training. + input_query_label (Tensor): Denosing label query for training. + + Returns: + tuple[dict, dict]: The decoder_inputs_dict and head_inputs_dict. + + - decoder_inputs_dict (dict): The keyword dictionary args of + `self.decoder()`. + - head_inputs_dict (dict): The keyword dictionary args of the + bbox_head functions. 
+ """ + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, memory_mask, spatial_shapes) + enc_outputs_class = self.enc_out_class_embed(output_memory) + enc_outputs_coord_unact = self.enc_out_bbox_embed( + output_memory) + output_proposals + + topk_proposals = torch.topk( + enc_outputs_class.max(-1)[0], self.num_queries, dim=1)[1] + topk_coords_undetach = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_undetach.detach() + reference_points = topk_coords_unact.sigmoid() + + query_undetach = torch.gather( + output_memory, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, self.embed_dims)) + query = query_undetach.detach() + + if input_query_bbox is not None: + reference_points = torch.cat( + [input_query_bbox, topk_coords_unact], dim=1).sigmoid() + query = torch.cat([input_query_label, query], dim=1) + if self.two_stage_keep_all_tokens: + hidden_states_enc = output_memory.unsqueeze(0) + referens_enc = enc_outputs_coord_unact.unsqueeze(0) + else: + hidden_states_enc = query_undetach.unsqueeze(0) + referens_enc = topk_coords_undetach.sigmoid().unsqueeze(0) + else: + hidden_states_enc, referens_enc = None, None + query = self.query_embedding.weight[:, None, :].repeat( + 1, bs, 1).transpose(0, 1) + reference_points = \ + self.refpoint_embedding.weight[:, None, :].repeat(1, bs, 1) + + if input_query_bbox is not None: + reference_points = torch.cat( + [input_query_bbox, reference_points], dim=1) + query = torch.cat([input_query_label, query], dim=1) + reference_points = reference_points.sigmoid() + + decoder_inputs_dict = dict( + query=query, reference_points=reference_points) + head_inputs_dict = dict( + hidden_states_enc=hidden_states_enc, referens_enc=referens_enc) + return decoder_inputs_dict, head_inputs_dict + + def forward_decoder(self, memory: Tensor, memory_mask: Tensor, + spatial_shapes: Tensor, level_start_index: Tensor, + valid_ratios: Tensor, humandet_attn_mask: Tensor, + human2pose_attn_mask: Tensor, input_query_bbox: Tensor, + input_query_label: Tensor, mask_dict: Dict) -> Dict: + """Forward with Transformer decoder. + + The forward procedure is defined as: + 'pre_decoder' -> 'decoder' + + Args: + memory (Tensor): The output embeddings of the Transformer encoder, + has shape (bs, num_feat_points, dim). + memory_mask (Tensor): ByteTensor, the padding mask of the memory, + has shape (bs, num_feat_points). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + humandet_attn_mask (Tensor): Human attention mask. + human2pose_attn_mask (Tensor): Human to pose attention mask. + input_query_bbox (Tensor): Denosing bbox query for training. + input_query_label (Tensor): Denosing label query for training. + + Returns: + dict: The dictionary of decoder outputs, which includes the + `hidden_states` of the decoder output and `references` including + the initial and intermediate reference_points. 
+ """ + decoder_in, head_in = self.pre_decoder(memory, memory_mask, + spatial_shapes, + input_query_bbox, + input_query_label) + + inter_states, inter_references = self.decoder( + query=decoder_in['query'].transpose(0, 1), + value=memory.transpose(0, 1), + key_padding_mask=memory_mask, # for cross_attn + reference_points=decoder_in['reference_points'].transpose(0, 1), + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + humandet_attn_mask=humandet_attn_mask, + human2pose_attn_mask=human2pose_attn_mask) + references = inter_references + decoder_outputs_dict = dict( + hidden_states=inter_states, + references=references, + mask_dict=mask_dict) + decoder_outputs_dict.update(head_in) + return decoder_outputs_dict + + def forward_out_head(self, batch_data_samples: OptSampleList, + hidden_states: List[Tensor], references: List[Tensor], + mask_dict: Dict, hidden_states_enc: Tensor, + referens_enc: Tensor) -> Tuple[Tensor]: + """Forward function.""" + out = self.out_head(hidden_states, references, mask_dict, + hidden_states_enc, referens_enc, + batch_data_samples) + return out + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features.""" + input_shapes = np.array( + [d.metainfo['input_size'] for d in batch_data_samples]) + + if test_cfg.get('flip_test', False): + assert NotImplementedError( + 'flip_test is currently not supported ' + 'for EDPose. Please set `model.test_cfg.flip_test=False`') + else: + pred_logits, pred_boxes, pred_keypoints = self.forward( + feats, batch_data_samples) # (B, K, D) + + pred = self.decode( + input_shapes, + pred_logits=pred_logits, + pred_boxes=pred_boxes, + pred_keypoints=pred_keypoints) + return pred + + def decode(self, input_shapes: np.ndarray, pred_logits: Tensor, + pred_boxes: Tensor, pred_keypoints: Tensor): + """Select the final top-k keypoints, and decode the results from + normalize size to origin input size. + + Args: + input_shapes (Tensor): The size of input image. + pred_logits (Tensor): The result of score. + pred_boxes (Tensor): The result of bbox. + pred_keypoints (Tensor): The result of keypoints. + + Returns: + """ + + if self.data_decoder is None: + raise RuntimeError(f'The data decoder has not been set in \ + {self.__class__.__name__}. ' + 'Please set the data decoder configs in \ + the init parameters to ' + 'enable head methods `head.predict()` and \ + `head.decode()`') + + preds = [] + + pred_logits = pred_logits.sigmoid() + pred_logits, pred_boxes, pred_keypoints = to_numpy( + [pred_logits, pred_boxes, pred_keypoints]) + + for input_shape, pred_logit, pred_bbox, pred_kpts in zip( + input_shapes, pred_logits, pred_boxes, pred_keypoints): + + bboxes, keypoints, keypoint_scores = self.data_decoder.decode( + input_shape, pred_logit, pred_bbox, pred_kpts) + + # pack outputs + preds.append( + InstanceData( + keypoints=keypoints, + keypoint_scores=keypoint_scores, + bboxes=bboxes)) + + return preds + + def gen_encoder_output_proposals(self, memory: Tensor, memory_mask: Tensor, + spatial_shapes: Tensor + ) -> Tuple[Tensor, Tensor]: + """Generate proposals from encoded memory. The function will only be + used when `as_two_stage` is `True`. + + Args: + memory (Tensor): The output embeddings of the Transformer encoder, + has shape (bs, num_feat_points, dim). + memory_mask (Tensor): ByteTensor, the padding mask of the memory, + has shape (bs, num_feat_points). 
+ spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + + Returns: + tuple: A tuple of transformed memory and proposals. + + - output_memory (Tensor): The transformed memory for obtaining + top-k proposals, has shape (bs, num_feat_points, dim). + - output_proposals (Tensor): The inverse-normalized proposal, has + shape (batch_size, num_keys, 4) with the last dimension arranged + as (cx, cy, w, h). + """ + bs = memory.size(0) + proposals = [] + _cur = 0 # start index in the sequence of the current level + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_mask[:, + _cur:(_cur + H * W)].view(bs, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1).unsqueeze(-1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1).unsqueeze(-1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace( + 0, H - 1, H, dtype=torch.float32, device=memory.device), + torch.linspace( + 0, W - 1, W, dtype=torch.float32, device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W, valid_H], 1).view(bs, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(bs, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + proposal = torch.cat((grid, wh), -1).view(bs, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all( + -1, keepdim=True) + + output_proposals = inverse_sigmoid(output_proposals) + output_proposals = output_proposals.masked_fill( + memory_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.memory_trans_fc(output_memory) + output_memory = self.memory_trans_norm(output_memory) + # [bs, sum(hw), 2] + return output_memory, output_proposals + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg + + def prepare_for_denosing(self, targets: OptSampleList, device): + """prepare for dn components in forward function.""" + if not self.training: + bs = len(targets) + attn_mask_infere = torch.zeros( + bs, + self.num_heads, + self.num_group * (self.num_keypoints + 1), + self.num_group * (self.num_keypoints + 1), + device=device, + dtype=torch.bool) + group_bbox_kpt = (self.num_keypoints + 1) + kpt_index = [ + x for x in range(self.num_group * (self.num_keypoints + 1)) + if x % (self.num_keypoints + 1) == 0 + ] + for matchj in range(self.num_group * (self.num_keypoints + 1)): + sj = (matchj // group_bbox_kpt) * group_bbox_kpt + ej = (matchj // group_bbox_kpt + 1) * group_bbox_kpt + if sj > 0: + attn_mask_infere[:, :, matchj, :sj] = True + if ej < self.num_group * (self.num_keypoints + 1): + attn_mask_infere[:, :, matchj, ej:] = True + for match_x in range(self.num_group * (self.num_keypoints + 1)): + if match_x % group_bbox_kpt == 0: + attn_mask_infere[:, :, match_x, kpt_index] = False + + attn_mask_infere = attn_mask_infere.flatten(0, 1) + return None, None, None, attn_mask_infere, None + + # targets, dn_scalar, noise_scale = dn_args + device = targets[0]['boxes'].device + bs = len(targets) + refine_queries_num = self.refine_queries_num + + # gather gt 
boxes and labels + gt_boxes = [t['boxes'] for t in targets] + gt_labels = [t['labels'] for t in targets] + gt_keypoints = [t['keypoints'] for t in targets] + + # repeat them + def get_indices_for_repeat(now_num, target_num, device='cuda'): + """ + Input: + - now_num: int + - target_num: int + Output: + - indices: tensor[target_num] + """ + out_indice = [] + base_indice = torch.arange(now_num).to(device) + multiplier = target_num // now_num + out_indice.append(base_indice.repeat(multiplier)) + residue = target_num % now_num + out_indice.append(base_indice[torch.randint( + 0, now_num, (residue, ), device=device)]) + return torch.cat(out_indice) + + gt_boxes_expand = [] + gt_labels_expand = [] + gt_keypoints_expand = [] + for idx, (gt_boxes_i, gt_labels_i, gt_keypoint_i) in enumerate( + zip(gt_boxes, gt_labels, gt_keypoints)): + num_gt_i = gt_boxes_i.shape[0] + if num_gt_i > 0: + indices = get_indices_for_repeat(num_gt_i, refine_queries_num, + device) + gt_boxes_expand_i = gt_boxes_i[indices] # num_dn, 4 + gt_labels_expand_i = gt_labels_i[indices] + gt_keypoints_expand_i = gt_keypoint_i[indices] + else: + # all negative samples when no gt boxes + gt_boxes_expand_i = torch.rand( + refine_queries_num, 4, device=device) + gt_labels_expand_i = torch.ones( + refine_queries_num, dtype=torch.int64, + device=device) * int(self.num_classes) + gt_keypoints_expand_i = torch.rand( + refine_queries_num, self.num_keypoints * 3, device=device) + gt_boxes_expand.append(gt_boxes_expand_i) + gt_labels_expand.append(gt_labels_expand_i) + gt_keypoints_expand.append(gt_keypoints_expand_i) + gt_boxes_expand = torch.stack(gt_boxes_expand) + gt_labels_expand = torch.stack(gt_labels_expand) + gt_keypoints_expand = torch.stack(gt_keypoints_expand) + knwon_boxes_expand = gt_boxes_expand.clone() + knwon_labels_expand = gt_labels_expand.clone() + + # add noise + if self.denosing_cfg['dn_label_noise_ratio'] > 0: + prob = torch.rand_like(knwon_labels_expand.float()) + chosen_indice = prob < self.denosing_cfg['dn_label_noise_ratio'] + new_label = torch.randint_like( + knwon_labels_expand[chosen_indice], 0, + self.dn_labelbook_size) # randomly put a new one here + knwon_labels_expand[chosen_indice] = new_label + + if self.denosing_cfg['dn_box_noise_scale'] > 0: + diff = torch.zeros_like(knwon_boxes_expand) + diff[..., :2] = knwon_boxes_expand[..., 2:] / 2 + diff[..., 2:] = knwon_boxes_expand[..., 2:] + knwon_boxes_expand += torch.mul( + (torch.rand_like(knwon_boxes_expand) * 2 - 1.0), + diff) * self.denosing_cfg['dn_box_noise_scale'] + knwon_boxes_expand = knwon_boxes_expand.clamp(min=0.0, max=1.0) + + input_query_label = self.label_enc(knwon_labels_expand) + input_query_bbox = inverse_sigmoid(knwon_boxes_expand) + + # prepare mask + if 'group2group' in self.denosing_cfg['dn_attn_mask_type_list']: + attn_mask = torch.zeros( + bs, + self.num_heads, + refine_queries_num + self.num_queries, + refine_queries_num + self.num_queries, + device=device, + dtype=torch.bool) + attn_mask[:, :, refine_queries_num:, :refine_queries_num] = True + for idx, (gt_boxes_i, + gt_labels_i) in enumerate(zip(gt_boxes, gt_labels)): + num_gt_i = gt_boxes_i.shape[0] + if num_gt_i == 0: + continue + for matchi in range(refine_queries_num): + si = (matchi // num_gt_i) * num_gt_i + ei = (matchi // num_gt_i + 1) * num_gt_i + if si > 0: + attn_mask[idx, :, matchi, :si] = True + if ei < refine_queries_num: + attn_mask[idx, :, matchi, ei:refine_queries_num] = True + attn_mask = attn_mask.flatten(0, 1) + + if 'group2group' in 
self.denosing_cfg['dn_attn_mask_type_list']: + attn_mask2 = torch.zeros( + bs, + self.num_heads, + refine_queries_num + self.num_group * (self.num_keypoints + 1), + refine_queries_num + self.num_group * (self.num_keypoints + 1), + device=device, + dtype=torch.bool) + attn_mask2[:, :, refine_queries_num:, :refine_queries_num] = True + group_bbox_kpt = (self.num_keypoints + 1) + kpt_index = [ + x for x in range(self.num_group * (self.num_keypoints + 1)) + if x % (self.num_keypoints + 1) == 0 + ] + for matchj in range(self.num_group * (self.num_keypoints + 1)): + sj = (matchj // group_bbox_kpt) * group_bbox_kpt + ej = (matchj // group_bbox_kpt + 1) * group_bbox_kpt + if sj > 0: + attn_mask2[:, :, refine_queries_num:, + refine_queries_num:][:, :, matchj, :sj] = True + if ej < self.num_group * (self.num_keypoints + 1): + attn_mask2[:, :, refine_queries_num:, + refine_queries_num:][:, :, matchj, ej:] = True + + for match_x in range(self.num_group * (self.num_keypoints + 1)): + if match_x % group_bbox_kpt == 0: + attn_mask2[:, :, refine_queries_num:, + refine_queries_num:][:, :, match_x, + kpt_index] = False + + for idx, (gt_boxes_i, + gt_labels_i) in enumerate(zip(gt_boxes, gt_labels)): + num_gt_i = gt_boxes_i.shape[0] + if num_gt_i == 0: + continue + for matchi in range(refine_queries_num): + si = (matchi // num_gt_i) * num_gt_i + ei = (matchi // num_gt_i + 1) * num_gt_i + if si > 0: + attn_mask2[idx, :, matchi, :si] = True + if ei < refine_queries_num: + attn_mask2[idx, :, matchi, + ei:refine_queries_num] = True + attn_mask2 = attn_mask2.flatten(0, 1) + + mask_dict = { + 'pad_size': refine_queries_num, + 'known_bboxs': gt_boxes_expand, + 'known_labels': gt_labels_expand, + 'known_keypoints': gt_keypoints_expand + } + + return input_query_label, input_query_bbox, \ + attn_mask, attn_mask2, mask_dict + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + assert NotImplementedError( + 'the training of EDPose has not been ' + 'supported. Please stay tuned for further update.') diff --git a/mmpose/models/heads/transformer_heads/transformers/__init__.py b/mmpose/models/heads/transformer_heads/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e9f115cd1e770554ffc405a969d11e745b493ed --- /dev/null +++ b/mmpose/models/heads/transformer_heads/transformers/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
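+# The classes re-exported below are the generic transformer building blocks
+# used by the EDPose head. A minimal, illustrative way to build the
+# deformable encoder (all config values here are assumed, not taken from any
+# released config) is:
+#
+#   encoder = DeformableDetrTransformerEncoder(
+#       num_layers=6,
+#       layer_cfg=dict(
+#           self_attn_cfg=dict(embed_dims=256, num_levels=4),
+#           ffn_cfg=dict(embed_dims=256, feedforward_channels=1024)))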
+from .deformable_detr_layers import (DeformableDetrTransformerDecoder, + DeformableDetrTransformerDecoderLayer, + DeformableDetrTransformerEncoder, + DeformableDetrTransformerEncoderLayer) +from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer) +from .utils import FFN, PositionEmbeddingSineHW + +__all__ = [ + 'DetrTransformerEncoder', 'DetrTransformerDecoder', + 'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer', + 'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder', + 'DeformableDetrTransformerEncoderLayer', + 'DeformableDetrTransformerDecoderLayer', 'PositionEmbeddingSineHW', 'FFN' +] diff --git a/mmpose/models/heads/transformer_heads/transformers/deformable_detr_layers.py b/mmpose/models/heads/transformer_heads/transformers/deformable_detr_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..149f04e469ba48ff1f1f8b8474e44ce74ecebdb0 --- /dev/null +++ b/mmpose/models/heads/transformer_heads/transformers/deformable_detr_layers.py @@ -0,0 +1,251 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple, Union + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.ops import MultiScaleDeformableAttention +from mmengine.model import ModuleList +from torch import Tensor, nn + +from mmpose.models.utils import inverse_sigmoid +from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer) + + +class DeformableDetrTransformerEncoder(DetrTransformerEncoder): + """Transformer encoder of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + self.layers = ModuleList([ + DeformableDetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, spatial_shapes: Tensor, + level_start_index: Tensor, valid_ratios: Tensor, + **kwargs) -> Tensor: + """Forward function of Transformer encoder. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, has shape + (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). 
+ + Returns: + Tensor: Output queries of Transformer encoder, which is also + called 'encoder output embeddings' or 'memory', has shape + (bs, num_queries, dim) + """ + reference_points = self.get_encoder_reference_points( + spatial_shapes, valid_ratios, device=query.device) + for layer in self.layers: + query = layer( + query=query, + query_pos=query_pos, + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points, + **kwargs) + return query + + @staticmethod + def get_encoder_reference_points(spatial_shapes: Tensor, + valid_ratios: Tensor, + device: Union[torch.device, + str]) -> Tensor: + """Get the reference points used in encoder. + + Args: + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + device (obj:`device` or str): The device acquired by the + `reference_points`. + + Returns: + Tensor: Reference points used in decoder, has shape (bs, length, + num_levels, 2). + """ + + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + # [bs, sum(hw), num_level, 2] + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + +class DeformableDetrTransformerDecoder(DetrTransformerDecoder): + """Transformer Decoder of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + DeformableDetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + if self.post_norm_cfg is not None: + raise ValueError('There is not post_norm in ' + f'{self._get_name()}') + + def forward(self, + query: Tensor, + query_pos: Tensor, + value: Tensor, + key_padding_mask: Tensor, + reference_points: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + valid_ratios: Tensor, + reg_branches: Optional[nn.Module] = None, + **kwargs) -> Tuple[Tensor]: + """Forward function of Transformer decoder. + + Args: + query (Tensor): The input queries, has shape (bs, num_queries, + dim). + query_pos (Tensor): The input positional query, has shape + (bs, num_queries, dim). It will be added to `query` before + forward function. + value (Tensor): The input values, has shape (bs, num_value, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has + shape (bs, num_queries, 2) with the last dimension arranged + as (cx, cy). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). 
+ level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reg_branches: (obj:`nn.ModuleList`, optional): Used for refining + the regression results. Only would be passed when + `with_box_refine` is `True`, otherwise would be `None`. + + Returns: + tuple[Tensor]: Outputs of Deformable Transformer Decoder. + + - output (Tensor): Output embeddings of the last decoder, has + shape (num_queries, bs, embed_dims) when `return_intermediate` + is `False`. Otherwise, Intermediate output embeddings of all + decoder layers, has shape (num_decoder_layers, num_queries, bs, + embed_dims). + - reference_points (Tensor): The reference of the last decoder + layer, has shape (bs, num_queries, 4) when `return_intermediate` + is `False`. Otherwise, Intermediate references of all decoder + layers, has shape (num_decoder_layers, bs, num_queries, 4). The + coordinates are arranged as (cx, cy, w, h) + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for layer_id, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + query_pos=query_pos, + value=value, + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points_input, + **kwargs) + + if reg_branches is not None: + tmp_reg_preds = reg_branches[layer_id](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp_reg_preds + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp_reg_preds + new_reference_points[..., :2] = tmp_reg_preds[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +class DeformableDetrTransformerEncoderLayer(DetrTransformerEncoderLayer): + """Encoder layer of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize self_attn, ffn, and norms.""" + self.self_attn = MultiScaleDeformableAttention(**self.self_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) + + +class DeformableDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): + """Decoder layer of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize self_attn, cross-attn, ffn, and norms.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + 
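+        # Sub-module order at forward time (inherited from the base DETR
+        # decoder layer) is self-attention -> deformable cross-attention ->
+        # FFN, with a post-norm after each of the three steps, which is why
+        # three norm layers are built below.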
self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) diff --git a/mmpose/models/heads/transformer_heads/transformers/detr_layers.py b/mmpose/models/heads/transformer_heads/transformers/detr_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..a669c5dda6c7693d405be0abc11b7486cce896e7 --- /dev/null +++ b/mmpose/models/heads/transformer_heads/transformers/detr_layers.py @@ -0,0 +1,354 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmengine import ConfigDict +from mmengine.model import BaseModule, ModuleList +from torch import Tensor + +from mmpose.utils.typing import ConfigType, OptConfigType + + +class DetrTransformerEncoder(BaseModule): + """Encoder of DETR. + + Args: + num_layers (int): Number of encoder layers. + layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder + layer. All the layers will share the same config. + init_cfg (:obj:`ConfigDict` or dict, optional): the config to control + the initialization. Defaults to None. + """ + + def __init__(self, + num_layers: int, + layer_cfg: ConfigType, + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + self.num_layers = num_layers + self.layer_cfg = layer_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + self.layers = ModuleList([ + DetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of encoder. + + Args: + query (Tensor): Input queries of encoder, has shape + (bs, num_queries, dim). + query_pos (Tensor): The positional embeddings of the queries, has + shape (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + + Returns: + Tensor: Has shape (bs, num_queries, dim) if `batch_first` is + `True`, otherwise (num_queries, bs, dim). + """ + for layer in self.layers: + query = layer(query, query_pos, key_padding_mask, **kwargs) + return query + + +class DetrTransformerDecoder(BaseModule): + """Decoder of DETR. + + Args: + num_layers (int): Number of decoder layers. + layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder + layer. All the layers will share the same config. + post_norm_cfg (:obj:`ConfigDict` or dict, optional): Config of the + post normalization layer. Defaults to `LN`. + return_intermediate (bool, optional): Whether to return outputs of + intermediate layers. Defaults to `True`, + init_cfg (:obj:`ConfigDict` or dict, optional): the config to control + the initialization. Defaults to None. 
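+
+    Example (illustrative; the values are assumed)::
+
+        >>> decoder = DetrTransformerDecoder(
+        ...     num_layers=6,
+        ...     layer_cfg=dict(
+        ...         self_attn_cfg=dict(embed_dims=256, num_heads=8),
+        ...         cross_attn_cfg=dict(embed_dims=256, num_heads=8),
+        ...         ffn_cfg=dict(embed_dims=256, feedforward_channels=1024)))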
+ """ + + def __init__(self, + num_layers: int, + layer_cfg: ConfigType, + post_norm_cfg: OptConfigType = dict(type='LN'), + return_intermediate: bool = True, + init_cfg: Union[dict, ConfigDict] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.layer_cfg = layer_cfg + self.num_layers = num_layers + self.post_norm_cfg = post_norm_cfg + self.return_intermediate = return_intermediate + self._init_layers() + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + DetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] + + def forward(self, query: Tensor, key: Tensor, value: Tensor, + query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor, + **kwargs) -> Tensor: + """Forward function of decoder + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor): The input key, has shape (bs, num_keys, dim). + value (Tensor): The input value with the same shape as `key`. + query_pos (Tensor): The positional encoding for `query`, with the + same shape as `query`. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + + Returns: + Tensor: The forwarded results will have shape + (num_decoder_layers, bs, num_queries, dim) if + `return_intermediate` is `True` else (1, bs, num_queries, dim). + """ + intermediate = [] + for layer in self.layers: + query = layer( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + key_padding_mask=key_padding_mask, + **kwargs) + if self.return_intermediate: + intermediate.append(self.post_norm(query)) + query = self.post_norm(query) + + if self.return_intermediate: + return torch.stack(intermediate) + + return query.unsqueeze(0) + + +class DetrTransformerEncoderLayer(BaseModule): + """Implements encoder layer in DETR transformer. + + Args: + self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self + attention. + ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config for + normalization layers. All the layers will share the same + config. Defaults to `LN`. + init_cfg (:obj:`ConfigDict` or dict, optional): Config to control + the initialization. Defaults to None. + """ + + def __init__(self, + self_attn_cfg: OptConfigType = dict( + embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg: OptConfigType = dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True)), + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.self_attn_cfg = self_attn_cfg + if 'batch_first' not in self.self_attn_cfg: + self.self_attn_cfg['batch_first'] = True + else: + assert self.self_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' 
+ + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize self-attention, FFN, and normalization.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of an encoder layer. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, with + the same shape as `query`. + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor. has shape (bs, num_queries). + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[0](query) + query = self.ffn(query) + query = self.norms[1](query) + + return query + + +class DetrTransformerDecoderLayer(BaseModule): + """Implements decoder layer in DETR transformer. + + Args: + self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self + attention. + cross_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for cross + attention. + ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config for + normalization layers. All the layers will share the same + config. Defaults to `LN`. + init_cfg (:obj:`ConfigDict` or dict, optional): Config to control + the initialization. Defaults to None. + """ + + def __init__(self, + self_attn_cfg: OptConfigType = dict( + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + cross_attn_cfg: OptConfigType = dict( + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + ffn_cfg: OptConfigType = dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.self_attn_cfg = self_attn_cfg + self.cross_attn_cfg = cross_attn_cfg + if 'batch_first' not in self.self_attn_cfg: + self.self_attn_cfg['batch_first'] = True + else: + assert self.self_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' + + if 'batch_first' not in self.cross_attn_cfg: + self.cross_attn_cfg['batch_first'] = True + else: + assert self.cross_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' 
+ + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize self-attention, FFN, and normalization.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.cross_attn = MultiheadAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) + + def forward(self, + query: Tensor, + key: Tensor = None, + value: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_mask: Tensor = None, + cross_attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + **kwargs) -> Tensor: + """ + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + value (Tensor, optional): The input value, has the same shape as + `key`, as in `nn.MultiheadAttention.forward`. If `None`, the + `key` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be added + to `query` before forward function. Defaults to `None`. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not `None`, it will be added to + `key` before forward function. If None, and `query_pos` has the + same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor, optional): The `key_padding_mask` of + `self_attn` input. ByteTensor, has shape (bs, num_value). + Defaults to None. + + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_mask, + **kwargs) + query = self.norms[0](query) + query = self.cross_attn( + query=query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + return query diff --git a/mmpose/models/heads/transformer_heads/transformers/utils.py b/mmpose/models/heads/transformer_heads/transformers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7c086dc838bc45347f1c7de9f3a7718e15a3cd --- /dev/null +++ b/mmpose/models/heads/transformer_heads/transformers/utils.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn.functional as F +from mmcv.cnn import Linear +from mmengine.model import BaseModule, ModuleList +from torch import Tensor + + +class FFN(BaseModule): + """Very simple multi-layer perceptron with relu. Mostly used in DETR series + detectors. + + Args: + input_dim (int): Feature dim of the input tensor. + hidden_dim (int): Feature dim of the hidden layer. + output_dim (int): Feature dim of the output tensor. + num_layers (int): Number of FFN layers.. 
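+
+    Example (illustrative; mirrors how the EDPose out head builds its box
+    regressor)::
+
+        >>> import torch
+        >>> ffn = FFN(input_dim=256, hidden_dim=256, output_dim=4,
+        ...           num_layers=3)
+        >>> ffn(torch.rand(100, 2, 256)).shape
+        torch.Size([100, 2, 4])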
+ """ + + def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, + num_layers: int) -> None: + super().__init__() + + self.num_layers = num_layers + + self.layers = ModuleList() + self.layers.append(Linear(input_dim, hidden_dim)) + for _ in range(num_layers - 2): + self.layers.append(Linear(hidden_dim, hidden_dim)) + self.layers.append(Linear(hidden_dim, output_dim)) + + def forward(self, x: Tensor) -> Tensor: + """Forward function of FFN. + + Args: + x (Tensor): The input feature, has shape + (num_queries, bs, input_dim). + Returns: + Tensor: The output feature, has shape + (num_queries, bs, output_dim). + """ + for i, layer in enumerate(self.layers): + x = layer(x) + if i < self.num_layers - 1: + x = F.relu(x) + return x + + +class PositionEmbeddingSineHW(BaseModule): + """This is a more standard version of the position embedding, very similar + to the one used by the Attention is all you need paper, generalized to work + on images.""" + + def __init__(self, + num_pos_feats=64, + temperatureH=10000, + temperatureW=10000, + normalize=False, + scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperatureH = temperatureH + self.temperatureW = temperatureW + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError('normalize should be True if scale is passed') + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, mask: Tensor): + + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_tx = torch.arange( + self.num_pos_feats, dtype=torch.float32, device=mask.device) + dim_tx = self.temperatureW**(2 * (dim_tx // 2) / self.num_pos_feats) + pos_x = x_embed[:, :, :, None] / dim_tx + + dim_ty = torch.arange( + self.num_pos_feats, dtype=torch.float32, device=mask.device) + dim_ty = self.temperatureH**(2 * (dim_ty // 2) / self.num_pos_feats) + pos_y = y_embed[:, :, :, None] / dim_ty + + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + + return pos diff --git a/mmpose/models/losses/__init__.py b/mmpose/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9fbf6ba1de561a5d772a224e5d806c5996aacff2 --- /dev/null +++ b/mmpose/models/losses/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .ae_loss import AssociativeEmbeddingLoss +from .bbox_loss import IoULoss +from .classification_loss import (BCELoss, JSDiscretLoss, KLDiscretLoss, + VariFocalLoss) +from .fea_dis_loss import FeaLoss +from .heatmap_loss import (AdaptiveWingLoss, KeypointMSELoss, + KeypointOHKMMSELoss, MLECCLoss, + OKSHeatmapLoss, CalibrationLoss) +from .logit_dis_loss import KDLoss +from .loss_wrappers import CombinedLoss, MultipleLossWrapper +from .regression_loss import (BoneLoss, L1Loss, MPJPELoss, + MPJPEVelocityJointLoss, MSELoss, OKSLoss, + RLELoss, SemiSupervisionLoss, SmoothL1Loss, + SoftWeightSmoothL1Loss, SoftWingLoss, WingLoss, + L1LogLoss) + +__all__ = [ + 'KeypointMSELoss', 'KeypointOHKMMSELoss', 'SmoothL1Loss', 'WingLoss', + 'MPJPELoss', 'MSELoss', 'L1Loss', 'BCELoss', 'BoneLoss', + 'SemiSupervisionLoss', 'SoftWingLoss', 'AdaptiveWingLoss', 'RLELoss', + 'KLDiscretLoss', 'MultipleLossWrapper', 'JSDiscretLoss', 'CombinedLoss', + 'AssociativeEmbeddingLoss', 'SoftWeightSmoothL1Loss', + 'MPJPEVelocityJointLoss', 'FeaLoss', 'KDLoss', 'OKSLoss', 'IoULoss', + 'VariFocalLoss', 'MLECCLoss', 'L1LogLoss', 'OKSHeatmapLoss', + 'CalibrationLoss' +] diff --git a/mmpose/models/losses/ae_loss.py b/mmpose/models/losses/ae_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1e08181beaf835238596d95fe509b122c64b3d --- /dev/null +++ b/mmpose/models/losses/ae_loss.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import List, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class AssociativeEmbeddingLoss(nn.Module): + """Associative Embedding loss. + + Details can be found in + `Associative Embedding `_ + + Note: + + - batch size: B + - instance number: N + - keypoint number: K + - keypoint dimension: D + - embedding tag dimension: L + - heatmap size: [W, H] + + Args: + loss_weight (float): Weight of the loss. Defaults to 1.0 + push_loss_factor (float): A factor that controls the weight between + the push loss and the pull loss. Defaults to 0.5 + """ + + def __init__(self, + loss_weight: float = 1.0, + push_loss_factor: float = 0.5) -> None: + super().__init__() + self.loss_weight = loss_weight + self.push_loss_factor = push_loss_factor + + def _ae_loss_per_image(self, tags: Tensor, keypoint_indices: Tensor): + """Compute associative embedding loss for one image. 
+ + Args: + tags (Tensor): Tagging heatmaps in shape (K*L, H, W) + keypoint_indices (Tensor): Ground-truth keypint position indices + in shape (N, K, 2) + """ + K = keypoint_indices.shape[1] + C, H, W = tags.shape + L = C // K + + tags = tags.view(L, K, H * W) + instance_tags = [] + instance_kpt_tags = [] + + for keypoint_indices_n in keypoint_indices: + _kpt_tags = [] + for k in range(K): + if keypoint_indices_n[k, 1]: + _kpt_tags.append(tags[:, k, keypoint_indices_n[k, 0]]) + + if _kpt_tags: + kpt_tags = torch.stack(_kpt_tags) + instance_kpt_tags.append(kpt_tags) + instance_tags.append(kpt_tags.mean(dim=0)) + + N = len(instance_kpt_tags) # number of instances with valid keypoints + + if N == 0: + pull_loss = tags.new_zeros(size=(), requires_grad=True) + push_loss = tags.new_zeros(size=(), requires_grad=True) + else: + pull_loss = sum( + F.mse_loss(_kpt_tags, _tag.expand_as(_kpt_tags)) + for (_kpt_tags, _tag) in zip(instance_kpt_tags, instance_tags)) + + if N == 1: + push_loss = tags.new_zeros(size=(), requires_grad=True) + else: + tag_mat = torch.stack(instance_tags) # (N, L) + diff = tag_mat[None] - tag_mat[:, None] # (N, N, L) + push_loss = torch.sum(torch.exp(-diff.pow(2))) + + # normalization + eps = 1e-6 + pull_loss = pull_loss / (N + eps) + push_loss = push_loss / ((N - 1) * N + eps) + + return pull_loss, push_loss + + def forward(self, tags: Tensor, keypoint_indices: Union[List[Tensor], + Tensor]): + """Compute associative embedding loss on a batch of data. + + Args: + tags (Tensor): Tagging heatmaps in shape (B, L*K, H, W) + keypoint_indices (Tensor|List[Tensor]): Ground-truth keypint + position indices represented by a Tensor in shape + (B, N, K, 2), or a list of B Tensors in shape (N_i, K, 2) + Each keypoint's index is represented as [i, v], where i is the + position index in the heatmap (:math:`i=y*w+x`) and v is the + visibility + + Returns: + tuple: + - pull_loss (Tensor) + - push_loss (Tensor) + """ + + assert tags.shape[0] == len(keypoint_indices) + + pull_loss = 0. + push_loss = 0. + + for i in range(tags.shape[0]): + _pull, _push = self._ae_loss_per_image(tags[i], + keypoint_indices[i]) + pull_loss += _pull * self.loss_weight + push_loss += _push * self.loss_weight * self.push_loss_factor + + return pull_loss, push_loss diff --git a/mmpose/models/losses/bbox_loss.py b/mmpose/models/losses/bbox_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..2694076b26e59625e52ac0fc7fa1045c39574b72 --- /dev/null +++ b/mmpose/models/losses/bbox_loss.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.registry import MODELS +from mmpose.structures.bbox import bbox_overlaps + + +@MODELS.register_module() +class IoULoss(nn.Module): + """Binary Cross Entropy loss. + + Args: + reduction (str): Options are "none", "mean" and "sum". + eps (float): Epsilon to avoid log(0). + loss_weight (float): Weight of the loss. Default: 1.0. + mode (str): Loss scaling mode, including "linear", "square", and "log". 
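A toy invocation of the `AssociativeEmbeddingLoss` defined in `ae_loss.py` above, sketched under the assumption that this mmpose fork is installed so the class is importable from `mmpose.models.losses`; the tag dimension, heatmap size, and keypoint indices are made-up values chosen only to show the expected input layout:

import torch
from mmpose.models.losses import AssociativeEmbeddingLoss

B, K, L, H, W = 2, 3, 1, 8, 8                  # batch, keypoints, tag dim, heatmap size
tags = torch.rand(B, K * L, H, W, requires_grad=True)

# keypoint_indices[b] has shape (N_b, K, 2): [flat position y*W + x, visibility]
keypoint_indices = [
    torch.tensor([[[10, 1], [20, 1], [30, 0]],    # instance 0 (one keypoint invisible)
                  [[40, 1], [50, 1], [60, 1]]]),  # instance 1
    torch.tensor([[[5, 1], [6, 1], [7, 1]]]),     # single instance -> zero push loss
]

loss = AssociativeEmbeddingLoss(loss_weight=1.0, push_loss_factor=0.5)
pull, push = loss(tags, keypoint_indices)
print(pull.item(), push.item())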
+ Default: 'log' + """ + + def __init__(self, + reduction='mean', + mode='log', + eps: float = 1e-16, + loss_weight=1.): + super().__init__() + + assert reduction in ('mean', 'sum', 'none'), f'the argument ' \ + f'`reduction` should be either \'mean\', \'sum\' or \'none\', ' \ + f'but got {reduction}' + + assert mode in ('linear', 'square', 'log'), f'the argument ' \ + f'`reduction` should be either \'linear\', \'square\' or ' \ + f'\'log\', but got {mode}' + + self.reduction = reduction + self.criterion = partial(F.cross_entropy, reduction='none') + self.loss_weight = loss_weight + self.mode = mode + self.eps = eps + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_labels: K + + Args: + output (torch.Tensor[N, K]): Output classification. + target (torch.Tensor[N, K]): Target classification. + """ + ious = bbox_overlaps( + output, target, is_aligned=True).clamp(min=self.eps) + + if self.mode == 'linear': + loss = 1 - ious + elif self.mode == 'square': + loss = 1 - ious.pow(2) + elif self.mode == 'log': + loss = -ious.log() + else: + raise NotImplementedError + + if target_weight is not None: + for i in range(loss.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + loss = loss * target_weight + + if self.reduction == 'sum': + loss = loss.sum() + elif self.reduction == 'mean': + loss = loss.mean() + + return loss * self.loss_weight diff --git a/mmpose/models/losses/classification_loss.py b/mmpose/models/losses/classification_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..0b70d88cfa7739fa42ff4b5c219db26f6e78e987 --- /dev/null +++ b/mmpose/models/losses/classification_loss.py @@ -0,0 +1,333 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class BCELoss(nn.Module): + """Binary Cross Entropy loss. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Weight of the loss. Default: 1.0. + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + before output. Defaults to False. + """ + + def __init__(self, + use_target_weight=False, + loss_weight=1., + reduction='mean', + use_sigmoid=False): + super().__init__() + + assert reduction in ('mean', 'sum', 'none'), f'the argument ' \ + f'`reduction` should be either \'mean\', \'sum\' or \'none\', ' \ + f'but got {reduction}' + + self.reduction = reduction + self.use_sigmoid = use_sigmoid + criterion = F.binary_cross_entropy if use_sigmoid \ + else F.binary_cross_entropy_with_logits + self.criterion = partial(criterion, reduction='none') + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_labels: K + + Args: + output (torch.Tensor[N, K]): Output classification. + target (torch.Tensor[N, K]): Target classification. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. 
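A quick numeric check of the three scalings applied by `IoULoss` above, evaluated on hand-picked IoU values rather than going through `bbox_overlaps`, to show how the 'log' mode penalizes low-overlap boxes much harder than the 'linear' mode:

import torch

iou = torch.tensor([0.9, 0.5, 0.1])
print(1 - iou)           # 'linear': 0.1000, 0.5000, 0.9000
print(1 - iou.pow(2))    # 'square': 0.1900, 0.7500, 0.9900
print(-iou.log())        # 'log'   : 0.1054, 0.6931, 2.3026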
+ """ + + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output, target) + if target_weight.dim() == 1: + target_weight = target_weight[:, None] + loss = (loss * target_weight) + else: + loss = self.criterion(output, target) + + if self.reduction == 'sum': + loss = loss.sum() + elif self.reduction == 'mean': + loss = loss.mean() + + return loss * self.loss_weight + + +@MODELS.register_module() +class JSDiscretLoss(nn.Module): + """Discrete JS Divergence loss for DSNT with Gaussian Heatmap. + + Modified from `the official implementation + `_. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + size_average (bool): Option to average the loss by the batch_size. + """ + + def __init__( + self, + use_target_weight=True, + size_average: bool = True, + ): + super(JSDiscretLoss, self).__init__() + self.use_target_weight = use_target_weight + self.size_average = size_average + self.kl_loss = nn.KLDivLoss(reduction='none') + + def kl(self, p, q): + """Kullback-Leibler Divergence.""" + + eps = 1e-24 + kl_values = self.kl_loss((q + eps).log(), p) + return kl_values + + def js(self, pred_hm, gt_hm): + """Jensen-Shannon Divergence.""" + + m = 0.5 * (pred_hm + gt_hm) + js_values = 0.5 * (self.kl(pred_hm, m) + self.kl(gt_hm, m)) + return js_values + + def forward(self, pred_hm, gt_hm, target_weight=None): + """Forward function. + + Args: + pred_hm (torch.Tensor[N, K, H, W]): Predicted heatmaps. + gt_hm (torch.Tensor[N, K, H, W]): Target heatmaps. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + + Returns: + torch.Tensor: Loss value. + """ + + if self.use_target_weight: + assert target_weight is not None + assert pred_hm.ndim >= target_weight.ndim + + for i in range(pred_hm.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.js(pred_hm * target_weight, gt_hm * target_weight) + else: + loss = self.js(pred_hm, gt_hm) + + if self.size_average: + loss /= len(gt_hm) + + return loss.sum() + + +@MODELS.register_module() +class KLDiscretLoss(nn.Module): + """Discrete KL Divergence loss for SimCC with Gaussian Label Smoothing. + Modified from `the official implementation. + + `_. + Args: + beta (float): Temperature factor of Softmax. Default: 1.0. + label_softmax (bool): Whether to use Softmax on labels. + Default: False. + label_beta (float): Temperature factor of Softmax on labels. + Default: 1.0. + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + mask (list[int]): Index of masked keypoints. + mask_weight (float): Weight of masked keypoints. Default: 1.0. + """ + + def __init__(self, + beta=1.0, + label_softmax=False, + label_beta=10.0, + use_target_weight=True, + mask=None, + mask_weight=1.0): + super(KLDiscretLoss, self).__init__() + self.beta = beta + self.label_softmax = label_softmax + self.label_beta = label_beta + self.use_target_weight = use_target_weight + self.mask = mask + self.mask_weight = mask_weight + + self.log_softmax = nn.LogSoftmax(dim=1) + self.kl_loss = nn.KLDivLoss(reduction='none') + + def criterion(self, dec_outs, labels): + """Criterion function.""" + log_pt = self.log_softmax(dec_outs * self.beta) + if self.label_softmax: + labels = F.softmax(labels * self.label_beta, dim=1) + loss = torch.mean(self.kl_loss(log_pt, labels), dim=1) + return loss + + def forward(self, pred_simcc, gt_simcc, target_weight): + """Forward function. 
+ + Args: + pred_simcc (Tuple[Tensor, Tensor]): Predicted SimCC vectors of + x-axis and y-axis. + gt_simcc (Tuple[Tensor, Tensor]): Target representations. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + """ + N, K, _ = pred_simcc[0].shape + loss = 0 + + if self.use_target_weight: + weight = target_weight.reshape(-1) + else: + weight = 1. + + for pred, target in zip(pred_simcc, gt_simcc): + pred = pred.reshape(-1, pred.size(-1)) + target = target.reshape(-1, target.size(-1)) + + t_loss = self.criterion(pred, target).mul(weight) + + if self.mask is not None: + t_loss = t_loss.reshape(N, K) + t_loss[:, self.mask] = t_loss[:, self.mask] * self.mask_weight + + loss = loss + t_loss.sum() + + return loss / K + + +@MODELS.register_module() +class InfoNCELoss(nn.Module): + """InfoNCE loss for training a discriminative representation space with a + contrastive manner. + + `Representation Learning with Contrastive Predictive Coding + arXiv: `_. + + Args: + temperature (float, optional): The temperature to use in the softmax + function. Higher temperatures lead to softer probability + distributions. Defaults to 1.0. + loss_weight (float, optional): The weight to apply to the loss. + Defaults to 1.0. + """ + + def __init__(self, temperature: float = 1.0, loss_weight=1.0) -> None: + super(InfoNCELoss, self).__init__() + assert temperature > 0, f'the argument `temperature` must be ' \ + f'positive, but got {temperature}' + self.temp = temperature + self.loss_weight = loss_weight + + def forward(self, features: torch.Tensor) -> torch.Tensor: + """Computes the InfoNCE loss. + + Args: + features (Tensor): A tensor containing the feature + representations of different samples. + + Returns: + Tensor: A tensor of shape (1,) containing the InfoNCE loss. + """ + n = features.size(0) + features_norm = F.normalize(features, dim=1) + logits = features_norm.mm(features_norm.t()) / self.temp + targets = torch.arange(n, dtype=torch.long, device=features.device) + loss = F.cross_entropy(logits, targets, reduction='sum') + return loss * self.loss_weight + + +@MODELS.register_module() +class VariFocalLoss(nn.Module): + """Varifocal loss. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Weight of the loss. Default: 1.0. + alpha (float): A balancing factor for the negative part of + Varifocal Loss. Defaults to 0.75. + gamma (float): Gamma parameter for the modulating factor. + Defaults to 2.0. + """ + + def __init__(self, + use_target_weight=False, + loss_weight=1., + reduction='mean', + alpha=0.75, + gamma=2.0): + super().__init__() + + assert reduction in ('mean', 'sum', 'none'), f'the argument ' \ + f'`reduction` should be either \'mean\', \'sum\' or \'none\', ' \ + f'but got {reduction}' + + self.reduction = reduction + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + self.alpha = alpha + self.gamma = gamma + + def criterion(self, output, target): + label = (target > 1e-4).to(target) + weight = self.alpha * output.sigmoid().pow( + self.gamma) * (1 - label) + target + output = output.clip(min=-10, max=10) + vfl = ( + F.binary_cross_entropy_with_logits( + output, target, reduction='none') * weight) + return vfl + + def forward(self, output, target, target_weight=None): + """Forward function. 
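A sketch of how `KLDiscretLoss` above is typically fed: predictions and labels are (x, y) tuples of SimCC vectors. It assumes this mmpose fork is installed; the bin counts and `beta=10.0` are illustrative values, and with `label_softmax=True` the raw label logits are softened inside the loss:

import torch
from mmpose.models.losses import KLDiscretLoss

N, K, Wx, Wy = 2, 17, 192, 256                     # SimCC bins along x and y
pred_x, pred_y = torch.randn(N, K, Wx), torch.randn(N, K, Wy)
gt_x, gt_y = torch.randn(N, K, Wx), torch.randn(N, K, Wy)
target_weight = torch.ones(N, K)

loss = KLDiscretLoss(beta=10.0, label_softmax=True, use_target_weight=True)
print(loss((pred_x, pred_y), (gt_x, gt_y), target_weight))   # scalar, averaged over K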
+ + Note: + - batch_size: N + - num_labels: K + + Args: + output (torch.Tensor[N, K]): Output classification. + target (torch.Tensor[N, K]): Target classification. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output, target) + if target_weight.dim() == 1: + target_weight = target_weight.unsqueeze(1) + loss = (loss * target_weight) + else: + loss = self.criterion(output, target) + + loss[torch.isinf(loss)] = 0.0 + loss[torch.isnan(loss)] = 0.0 + + if self.reduction == 'sum': + loss = loss.sum() + elif self.reduction == 'mean': + loss = loss.mean() + + return loss * self.loss_weight diff --git a/mmpose/models/losses/fea_dis_loss.py b/mmpose/models/losses/fea_dis_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b90ca9d24f56139de25ed95b8f2d19e6012cb516 --- /dev/null +++ b/mmpose/models/losses/fea_dis_loss.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class FeaLoss(nn.Module): + """PyTorch version of feature-based distillation from DWPose Modified from + the official implementation. + + + Args: + student_channels(int): Number of channels in the student's feature map. + teacher_channels(int): Number of channels in the teacher's feature map. + alpha_fea (float, optional): Weight of dis_loss. Defaults to 0.00007 + """ + + def __init__( + self, + name, + use_this, + student_channels, + teacher_channels, + alpha_fea=0.00007, + ): + super(FeaLoss, self).__init__() + self.alpha_fea = alpha_fea + + if teacher_channels != student_channels: + self.align = nn.Conv2d( + student_channels, + teacher_channels, + kernel_size=1, + stride=1, + padding=0) + else: + self.align = None + + def forward(self, preds_S, preds_T): + """Forward function. + + Args: + preds_S(Tensor): Bs*C*H*W, student's feature map + preds_T(Tensor): Bs*C*H*W, teacher's feature map + """ + + if self.align is not None: + outs = self.align(preds_S) + else: + outs = preds_S + + loss = self.get_dis_loss(outs, preds_T) + + return loss + + def get_dis_loss(self, preds_S, preds_T): + loss_mse = nn.MSELoss(reduction='sum') + N, C, H, W = preds_T.shape + + dis_loss = loss_mse(preds_S, preds_T) / N * self.alpha_fea + + return dis_loss diff --git a/mmpose/models/losses/heatmap_loss.py b/mmpose/models/losses/heatmap_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..908e3636bdf922fee10fbee0c06bc084b18a0940 --- /dev/null +++ b/mmpose/models/losses/heatmap_loss.py @@ -0,0 +1,854 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class KeypointMSELoss(nn.Module): + """MSE loss for heatmaps. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. 
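A minimal call to the `FeaLoss` feature-distillation module above, assuming the fork is installed; `name='loss_fea'` and `use_this=True` are placeholder constructor values (the class does not store them), and the channel counts are made up so the 1x1 alignment convolution is exercised:

import torch
from mmpose.models.losses import FeaLoss

fea_loss = FeaLoss(name='loss_fea', use_this=True,
                   student_channels=256, teacher_channels=320, alpha_fea=7e-5)
student = torch.randn(2, 256, 64, 48)   # Bs x C_student x H x W
teacher = torch.randn(2, 320, 64, 48)   # Bs x C_teacher x H x W
print(fea_loss(student, teacher))       # sum-MSE / Bs * alpha_fea after channel alignment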
Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + loss_weight: float = 1.): + super().__init__() + self.use_target_weight = use_target_weight + self.skip_empty_channel = skip_empty_channel + self.loss_weight = loss_weight + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None, + per_keypoint: bool = False, + per_pixel: bool = False) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + + _mask = self._get_mask(target, target_weights, mask) + + _loss = F.mse_loss(output, target, reduction='none') + + if _mask is not None: + loss = _loss * _mask + + if per_pixel: + pass + elif per_keypoint: + loss = loss.mean(dim=(2, 3)) + else: + loss = loss.mean() + + return loss * self.loss_weight + + def _get_mask(self, target: Tensor, target_weights: Optional[Tensor], + mask: Optional[Tensor]) -> Optional[Tensor]: + """Generate the heatmap mask w.r.t. the given mask, target weight and + `skip_empty_channel` setting. + + Returns: + Tensor: The mask in shape (B, K, *) or ``None`` if no mask is + needed. + """ + # Given spatial mask + if mask is not None: + # check mask has matching type with target + assert (mask.ndim == target.ndim and all( + d_m == d_t or d_m == 1 + for d_m, d_t in zip(mask.shape, target.shape))), ( + f'mask and target have mismatched shapes {mask.shape} v.s.' + f'{target.shape}') + + # Mask by target weights (keypoint-wise mask) + if target_weights is not None: + # check target weight has matching shape with target + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. {target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + _mask = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + # Mask by ``skip_empty_channel`` + if self.skip_empty_channel: + _mask = (target != 0).flatten(2).any(dim=2) + ndim_pad = target.ndim - _mask.ndim + _mask = _mask.view(_mask.shape + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + return mask + + +@MODELS.register_module() +class CombinedTargetMSELoss(nn.Module): + """MSE loss for combined target. + + CombinedTarget: The combination of classification target + (response map) and regression target (offset map). + Paper ref: Huang et al. The Devil is in the Details: Delving into + Unbiased Data Processing for Human Pose Estimation (CVPR 2020). + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + loss_weight (float): Weight of the loss. 
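Typical shapes for the `KeypointMSELoss` above, assuming the fork is installed. Note that its `forward` only assigns `loss` once a mask is derived, so it should be called with `target_weights` (or an explicit `mask`, or `skip_empty_channel=True`); the keypoint index zeroed out below is arbitrary:

import torch
from mmpose.models.losses import KeypointMSELoss

B, K, H, W = 2, 17, 64, 48
pred, gt = torch.rand(B, K, H, W), torch.rand(B, K, H, W)
weights = torch.ones(B, K)
weights[:, 5] = 0.                                       # ignore keypoint 5 everywhere

loss = KeypointMSELoss(use_target_weight=True)
print(loss(pred, gt, weights))                           # scalar
print(loss(pred, gt, weights, per_keypoint=True).shape)  # torch.Size([2, 17])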
Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + loss_weight: float = 1.): + super().__init__() + self.criterion = nn.MSELoss(reduction='mean') + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output: Tensor, target: Tensor, + target_weights: Tensor) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_channels: C + - heatmaps height: H + - heatmaps weight: W + - num_keypoints: K + Here, C = 3 * K + + Args: + output (Tensor): The output feature maps with shape [B, C, H, W]. + target (Tensor): The target feature maps with shape [B, C, H, W]. + target_weights (Tensor): The target weights of differet keypoints, + with shape [B, K]. + + Returns: + Tensor: The calculated loss. + """ + batch_size = output.size(0) + num_channels = output.size(1) + heatmaps_pred = output.reshape( + (batch_size, num_channels, -1)).split(1, 1) + heatmaps_gt = target.reshape( + (batch_size, num_channels, -1)).split(1, 1) + loss = 0. + num_joints = num_channels // 3 + for idx in range(num_joints): + heatmap_pred = heatmaps_pred[idx * 3].squeeze() + heatmap_gt = heatmaps_gt[idx * 3].squeeze() + offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze() + offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze() + offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze() + offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze() + if self.use_target_weight: + target_weight = target_weights[:, idx, None] + heatmap_pred = heatmap_pred * target_weight + heatmap_gt = heatmap_gt * target_weight + # classification loss + loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) + # regression loss + loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred, + heatmap_gt * offset_x_gt) + loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred, + heatmap_gt * offset_y_gt) + return loss / num_joints * self.loss_weight + + +@MODELS.register_module() +class KeypointOHKMMSELoss(nn.Module): + """MSE loss with online hard keypoint mining. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + topk (int): Only top k joint losses are kept. Defaults to 8 + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + topk: int = 8, + loss_weight: float = 1.): + super().__init__() + assert topk > 0 + self.criterion = nn.MSELoss(reduction='none') + self.use_target_weight = use_target_weight + self.topk = topk + self.loss_weight = loss_weight + + def _ohkm(self, losses: Tensor) -> Tensor: + """Online hard keypoint mining. + + Note: + - batch_size: B + - num_keypoints: K + + Args: + loss (Tensor): The losses with shape [B, K] + + Returns: + Tensor: The calculated loss. + """ + ohkm_loss = 0. + B = losses.shape[0] + for i in range(B): + sub_loss = losses[i] + _, topk_idx = torch.topk( + sub_loss, k=self.topk, dim=0, sorted=False) + tmp_loss = torch.gather(sub_loss, 0, topk_idx) + ohkm_loss += torch.sum(tmp_loss) / self.topk + ohkm_loss /= B + return ohkm_loss + + def forward(self, output: Tensor, target: Tensor, + target_weights: Tensor) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W]. + target (Tensor): The target heatmaps with shape [B, K, H, W]. + target_weights (Tensor): The target weights of differet keypoints, + with shape [B, K]. 
+ + Returns: + Tensor: The calculated loss. + """ + num_keypoints = output.size(1) + if num_keypoints < self.topk: + raise ValueError(f'topk ({self.topk}) should not be ' + f'larger than num_keypoints ({num_keypoints}).') + + losses = [] + for idx in range(num_keypoints): + if self.use_target_weight: + target_weight = target_weights[:, idx, None, None] + losses.append( + self.criterion(output[:, idx] * target_weight, + target[:, idx] * target_weight)) + else: + losses.append(self.criterion(output[:, idx], target[:, idx])) + + losses = [loss.mean(dim=(1, 2)).unsqueeze(dim=1) for loss in losses] + losses = torch.cat(losses, dim=1) + + return self._ohkm(losses) * self.loss_weight + + +@MODELS.register_module() +class AdaptiveWingLoss(nn.Module): + """Adaptive wing loss. paper ref: 'Adaptive Wing Loss for Robust Face + Alignment via Heatmap Regression' Wang et al. ICCV'2019. + + Args: + alpha (float), omega (float), epsilon (float), theta (float) + are hyper-parameters. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + alpha=2.1, + omega=14, + epsilon=1, + theta=0.5, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.alpha = float(alpha) + self.omega = float(omega) + self.epsilon = float(epsilon) + self.theta = float(theta) + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + batch_size: N + num_keypoints: K + + Args: + pred (torch.Tensor[NxKxHxW]): Predicted heatmaps. + target (torch.Tensor[NxKxHxW]): Target heatmaps. + """ + H, W = pred.shape[2:4] + delta = (target - pred).abs() + + A = self.omega * ( + 1 / (1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) + ) * (self.alpha - target) * (torch.pow( + self.theta / self.epsilon, + self.alpha - target - 1)) * (1 / self.epsilon) + C = self.theta * A - self.omega * torch.log( + 1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) + + losses = torch.where( + delta < self.theta, + self.omega * + torch.log(1 + + torch.pow(delta / self.epsilon, self.alpha - target)), + A * delta - C) + + return torch.mean(losses) + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None): + """Forward function. + + Note: + batch_size: N + num_keypoints: K + + Args: + output (torch.Tensor[N, K, H, W]): Output heatmaps. + target (torch.Tensor[N, K, H, W]): Target heatmaps. + target_weight (torch.Tensor[N, K]): + Weights across different joint types. + """ + if self.use_target_weight: + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. {target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + target_weights = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + loss = self.criterion(output * target_weights, + target * target_weights) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class FocalHeatmapLoss(KeypointMSELoss): + """A class for calculating the modified focal loss for heatmap prediction. + + This loss function is exactly the same as the one used in CornerNet. It + runs faster and costs a little bit more memory. + + `CornerNet: Detecting Objects as Paired Keypoints + arXiv: `_. 
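A short usage sketch for `KeypointOHKMMSELoss` above (fork assumed installed, COCO-like K=17): only the `topk` hardest keypoints per sample contribute, and `forward` raises a `ValueError` if `topk` exceeds the number of keypoints:

import torch
from mmpose.models.losses import KeypointOHKMMSELoss

B, K, H, W = 2, 17, 64, 48
pred, gt = torch.rand(B, K, H, W), torch.rand(B, K, H, W)
weights = torch.ones(B, K)

loss = KeypointOHKMMSELoss(use_target_weight=True, topk=8)
print(loss(pred, gt, weights))   # mean over the 8 largest per-keypoint MSEs per sample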
+ + Arguments: + alpha (int): The alpha parameter in the focal loss equation. + beta (int): The beta parameter in the focal loss equation. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + alpha: int = 2, + beta: int = 4, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + loss_weight: float = 1.0): + super(FocalHeatmapLoss, self).__init__(use_target_weight, + skip_empty_channel, loss_weight) + self.alpha = alpha + self.beta = beta + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None) -> Tensor: + """Calculate the modified focal loss for heatmap prediction. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + _mask = self._get_mask(target, target_weights, mask) + + pos_inds = target.eq(1).float() + neg_inds = target.lt(1).float() + + if _mask is not None: + pos_inds = pos_inds * _mask + neg_inds = neg_inds * _mask + + neg_weights = torch.pow(1 - target, self.beta) + + pos_loss = torch.log(output) * torch.pow(1 - output, + self.alpha) * pos_inds + neg_loss = torch.log(1 - output) * torch.pow( + output, self.alpha) * neg_weights * neg_inds + + num_pos = pos_inds.float().sum() + if num_pos == 0: + loss = -neg_loss.sum() + else: + loss = -(pos_loss.sum() + neg_loss.sum()) / num_pos + return loss * self.loss_weight + + +@MODELS.register_module() +class MLECCLoss(nn.Module): + """Maximum Likelihood Estimation loss for Coordinate Classification. + + This loss function is designed to work with coordinate classification + problems where the likelihood of each target coordinate is maximized. + + Args: + reduction (str): Specifies the reduction to apply to the output: + 'none' | 'mean' | 'sum'. Default: 'mean'. + mode (str): Specifies the mode of calculating loss: + 'linear' | 'square' | 'log'. Default: 'log'. + use_target_weight (bool): If True, uses weighted loss. Different + joint types may have different target weights. Defaults to False. + loss_weight (float): Weight of the loss. Defaults to 1.0. + + Raises: + AssertionError: If the `reduction` or `mode` arguments are not in the + expected choices. + NotImplementedError: If the selected mode is not implemented. 
+ """ + + def __init__(self, + reduction: str = 'mean', + mode: str = 'log', + use_target_weight: bool = False, + loss_weight: float = 1.0): + super().__init__() + assert reduction in ('mean', 'sum', 'none'), \ + f"`reduction` should be either 'mean', 'sum', or 'none', " \ + f'but got {reduction}' + assert mode in ('linear', 'square', 'log'), \ + f"`mode` should be either 'linear', 'square', or 'log', " \ + f'but got {mode}' + + self.reduction = reduction + self.mode = mode + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, outputs, targets, target_weight=None): + """Forward pass for the MLECCLoss. + + Args: + outputs (torch.Tensor): The predicted outputs. + targets (torch.Tensor): The ground truth targets. + target_weight (torch.Tensor, optional): Optional tensor of weights + for each target. + + Returns: + torch.Tensor: Calculated loss based on the specified mode and + reduction. + """ + + assert len(outputs) == len(targets), \ + 'Outputs and targets must have the same length' + + prob = 1.0 + for o, t in zip(outputs, targets): + prob *= (o * t).sum(dim=-1) + + if self.mode == 'linear': + loss = 1.0 - prob + elif self.mode == 'square': + loss = 1.0 - prob.pow(2) + elif self.mode == 'log': + loss = -torch.log(prob + 1e-4) + + loss[torch.isnan(loss)] = 0.0 + + if self.use_target_weight: + assert target_weight is not None + for i in range(loss.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + loss = loss * target_weight + + if self.reduction == 'sum': + loss = loss.flatten(1).sum(dim=1) + elif self.reduction == 'mean': + loss = loss.flatten(1).mean(dim=1) + + return loss * self.loss_weight + + +@MODELS.register_module() +class OKSHeatmapLoss(nn.Module): + """OKS-based loss for heatmaps. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + smoothing_weight: float = 0.2, + gaussian_weight: float = 0.0, + loss_weight: float = 1., + oks_type: str = "minus"): + super().__init__() + self.use_target_weight = use_target_weight + self.skip_empty_channel = skip_empty_channel + self.loss_weight = loss_weight + self.smoothing_weight = smoothing_weight + self.gaussian_weight = gaussian_weight + self.oks_type = oks_type.lower() + + assert self.oks_type in ["minus", "plus", "both"] + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None, + per_pixel: bool = False, + per_keypoint: bool = False) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. 
Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + + assert target.max() <= 1, 'target should be normalized' + assert target.min() >= 0, 'target should be normalized' + + B, K, H, W = output.shape + + _mask = self._get_mask(target, target_weights, mask) + + oks_minus = output * (1-target) + oks_plus = (1-output) * (target) + if self.oks_type == "both": + oks = (oks_minus + oks_plus) / 2 + elif self.oks_type == "minus": + oks = oks_minus + elif self.oks_type == "plus": + oks = oks_plus + else: + raise ValueError(f"oks_type {self.oks_type} not recognized") + + mse = F.mse_loss(output, target, reduction='none') + + # Smoothness loss + sobel_x = torch.tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=torch.float32).view(1, 1, 3, 3).to(output.device) + sobel_y = torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float32).view(1, 1, 3, 3).to(output.device) + gradient_x = F.conv2d(output.reshape(B*K, 1, H, W), sobel_x, padding='same') + gradient_y = F.conv2d(output.reshape(B*K, 1, H, W), sobel_y, padding='same') + gradient = (gradient_x**2 + gradient_y**2).reshape(B, K, H, W) + + if _mask is not None: + oks = oks * _mask + mse = mse * _mask + gradient = gradient * _mask + + + oks_minus_weight = ( + 1 - self.smoothing_weight - self.gaussian_weight + ) + + if per_pixel: + loss = ( + self.smoothing_weight * gradient + + oks_minus_weight * oks + + self.gaussian_weight * mse + ) + elif per_keypoint: + max_gradient, _ = gradient.reshape((B, K, H*W)).max(dim=-1) + loss = ( + oks_minus_weight * oks.sum(dim=(2, 3)) + + self.smoothing_weight * max_gradient + + self.gaussian_weight * mse.mean(dim=(2, 3)) + ) + else: + max_gradient, _ = gradient.reshape((B, K, H*W)).max(dim=-1) + loss = ( + oks_minus_weight * oks.sum(dim=(2, 3)) + + self.smoothing_weight * max_gradient + + self.gaussian_weight * mse.mean(dim=(2, 3)) + ).mean() + + return loss * self.loss_weight + + def _get_mask(self, target: Tensor, target_weights: Optional[Tensor], + mask: Optional[Tensor]) -> Optional[Tensor]: + """Generate the heatmap mask w.r.t. the given mask, target weight and + `skip_empty_channel` setting. + + Returns: + Tensor: The mask in shape (B, K, *) or ``None`` if no mask is + needed. + """ + # Given spatial mask + if mask is not None: + # check mask has matching type with target + assert (mask.ndim == target.ndim and all( + d_m == d_t or d_m == 1 + for d_m, d_t in zip(mask.shape, target.shape))), ( + f'mask and target have mismatched shapes {mask.shape} v.s.' + f'{target.shape}') + + # Mask by target weights (keypoint-wise mask) + if target_weights is not None: + # check target weight has matching shape with target + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. {target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + _mask = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + # Mask by ``skip_empty_channel`` + if self.skip_empty_channel: + _mask = (target != 0).flatten(2).any(dim=2) + ndim_pad = target.ndim - _mask.ndim + _mask = _mask.view(_mask.shape + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + return mask + + +@MODELS.register_module() + +class CalibrationLoss(nn.Module): + """OKS-based loss for heatmaps. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. 
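The smoothness term inside `OKSHeatmapLoss` above is just the squared response of Sobel filters; a self-contained sketch on a toy heatmap with a single sharp peak (sizes are arbitrary) showing how spiky predictions yield a large maximum gradient:

import torch
import torch.nn.functional as F

B, K, H, W = 1, 1, 5, 5
heat = torch.zeros(B, K, H, W)
heat[0, 0, 2, 2] = 1.0                                  # one sharp peak
sobel_x = torch.tensor([[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]]).view(1, 1, 3, 3)
sobel_y = sobel_x.transpose(2, 3)                       # transposed kernel, as above
gx = F.conv2d(heat.reshape(B * K, 1, H, W), sobel_x, padding='same')
gy = F.conv2d(heat.reshape(B * K, 1, H, W), sobel_y, padding='same')
grad = (gx ** 2 + gy ** 2).reshape(B, K, H, W)
print(grad.reshape(B, K, -1).max(dim=-1).values)        # large for a spiky heatmap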
+ Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + loss_weight: float = 1., + ignore_bottom_percentile: float = 0.7): + super().__init__() + self.use_target_weight = use_target_weight + self.skip_empty_channel = skip_empty_channel + self.loss_weight = loss_weight + self.ignore_bottom_percentile = ignore_bottom_percentile + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None, + per_pixel: bool = False, + per_keypoint: bool = False) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + + assert target.max() <= 1, 'target should be normalized' + assert target.min() >= 0, 'target should be normalized' + + B, K, H, W = output.shape + + _mask = self._get_mask(target, target_weights, mask) + + pred_probs = output * target + pred_probs_sum = pred_probs.sum(dim=(2,3)) + # threshold = torch.quantile(pred_probs_sum.detach(), self.ignore_bottom_percentile) + # _mask = _mask * (pred_probs_sum > self.ignore_bottom_percentile).view(B, K, 1, 1) + + # print() + # tmp = -torch.log(pred_probs_sum.flatten() + 1e-10)[:, None] + # tmp = torch.cat([pred_probs_sum.flatten()[:, None], tmp, _mask.reshape(tmp.shape)], dim=1) + # print(tmp[:5, :]) + + if per_pixel: + cross_entropy = -torch.log(pred_probs + 1e-10) + loss = cross_entropy * _mask + elif per_keypoint: + cross_entropy = -torch.log(pred_probs_sum + 1e-10) + loss = cross_entropy * _mask + else: + cross_entropy = -torch.log(pred_probs_sum + 1e-10) + loss = cross_entropy * _mask + loss = loss.mean() + + return loss * self.loss_weight + + + def _get_mask(self, target: Tensor, target_weights: Optional[Tensor], + mask: Optional[Tensor]) -> Optional[Tensor]: + """Generate the heatmap mask w.r.t. the given mask, target weight and + `skip_empty_channel` setting. + + Returns: + Tensor: The mask in shape (B, K, *) or ``None`` if no mask is + needed. + """ + # Given spatial mask + if mask is not None: + # check mask has matching type with target + assert (mask.ndim == target.ndim and all( + d_m == d_t or d_m == 1 + for d_m, d_t in zip(mask.shape, target.shape))), ( + f'mask and target have mismatched shapes {mask.shape} v.s.' + f'{target.shape}') + + # Mask by target weights (keypoint-wise mask) + if target_weights is not None: + # check target weight has matching shape with target + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. 
{target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + _mask = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + # Mask by ``skip_empty_channel`` + if self.skip_empty_channel: + _mask = (target != 0).flatten(2).any(dim=2) + ndim_pad = target.ndim - _mask.ndim + _mask = _mask.view(_mask.shape + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + return mask diff --git a/mmpose/models/losses/logit_dis_loss.py b/mmpose/models/losses/logit_dis_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..32906a1c3f1a07723548946322dc637f1761a71b --- /dev/null +++ b/mmpose/models/losses/logit_dis_loss.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class KDLoss(nn.Module): + """PyTorch version of logit-based distillation from DWPose Modified from + the official implementation. + + + Args: + weight (float, optional): Weight of dis_loss. Defaults to 1.0 + """ + + def __init__( + self, + name, + use_this, + weight=1.0, + ): + super(KDLoss, self).__init__() + + self.log_softmax = nn.LogSoftmax(dim=1) + self.kl_loss = nn.KLDivLoss(reduction='none') + self.weight = weight + + def forward(self, pred, pred_t, beta, target_weight): + ls_x, ls_y = pred + lt_x, lt_y = pred_t + + lt_x = lt_x.detach() + lt_y = lt_y.detach() + + num_joints = ls_x.size(1) + loss = 0 + + loss += (self.loss(ls_x, lt_x, beta, target_weight)) + loss += (self.loss(ls_y, lt_y, beta, target_weight)) + + return loss / num_joints + + def loss(self, logit_s, logit_t, beta, weight): + + N = logit_s.shape[0] + + if len(logit_s.shape) == 3: + K = logit_s.shape[1] + logit_s = logit_s.reshape(N * K, -1) + logit_t = logit_t.reshape(N * K, -1) + + # N*W(H) + s_i = self.log_softmax(logit_s * beta) + t_i = F.softmax(logit_t * beta, dim=1) + + # kd + loss_all = torch.sum(self.kl_loss(s_i, t_i), dim=1) + loss_all = loss_all.reshape(N, K).sum(dim=1).mean() + loss_all = self.weight * loss_all + + return loss_all diff --git a/mmpose/models/losses/loss_wrappers.py b/mmpose/models/losses/loss_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..d821661b48a133ffd6c9232d5a6a2d3eb6bf0a50 --- /dev/null +++ b/mmpose/models/losses/loss_wrappers.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict + +import torch.nn as nn + +from mmpose.registry import MODELS +from mmpose.utils.typing import ConfigType + + +@MODELS.register_module() +class MultipleLossWrapper(nn.Module): + """A wrapper to collect multiple loss functions together and return a list + of losses in the same order. + + Args: + losses (list): List of Loss Config + """ + + def __init__(self, losses: list): + super().__init__() + self.num_losses = len(losses) + + loss_modules = [] + for loss_cfg in losses: + t_loss = MODELS.build(loss_cfg) + loss_modules.append(t_loss) + self.loss_modules = nn.ModuleList(loss_modules) + + def forward(self, input_list, target_list, keypoint_weights=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + input_list (List[Tensor]): List of inputs. + target_list (List[Tensor]): List of targets. + keypoint_weights (Tensor[N, K, D]): + Weights across different joint types. 
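A toy call to the logit-distillation `KDLoss` above (fork assumed installed): student and teacher predictions are (x, y) tuples of SimCC logits, `beta` is the softmax temperature, and `name`/`use_this` are placeholder constructor values the module does not use internally:

import torch
from mmpose.models.losses import KDLoss

N, K, Wx, Wy = 2, 17, 192, 256
student = (torch.randn(N, K, Wx), torch.randn(N, K, Wy))
teacher = (torch.randn(N, K, Wx), torch.randn(N, K, Wy))
target_weight = torch.ones(N, K)

kd = KDLoss(name='loss_kd', use_this=True, weight=1.0)
print(kd(student, teacher, beta=1.0, target_weight=target_weight))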
+ """ + assert isinstance(input_list, list), '' + assert isinstance(target_list, list), '' + assert len(input_list) == len(target_list), '' + + losses = [] + for i in range(self.num_losses): + input_i = input_list[i] + target_i = target_list[i] + + loss_i = self.loss_modules[i](input_i, target_i, keypoint_weights) + losses.append(loss_i) + + return losses + + +@MODELS.register_module() +class CombinedLoss(nn.ModuleDict): + """A wrapper to combine multiple loss functions. These loss functions can + have different input type (e.g. heatmaps or regression values), and can + only be involed individually and explixitly. + + Args: + losses (Dict[str, ConfigType]): The names and configs of loss + functions to be wrapped + + Example:: + >>> heatmap_loss_cfg = dict(type='KeypointMSELoss') + >>> ae_loss_cfg = dict(type='AssociativeEmbeddingLoss') + >>> loss_module = CombinedLoss( + ... losses=dict( + ... heatmap_loss=heatmap_loss_cfg, + ... ae_loss=ae_loss_cfg)) + >>> loss_hm = loss_module.heatmap_loss(pred_heatmap, gt_heatmap) + >>> loss_ae = loss_module.ae_loss(pred_tags, keypoint_indices) + """ + + def __init__(self, losses: Dict[str, ConfigType]): + super().__init__() + for loss_name, loss_cfg in losses.items(): + self.add_module(loss_name, MODELS.build(loss_cfg)) diff --git a/mmpose/models/losses/regression_loss.py b/mmpose/models/losses/regression_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..591bfb1b9cde41accdea2fd11456162bc6bfec0e --- /dev/null +++ b/mmpose/models/losses/regression_loss.py @@ -0,0 +1,862 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.registry import MODELS +from ..utils.realnvp import RealNVP + + +@MODELS.register_module() +class RLELoss(nn.Module): + """RLE Loss. + + `Human Pose Regression With Residual Log-Likelihood Estimation + arXiv: `_. + + Code is modified from `the official implementation + `_. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + size_average (bool): Option to average the loss by the batch_size. + residual (bool): Option to add L1 loss and let the flow + learn the residual error distribution. + q_dis (string): Option for the identity Q(error) distribution, + Options: "laplace" or "gaussian" + """ + + def __init__(self, + use_target_weight=False, + size_average=True, + residual=True, + q_distribution='laplace'): + super(RLELoss, self).__init__() + self.size_average = size_average + self.use_target_weight = use_target_weight + self.residual = residual + self.q_distribution = q_distribution + + self.flow_model = RealNVP() + + def forward(self, pred, sigma, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + pred (Tensor[N, K, D]): Output regression. + sigma (Tensor[N, K, D]): Output sigma. + target (Tensor[N, K, D]): Target regression. + target_weight (Tensor[N, K, D]): + Weights across different joint types. 
+ """ + sigma = sigma.sigmoid() + + error = (pred - target) / (sigma + 1e-9) + # (B, K, 2) + log_phi = self.flow_model.log_prob(error.reshape(-1, 2)) + log_phi = log_phi.reshape(target.shape[0], target.shape[1], 1) + log_sigma = torch.log(sigma).reshape(target.shape[0], target.shape[1], + 2) + nf_loss = log_sigma - log_phi + + if self.residual: + assert self.q_distribution in ['laplace', 'gaussian'] + if self.q_distribution == 'laplace': + loss_q = torch.log(sigma * 2) + torch.abs(error) + else: + loss_q = torch.log( + sigma * math.sqrt(2 * math.pi)) + 0.5 * error**2 + + loss = nf_loss + loss_q + else: + loss = nf_loss + + if self.use_target_weight: + assert target_weight is not None + loss *= target_weight + + if self.size_average: + loss /= len(loss) + + return loss.sum() + + +@MODELS.register_module() +class SmoothL1Loss(nn.Module): + """SmoothL1Loss loss. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.smooth_l1_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + assert output.ndim >= target_weight.ndim + + for i in range(output.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class L1LogLoss(nn.Module): + """L1LogLoss loss. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.smooth_l1_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + # Use logarithm to compute relative error + output = torch.log(1 + output) + target = torch.log(1 + target) + + if self.use_target_weight: + assert target_weight is not None + assert output.ndim >= target_weight.ndim + + for i in range(output.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SoftWeightSmoothL1Loss(nn.Module): + """Smooth L1 loss with soft weight for regression. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. 
+ Different joint types may have different target weights. + supervise_empty (bool): Whether to supervise the output with zero + weight. + beta (float): Specifies the threshold at which to change between + L1 and L2 loss. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + use_target_weight=False, + supervise_empty=True, + beta=1.0, + loss_weight=1.): + super().__init__() + + reduction = 'none' if use_target_weight else 'mean' + self.criterion = partial( + self.smooth_l1_loss, reduction=reduction, beta=beta) + + self.supervise_empty = supervise_empty + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + @staticmethod + def smooth_l1_loss(input, target, reduction='none', beta=1.0): + """Re-implement torch.nn.functional.smooth_l1_loss with beta to support + pytorch <= 1.6.""" + delta = input - target + mask = delta.abs() < beta + delta[mask] = (delta[mask]).pow(2) / (2 * beta) + delta[~mask] = delta[~mask].abs() - beta / 2 + + if reduction == 'mean': + return delta.mean() + elif reduction == 'sum': + return delta.sum() + elif reduction == 'none': + return delta + else: + raise ValueError(f'reduction must be \'mean\', \'sum\' or ' + f'\'none\', but got \'{reduction}\'') + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + assert output.ndim >= target_weight.ndim + + for i in range(output.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.criterion(output, target) * target_weight + if self.supervise_empty: + loss = loss.mean() + else: + num_elements = torch.nonzero(target_weight > 0).size()[0] + loss = loss.sum() / max(num_elements, 1.0) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class WingLoss(nn.Module): + """Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation + with Convolutional Neural Networks' Feng et al. CVPR'2018. + + Args: + omega (float): Also referred to as width. + epsilon (float): Also referred to as curvature. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + omega=10.0, + epsilon=2.0, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.omega = omega + self.epsilon = epsilon + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + # constant that smoothly links the piecewise-defined linear + # and nonlinear parts + self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + pred (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. 
+ """ + delta = (target - pred).abs() + losses = torch.where( + delta < self.omega, + self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) + return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N,K,D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SoftWingLoss(nn.Module): + """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face + Alignment' Lin et al. TIP'2021. + + loss = + 1. |x| , if |x| < omega1 + 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 + + Args: + omega1 (float): The first threshold. + omega2 (float): The second threshold. + epsilon (float): Also referred to as curvature. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + omega1=2.0, + omega2=20.0, + epsilon=0.5, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.omega1 = omega1 + self.omega2 = omega2 + self.epsilon = epsilon + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + # constant that smoothly links the piecewise-defined linear + # and nonlinear parts + self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / + self.epsilon) + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + batch_size: N + num_keypoints: K + dimension of keypoints: D (D=2 or D=3) + + Args: + pred (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + """ + delta = (target - pred).abs() + losses = torch.where( + delta < self.omega1, delta, + self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) + return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + batch_size: N + num_keypoints: K + dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class MPJPEVelocityJointLoss(nn.Module): + """MPJPE (Mean Per Joint Position Error) loss. + + Args: + loss_weight (float): Weight of the loss. Default: 1.0. + lambda_scale (float): Factor of the N-MPJPE loss. Default: 0.5. + lambda_3d_velocity (float): Factor of the velocity loss. Default: 20.0. 
+ """ + + def __init__(self, + use_target_weight=False, + loss_weight=1., + lambda_scale=0.5, + lambda_3d_velocity=20.0): + super().__init__() + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + self.lambda_scale = lambda_scale + self.lambda_3d_velocity = lambda_3d_velocity + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N,K,D]): + Weights across different joint types. + """ + norm_output = torch.mean( + torch.sum(torch.square(output), dim=-1, keepdim=True), + dim=-2, + keepdim=True) + norm_target = torch.mean( + torch.sum(target * output, dim=-1, keepdim=True), + dim=-2, + keepdim=True) + + velocity_output = output[..., 1:, :, :] - output[..., :-1, :, :] + velocity_target = target[..., 1:, :, :] - target[..., :-1, :, :] + + if self.use_target_weight: + assert target_weight is not None + mpjpe = torch.mean( + torch.norm((output - target) * target_weight, dim=-1)) + + nmpjpe = torch.mean( + torch.norm( + (norm_target / norm_output * output - target) * + target_weight, + dim=-1)) + + loss_3d_velocity = torch.mean( + torch.norm( + (velocity_output - velocity_target) * target_weight, + dim=-1)) + else: + mpjpe = torch.mean(torch.norm(output - target, dim=-1)) + + nmpjpe = torch.mean( + torch.norm( + norm_target / norm_output * output - target, dim=-1)) + + loss_3d_velocity = torch.mean( + torch.norm(velocity_output - velocity_target, dim=-1)) + + loss = mpjpe + nmpjpe * self.lambda_scale + \ + loss_3d_velocity * self.lambda_3d_velocity + + return loss * self.loss_weight + + +@MODELS.register_module() +class MPJPELoss(nn.Module): + """MPJPE (Mean Per Joint Position Error) loss. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N,K,D]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = torch.mean( + torch.norm((output - target) * target_weight, dim=-1)) + else: + loss = torch.mean(torch.norm(output - target, dim=-1)) + + return loss * self.loss_weight + + +@MODELS.register_module() +class L1Loss(nn.Module): + """L1Loss loss.""" + + def __init__(self, + reduction='mean', + use_target_weight=False, + loss_weight=1.): + super().__init__() + + assert reduction in ('mean', 'sum', 'none'), f'the argument ' \ + f'`reduction` should be either \'mean\', \'sum\' or \'none\', ' \ + f'but got {reduction}' + + self.criterion = partial(F.l1_loss, reduction=reduction) + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. 
+ + Note: + - batch_size: N + - num_keypoints: K + + Args: + output (torch.Tensor[N, K, 2]): Output regression. + target (torch.Tensor[N, K, 2]): Target regression. + target_weight (torch.Tensor[N, K, 2]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + for _ in range(target.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class MSELoss(nn.Module): + """MSE loss for coordinate regression.""" + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.mse_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + + Args: + output (torch.Tensor[N, K, 2]): Output regression. + target (torch.Tensor[N, K, 2]): Target regression. + target_weight (torch.Tensor[N, K, 2]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class BoneLoss(nn.Module): + """Bone length loss. + + Args: + joint_parents (list): Indices of each joint's parent joint. + use_target_weight (bool): Option to use weighted bone loss. + Different bone types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, joint_parents, use_target_weight=False, loss_weight=1.): + super().__init__() + self.joint_parents = joint_parents + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + self.non_root_indices = [] + for i in range(len(self.joint_parents)): + if i != self.joint_parents[i]: + self.non_root_indices.append(i) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K-1]): + Weights across different bone types. + """ + output_bone = torch.norm( + output - output[:, self.joint_parents, :], + dim=-1)[:, self.non_root_indices] + target_bone = torch.norm( + target - target[:, self.joint_parents, :], + dim=-1)[:, self.non_root_indices] + if self.use_target_weight: + assert target_weight is not None + loss = torch.mean( + torch.abs((output_bone * target_weight).mean(dim=0) - + (target_bone * target_weight).mean(dim=0))) + else: + loss = torch.mean( + torch.abs(output_bone.mean(dim=0) - target_bone.mean(dim=0))) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SemiSupervisionLoss(nn.Module): + """Semi-supervision loss for unlabeled data. It is composed of projection + loss and bone loss. + + Paper ref: `3D human pose estimation in video with temporal convolutions + and semi-supervised training` Dario Pavllo et al. CVPR'2019. + + Args: + joint_parents (list): Indices of each joint's parent joint. + projection_loss_weight (float): Weight for projection loss. + bone_loss_weight (float): Weight for bone loss. 
+ warmup_iterations (int): Number of warmup iterations. In the first + `warmup_iterations` iterations, the model is trained only on + labeled data, and semi-supervision loss will be 0. + This is a workaround since currently we cannot access + epoch number in loss functions. Note that the iteration number in + an epoch can be changed due to different GPU numbers in multi-GPU + settings. So please set this parameter carefully. + warmup_iterations = dataset_size // samples_per_gpu // gpu_num + * warmup_epochs + """ + + def __init__(self, + joint_parents, + projection_loss_weight=1., + bone_loss_weight=1., + warmup_iterations=0): + super().__init__() + self.criterion_projection = MPJPELoss( + loss_weight=projection_loss_weight) + self.criterion_bone = BoneLoss( + joint_parents, loss_weight=bone_loss_weight) + self.warmup_iterations = warmup_iterations + self.num_iterations = 0 + + @staticmethod + def project_joints(x, intrinsics): + """Project 3D joint coordinates to 2D image plane using camera + intrinsic parameters. + + Args: + x (torch.Tensor[N, K, 3]): 3D joint coordinates. + intrinsics (torch.Tensor[N, 4] | torch.Tensor[N, 9]): Camera + intrinsics: f (2), c (2), k (3), p (2). + """ + while intrinsics.dim() < x.dim(): + intrinsics.unsqueeze_(1) + f = intrinsics[..., :2] + c = intrinsics[..., 2:4] + _x = torch.clamp(x[:, :, :2] / x[:, :, 2:], -1, 1) + if intrinsics.shape[-1] == 9: + k = intrinsics[..., 4:7] + p = intrinsics[..., 7:9] + + r2 = torch.sum(_x[:, :, :2]**2, dim=-1, keepdim=True) + radial = 1 + torch.sum( + k * torch.cat((r2, r2**2, r2**3), dim=-1), + dim=-1, + keepdim=True) + tan = torch.sum(p * _x, dim=-1, keepdim=True) + _x = _x * (radial + tan) + p * r2 + _x = f * _x + c + return _x + + def forward(self, output, target): + losses = dict() + + self.num_iterations += 1 + if self.num_iterations <= self.warmup_iterations: + return losses + + labeled_pose = output['labeled_pose'] + unlabeled_pose = output['unlabeled_pose'] + unlabeled_traj = output['unlabeled_traj'] + unlabeled_target_2d = target['unlabeled_target_2d'] + intrinsics = target['intrinsics'] + + # projection loss + unlabeled_output = unlabeled_pose + unlabeled_traj + unlabeled_output_2d = self.project_joints(unlabeled_output, intrinsics) + loss_proj = self.criterion_projection(unlabeled_output_2d, + unlabeled_target_2d, None) + losses['proj_loss'] = loss_proj + + # bone loss + loss_bone = self.criterion_bone(unlabeled_pose, labeled_pose, None) + losses['bone_loss'] = loss_bone + + return losses + + +@MODELS.register_module() +class OKSLoss(nn.Module): + """A PyTorch implementation of the Object Keypoint Similarity (OKS) loss as + described in the paper "YOLO-Pose: Enhancing YOLO for Multi Person Pose + Estimation Using Object Keypoint Similarity Loss" by Debapriya et al. + (2022). + + The OKS loss is used for keypoint-based object recognition and consists + of a measure of the similarity between predicted and ground truth + keypoint locations, adjusted by the size of the object in the image. + + The loss function takes as input the predicted keypoint locations, the + ground truth keypoint locations, a mask indicating which keypoints are + valid, and bounding boxes for the objects. + + Args: + metainfo (Optional[str]): Path to a JSON file containing information + about the dataset's annotations. + reduction (str): Options are "none", "mean" and "sum". + eps (float): Epsilon to avoid log(0). + loss_weight (float): Weight of the loss. Default: 1.0. + mode (str): Loss scaling mode, including "linear", "square", and "log". 
+ Default: 'linear' + norm_target_weight (bool): whether to normalize the target weight + with number of visible keypoints. Defaults to False. + """ + + def __init__(self, + metainfo: Optional[str] = None, + reduction='mean', + mode='linear', + eps=1e-8, + norm_target_weight=False, + loss_weight=1.): + super().__init__() + + assert reduction in ('mean', 'sum', 'none'), f'the argument ' \ + f'`reduction` should be either \'mean\', \'sum\' or \'none\', ' \ + f'but got {reduction}' + + assert mode in ('linear', 'square', 'log'), f'the argument ' \ + f'`reduction` should be either \'linear\', \'square\' or ' \ + f'\'log\', but got {mode}' + + self.reduction = reduction + self.loss_weight = loss_weight + self.mode = mode + self.norm_target_weight = norm_target_weight + self.eps = eps + + if metainfo is not None: + metainfo = parse_pose_metainfo(dict(from_file=metainfo)) + sigmas = metainfo.get('sigmas', None) + if sigmas is not None: + self.register_buffer('sigmas', torch.as_tensor(sigmas)) + + def forward(self, output, target, target_weight=None, areas=None): + """Forward function. + + Note: + - batch_size: N + - num_labels: K + + Args: + output (torch.Tensor[N, K, 2]): Output keypoints coordinates. + target (torch.Tensor[N, K, 2]): Target keypoints coordinates.. + target_weight (torch.Tensor[N, K]): Loss weight for each keypoint. + areas (torch.Tensor[N]): Instance size which is adopted as + normalization factor. + """ + dist = torch.norm(output - target, dim=-1) + if areas is not None: + dist = dist / areas.pow(0.5).clip(min=self.eps).unsqueeze(-1) + if hasattr(self, 'sigmas'): + sigmas = self.sigmas.reshape(*((1, ) * (dist.ndim - 1)), -1) + dist = dist / (sigmas * 2) + + oks = torch.exp(-dist.pow(2) / 2) + + if target_weight is not None: + if self.norm_target_weight: + target_weight = target_weight / target_weight.sum( + dim=-1, keepdims=True).clip(min=self.eps) + else: + target_weight = target_weight / target_weight.size(-1) + oks = oks * target_weight + oks = oks.sum(dim=-1) + + if self.mode == 'linear': + loss = 1 - oks + elif self.mode == 'square': + loss = 1 - oks.pow(2) + elif self.mode == 'log': + loss = -oks.log() + else: + raise NotImplementedError() + + if self.reduction == 'sum': + loss = loss.sum() + elif self.reduction == 'mean': + loss = loss.mean() + + return loss * self.loss_weight diff --git a/mmpose/models/necks/__init__.py b/mmpose/models/necks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..90d68013d5f3ce92b61372430a4b7f02f1bedcd0 --- /dev/null +++ b/mmpose/models/necks/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .channel_mapper import ChannelMapper +from .cspnext_pafpn import CSPNeXtPAFPN +from .fmap_proc_neck import FeatureMapProcessor +from .fpn import FPN +from .gap_neck import GlobalAveragePooling +from .hybrid_encoder import HybridEncoder +from .posewarper_neck import PoseWarperNeck +from .yolox_pafpn import YOLOXPAFPN + +__all__ = [ + 'GlobalAveragePooling', 'PoseWarperNeck', 'FPN', 'FeatureMapProcessor', + 'ChannelMapper', 'YOLOXPAFPN', 'CSPNeXtPAFPN', 'HybridEncoder' +] diff --git a/mmpose/models/necks/channel_mapper.py b/mmpose/models/necks/channel_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4148a08903f94a18abaf8aec804aceb9e2ea21 --- /dev/null +++ b/mmpose/models/necks/channel_mapper.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
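# Illustrative sketch (not part of the original module): ChannelMapper below
# projects every backbone scale to a single common channel width so that
# downstream modules can treat all pyramid levels uniformly. A minimal
# functional equivalent, assuming bare Conv2d layers with "same" padding and
# no norm/activation, might look like the following; `naive_channel_mapper`
# is a hypothetical helper used only for illustration.
import torch
import torch.nn as nn


def naive_channel_mapper(feats, out_channels, kernel_size=3):
    """Hypothetical sketch: map each (B, C_i, H_i, W_i) feature map to
    (B, out_channels, H_i, W_i) with one same-padding conv per scale."""
    convs = [
        nn.Conv2d(f.shape[1], out_channels, kernel_size,
                  padding=(kernel_size - 1) // 2) for f in feats
    ]
    return tuple(conv(f) for conv, f in zip(convs, feats))

# Usage sketch, similar in spirit to the doctest in the class docstring below:
#   feats = [torch.rand(1, c, s, s) for c, s in zip([2, 3, 5, 7], [43, 21, 11, 6])]
#   outs = naive_channel_mapper(feats, out_channels=11)  # every output has 11 channels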
+from typing import List, Tuple, Union + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import OptConfigType, OptMultiConfig + + +@MODELS.register_module() +class ChannelMapper(BaseModule): + """Channel Mapper to reduce/increase channels of backbone features. + + This is used to reduce/increase channels of backbone features. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + kernel_size (int, optional): kernel_size for reducing channels (used + at each scale). Default: 3. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Default: None. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + normalization layer. Default: None. + act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + activation layer in ConvModule. Default: dict(type='ReLU'). + num_outs (int, optional): Number of output feature maps. There would + be extra_convs when num_outs larger than the length of in_channels. + init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict], + optional): Initialization config dict. + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = ChannelMapper(in_channels, 11, 3).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__( + self, + in_channels: List[int], + out_channels: int, + kernel_size: int = 3, + conv_cfg: OptConfigType = None, + norm_cfg: OptConfigType = None, + act_cfg: OptConfigType = dict(type='ReLU'), + num_outs: int = None, + bias: Union[bool, str] = 'auto', + init_cfg: OptMultiConfig = dict( + type='Xavier', layer='Conv2d', distribution='uniform') + ) -> None: + super().__init__(init_cfg=init_cfg) + assert isinstance(in_channels, list) + self.extra_convs = None + if num_outs is None: + num_outs = len(in_channels) + self.convs = nn.ModuleList() + for in_channel in in_channels: + self.convs.append( + ConvModule( + in_channel, + out_channels, + kernel_size, + bias=bias, + padding=(kernel_size - 1) // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + if num_outs > len(in_channels): + self.extra_convs = nn.ModuleList() + for i in range(len(in_channels), num_outs): + if i == len(in_channels): + in_channel = in_channels[-1] + else: + in_channel = out_channels + self.extra_convs.append( + ConvModule( + in_channel, + out_channels, + 3, + stride=2, + padding=1, + bias=bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]: + """Forward function.""" + assert len(inputs) == len(self.convs) + outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] + if self.extra_convs: + for i in range(len(self.extra_convs)): + if i == 0: + outs.append(self.extra_convs[0](inputs[-1])) + else: + outs.append(self.extra_convs[i](outs[-1])) + return tuple(outs) diff --git a/mmpose/models/necks/cspnext_pafpn.py b/mmpose/models/necks/cspnext_pafpn.py new file mode 100644 index 
0000000000000000000000000000000000000000..35f4dc2f10df1f36fe0fef9caffaae59edb66c5d --- /dev/null +++ b/mmpose/models/necks/cspnext_pafpn.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Sequence, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import ConfigType, OptMultiConfig +from ..utils import CSPLayer + + +@MODELS.register_module() +class CSPNeXtPAFPN(BaseModule): + """Path Aggregation Network with CSPNeXt blocks. Modified from RTMDet. + + Args: + in_channels (Sequence[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + out_indices (Sequence[int]): Output from which stages. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. + Defaults to 3. + use_depthwise (bool): Whether to use depthwise separable convolution in + blocks. Defaults to False. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Default: 0.5 + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(scale_factor=2, mode='nearest')` + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__( + self, + in_channels: Sequence[int], + out_channels: int, + out_indices=( + 0, + 1, + 2, + ), + num_csp_blocks: int = 3, + use_depthwise: bool = False, + expand_ratio: float = 0.5, + upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'), + conv_cfg: bool = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='Swish'), + init_cfg: OptMultiConfig = dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu') + ) -> None: + super().__init__(init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.out_indices = out_indices + + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + # build top-down blocks + self.upsample = nn.Upsample(**upsample_cfg) + self.reduce_layers = nn.ModuleList() + self.top_down_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1, 0, -1): + self.reduce_layers.append( + ConvModule( + in_channels[idx], + in_channels[idx - 1], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.top_down_blocks.append( + CSPLayer( + in_channels[idx - 1] * 2, + in_channels[idx - 1], + num_blocks=num_csp_blocks, + add_identity=False, + use_depthwise=use_depthwise, + use_cspnext_block=True, + expand_ratio=expand_ratio, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + # build bottom-up blocks + self.downsamples = nn.ModuleList() + self.bottom_up_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1): + self.downsamples.append( + conv( + in_channels[idx], + in_channels[idx], + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottom_up_blocks.append( + CSPLayer( + in_channels[idx] * 2, + in_channels[idx + 1], + num_blocks=num_csp_blocks, + add_identity=False, + 
use_depthwise=use_depthwise, + use_cspnext_block=True, + expand_ratio=expand_ratio, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + if self.out_channels is not None: + self.out_convs = nn.ModuleList() + for i in range(len(in_channels)): + self.out_convs.append( + conv( + in_channels[i], + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.out_convs = conv( + in_channels[-1], + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]: + """ + Args: + inputs (tuple[Tensor]): input features. + + Returns: + tuple[Tensor]: YOLOXPAFPN features. + """ + assert len(inputs) == len(self.in_channels) + + # top-down path + inner_outs = [inputs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_low = inputs[idx - 1] + feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx]( + feat_high) + inner_outs[0] = feat_high + + upsample_feat = self.upsample(feat_high) + + inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx]( + torch.cat([upsample_feat, feat_low], 1)) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsamples[idx](feat_low) + out = self.bottom_up_blocks[idx]( + torch.cat([downsample_feat, feat_high], 1)) + outs.append(out) + + if self.out_channels is not None: + # out convs + for idx, conv in enumerate(self.out_convs): + outs[idx] = conv(outs[idx]) + + return tuple([outs[i] for i in self.out_indices]) diff --git a/mmpose/models/necks/fmap_proc_neck.py b/mmpose/models/necks/fmap_proc_neck.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3a4d7bf44ab07641a4968f143e17c19b24743b --- /dev/null +++ b/mmpose/models/necks/fmap_proc_neck.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.models.utils.ops import resize +from mmpose.registry import MODELS + + +@MODELS.register_module() +class FeatureMapProcessor(nn.Module): + """A PyTorch module for selecting, concatenating, and rescaling feature + maps. + + Args: + select_index (Optional[Union[int, Tuple[int]]], optional): Index or + indices of feature maps to select. Defaults to None, which means + all feature maps are used. + concat (bool, optional): Whether to concatenate the selected feature + maps. Defaults to False. + scale_factor (float, optional): The scaling factor to apply to the + feature maps. Defaults to 1.0. + apply_relu (bool, optional): Whether to apply ReLU on input feature + maps. Defaults to False. + align_corners (bool, optional): Whether to align corners when resizing + the feature maps. Defaults to False. 
+ """ + + def __init__( + self, + select_index: Optional[Union[int, Tuple[int]]] = None, + concat: bool = False, + scale_factor: float = 1.0, + apply_relu: bool = False, + align_corners: bool = False, + ): + super().__init__() + + if isinstance(select_index, int): + select_index = (select_index, ) + self.select_index = select_index + self.concat = concat + + assert ( + scale_factor > 0 + ), f'the argument `scale_factor` must be positive, ' \ + f'but got {scale_factor}' + self.scale_factor = scale_factor + self.apply_relu = apply_relu + self.align_corners = align_corners + + def forward(self, inputs: Union[Tensor, Sequence[Tensor]] + ) -> Union[Tensor, List[Tensor]]: + + if not isinstance(inputs, (tuple, list)): + sequential_input = False + inputs = [inputs] + else: + sequential_input = True + + if self.select_index is not None: + inputs = [inputs[i] for i in self.select_index] + + if self.concat: + inputs = self._concat(inputs) + + if self.apply_relu: + inputs = [F.relu(x) for x in inputs] + + if self.scale_factor != 1.0: + inputs = self._rescale(inputs) + + if not sequential_input: + inputs = inputs[0] + + return inputs + + def _concat(self, inputs: Sequence[Tensor]) -> List[Tensor]: + size = inputs[0].shape[-2:] + resized_inputs = [ + resize( + x, + size=size, + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + return [torch.cat(resized_inputs, dim=1)] + + def _rescale(self, inputs: Sequence[Tensor]) -> List[Tensor]: + rescaled_inputs = [ + resize( + x, + scale_factor=self.scale_factor, + mode='bilinear', + align_corners=self.align_corners, + ) for x in inputs + ] + return rescaled_inputs diff --git a/mmpose/models/necks/fpn.py b/mmpose/models/necks/fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..d4d3311bda792898dd1bc7ef9b9462db7b01ce05 --- /dev/null +++ b/mmpose/models/necks/fpn.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import xavier_init + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class FPN(nn.Module): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (list[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. 
+ act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: dict(mode='nearest'). + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest')): + super().__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + def init_weights(self): + """Initialize model weights.""" + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + 
lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + # fix runtime error of "+=" inplace operation in PyTorch 1.10 + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return outs diff --git a/mmpose/models/necks/gap_neck.py b/mmpose/models/necks/gap_neck.py new file mode 100644 index 0000000000000000000000000000000000000000..58ce5d939ffdeb912a02e8b1823ab073cbc3d9e3 --- /dev/null +++ b/mmpose/models/necks/gap_neck.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class GlobalAveragePooling(nn.Module): + """Global Average Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + """ + + def __init__(self): + super().__init__() + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def init_weights(self): + pass + + def forward(self, inputs): + """Forward function.""" + + if isinstance(inputs, tuple): + outs = tuple([self.gap(x) for x in inputs]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, list): + outs = [self.gap(x) for x in inputs] + outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] + elif isinstance(inputs, torch.Tensor): + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmpose/models/necks/hybrid_encoder.py b/mmpose/models/necks/hybrid_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9db8d1b8855ed5acf49ce22b46de4d2804b489 --- /dev/null +++ b/mmpose/models/necks/hybrid_encoder.py @@ -0,0 +1,298 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
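# Illustrative sketch (hypothetical stand-alone helper, not the module's own
# implementation): HybridEncoder below flattens a (B, C, H, W) feature map to
# (B, H*W, C) and adds a fixed 2D sine/cosine positional encoding before the
# transformer layers. Assuming an embedding dim divisible by 4 and the default
# temperature of 10000, such an encoding can be built roughly as follows; the
# actual module relies on SinePositionalEncoding from mmpose.models.utils.
import torch


def sine_pos_embed_2d(h, w, dim=256, temperature=10000.0):
    """Hypothetical sketch: return a (h * w, dim) embedding where half the
    channels encode the x coordinate and half encode the y coordinate."""
    assert dim % 4 == 0, 'dim must be divisible by 4 for a 2D sine embedding'
    grid_y, grid_x = torch.meshgrid(
        torch.arange(h, dtype=torch.float32),
        torch.arange(w, dtype=torch.float32),
        indexing='ij')
    num_feats = dim // 4
    omega = temperature**(
        torch.arange(num_feats, dtype=torch.float32) / num_feats)
    x = grid_x.flatten()[:, None] / omega[None, :]
    y = grid_y.flatten()[:, None] / omega[None, :]
    return torch.cat([x.sin(), x.cos(), y.sin(), y.cos()], dim=1)

# e.g. sine_pos_embed_2d(20, 20, dim=256) has shape (400, 256); adding a batch
# dimension gives the (1, h * w, hidden_dim) layout that the encoder below
# expects for its query positional encoding.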
+from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule, ModuleList +from torch import Tensor + +from mmpose.models.utils import (DetrTransformerEncoder, RepVGGBlock, + SinePositionalEncoding) +from mmpose.registry import MODELS +from mmpose.utils.typing import ConfigType, OptConfigType + + +class CSPRepLayer(BaseModule): + """CSPRepLayer, a layer that combines Cross Stage Partial Networks with + RepVGG Blocks. + + Args: + in_channels (int): Number of input channels to the layer. + out_channels (int): Number of output channels from the layer. + num_blocks (int): The number of RepVGG blocks to be used in the layer. + Defaults to 3. + widen_factor (float): Expansion factor for intermediate channels. + Determines the hidden channel size based on out_channels. + Defaults to 1.0. + norm_cfg (dict): Configuration for normalization layers. + Defaults to Batch Normalization with trainable parameters. + act_cfg (dict): Configuration for activation layers. + Defaults to SiLU (Swish) with in-place operation. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + num_blocks: int = 3, + widen_factor: float = 1.0, + norm_cfg: OptConfigType = dict(type='BN', requires_grad=True), + act_cfg: OptConfigType = dict(type='SiLU', inplace=True)): + super(CSPRepLayer, self).__init__() + hidden_channels = int(out_channels * widen_factor) + self.conv1 = ConvModule( + in_channels, + hidden_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + in_channels, + hidden_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.bottlenecks = nn.Sequential(*[ + RepVGGBlock(hidden_channels, hidden_channels, act_cfg=act_cfg) + for _ in range(num_blocks) + ]) + if hidden_channels != out_channels: + self.conv3 = ConvModule( + hidden_channels, + out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.conv3 = nn.Identity() + + def forward(self, x: Tensor) -> Tensor: + """Forward function. + + Args: + x (Tensor): The input tensor. + + Returns: + Tensor: The output tensor. + """ + x_1 = self.conv1(x) + x_1 = self.bottlenecks(x_1) + x_2 = self.conv2(x) + return self.conv3(x_1 + x_2) + + +@MODELS.register_module() +class HybridEncoder(BaseModule): + """Hybrid encoder neck introduced in `RT-DETR` by Lyu et al (2023), + combining transformer encoders with a Feature Pyramid Network (FPN) and a + Path Aggregation Network (PAN). + + Args: + encoder_cfg (ConfigType): Configuration for the transformer encoder. + projector (OptConfigType, optional): Configuration for an optional + projector module. Defaults to None. + num_encoder_layers (int, optional): Number of encoder layers. + Defaults to 1. + in_channels (List[int], optional): Input channels of feature maps. + Defaults to [512, 1024, 2048]. + feat_strides (List[int], optional): Strides of feature maps. + Defaults to [8, 16, 32]. + hidden_dim (int, optional): Hidden dimension of the MLP. + Defaults to 256. + use_encoder_idx (List[int], optional): Indices of encoder layers to + use. Defaults to [2]. + pe_temperature (int, optional): Positional encoding temperature. + Defaults to 10000. + widen_factor (float, optional): Expansion factor for CSPRepLayer. + Defaults to 1.0. + deepen_factor (float, optional): Depth multiplier for CSPRepLayer. + Defaults to 1.0. + spe_learnable (bool, optional): Whether positional encoding is + learnable. Defaults to False. 
+ output_indices (Optional[List[int]], optional): Indices of output + layers. Defaults to None. + norm_cfg (OptConfigType, optional): Configuration for normalization + layers. Defaults to Batch Normalization. + act_cfg (OptConfigType, optional): Configuration for activation + layers. Defaults to SiLU (Swish) with in-place operation. + + .. _`RT-DETR`: https://arxiv.org/abs/2304.08069 + """ + + def __init__(self, + encoder_cfg: ConfigType = dict(), + projector: OptConfigType = None, + num_encoder_layers: int = 1, + in_channels: List[int] = [512, 1024, 2048], + feat_strides: List[int] = [8, 16, 32], + hidden_dim: int = 256, + use_encoder_idx: List[int] = [2], + pe_temperature: int = 10000, + widen_factor: float = 1.0, + deepen_factor: float = 1.0, + spe_learnable: bool = False, + output_indices: Optional[List[int]] = None, + norm_cfg: OptConfigType = dict(type='BN', requires_grad=True), + act_cfg: OptConfigType = dict(type='SiLU', inplace=True)): + super(HybridEncoder, self).__init__() + self.in_channels = in_channels + self.feat_strides = feat_strides + self.hidden_dim = hidden_dim + self.use_encoder_idx = use_encoder_idx + self.num_encoder_layers = num_encoder_layers + self.pe_temperature = pe_temperature + self.output_indices = output_indices + + # channel projection + self.input_proj = ModuleList() + for in_channel in in_channels: + self.input_proj.append( + ConvModule( + in_channel, + hidden_dim, + kernel_size=1, + padding=0, + norm_cfg=norm_cfg, + act_cfg=None)) + + # encoder transformer + if len(use_encoder_idx) > 0: + pos_enc_dim = self.hidden_dim // 2 + self.encoder = ModuleList([ + DetrTransformerEncoder(num_encoder_layers, encoder_cfg) + for _ in range(len(use_encoder_idx)) + ]) + + self.sincos_pos_enc = SinePositionalEncoding( + pos_enc_dim, + learnable=spe_learnable, + temperature=self.pe_temperature, + spatial_dim=2) + + # top-down fpn + lateral_convs = list() + fpn_blocks = list() + for idx in range(len(in_channels) - 1, 0, -1): + lateral_convs.append( + ConvModule( + hidden_dim, + hidden_dim, + 1, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + fpn_blocks.append( + CSPRepLayer( + hidden_dim * 2, + hidden_dim, + round(3 * deepen_factor), + act_cfg=act_cfg, + widen_factor=widen_factor)) + self.lateral_convs = ModuleList(lateral_convs) + self.fpn_blocks = ModuleList(fpn_blocks) + + # bottom-up pan + downsample_convs = list() + pan_blocks = list() + for idx in range(len(in_channels) - 1): + downsample_convs.append( + ConvModule( + hidden_dim, + hidden_dim, + 3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + pan_blocks.append( + CSPRepLayer( + hidden_dim * 2, + hidden_dim, + round(3 * deepen_factor), + act_cfg=act_cfg, + widen_factor=widen_factor)) + self.downsample_convs = ModuleList(downsample_convs) + self.pan_blocks = ModuleList(pan_blocks) + + if projector is not None: + self.projector = MODELS.build(projector) + else: + self.projector = None + + def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]: + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + proj_feats = [ + self.input_proj[i](inputs[i]) for i in range(len(inputs)) + ] + # encoder + if self.num_encoder_layers > 0: + for i, enc_ind in enumerate(self.use_encoder_idx): + h, w = proj_feats[enc_ind].shape[2:] + # flatten [B, C, H, W] to [B, HxW, C] + src_flatten = proj_feats[enc_ind].flatten(2).permute( + 0, 2, 1).contiguous() + + if torch.onnx.is_in_onnx_export(): + pos_enc = getattr(self, f'pos_enc_{i}') + else: + pos_enc = self.sincos_pos_enc(size=(h, w)) + pos_enc = 
pos_enc.transpose(-1, -2).reshape(1, h * w, -1) + memory = self.encoder[i]( + src_flatten, query_pos=pos_enc, key_padding_mask=None) + + proj_feats[enc_ind] = memory.permute( + 0, 2, 1).contiguous().view([-1, self.hidden_dim, h, w]) + + # top-down fpn + inner_outs = [proj_feats[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_low = proj_feats[idx - 1] + feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx]( + feat_high) + inner_outs[0] = feat_high + + upsample_feat = F.interpolate( + feat_high, scale_factor=2., mode='nearest') + inner_out = self.fpn_blocks[len(self.in_channels) - 1 - idx]( + torch.cat([upsample_feat, feat_low], axis=1)) + inner_outs.insert(0, inner_out) + + # bottom-up pan + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsample_convs[idx](feat_low) # Conv + out = self.pan_blocks[idx]( # CSPRepLayer + torch.cat([downsample_feat, feat_high], axis=1)) + outs.append(out) + + if self.output_indices is not None: + outs = [outs[i] for i in self.output_indices] + + if self.projector is not None: + outs = self.projector(outs) + + return tuple(outs) + + def switch_to_deploy(self, test_cfg): + """Switch to deploy mode.""" + + if getattr(self, 'deploy', False): + return + + if self.num_encoder_layers > 0: + for i, enc_ind in enumerate(self.use_encoder_idx): + h, w = test_cfg['input_size'] + h = int(h / 2**(3 + enc_ind)) + w = int(w / 2**(3 + enc_ind)) + pos_enc = self.sincos_pos_enc(size=(h, w)) + pos_enc = pos_enc.transpose(-1, -2).reshape(1, h * w, -1) + self.register_buffer(f'pos_enc_{i}', pos_enc) + + self.deploy = True diff --git a/mmpose/models/necks/posewarper_neck.py b/mmpose/models/necks/posewarper_neck.py new file mode 100644 index 0000000000000000000000000000000000000000..517fabd2e839878e7cf692c91adad450f432e8f0 --- /dev/null +++ b/mmpose/models/necks/posewarper_neck.py @@ -0,0 +1,329 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import constant_init, normal_init +from mmengine.utils import digit_version +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.utils.ops import resize +from mmpose.registry import MODELS +from ..backbones.resnet import BasicBlock, Bottleneck + +try: + from mmcv.ops import DeformConv2d + has_mmcv_full = True +except (ImportError, ModuleNotFoundError): + has_mmcv_full = False + + +@MODELS.register_module() +class PoseWarperNeck(nn.Module): + """PoseWarper neck. + + `"Learning temporal pose estimation from sparsely-labeled videos" + `_. + + Args: + in_channels (int): Number of input channels from backbone + out_channels (int): Number of output channels + inner_channels (int): Number of intermediate channels of the res block + deform_groups (int): Number of groups in the deformable conv + dilations (list|tuple): different dilations of the offset conv layers + trans_conv_kernel (int): the kernel of the trans conv layer, which is + used to get heatmap from the output of backbone. Default: 1 + res_blocks_cfg (dict|None): config of residual blocks. If None, + use the default values. If not None, it should contain the + following keys: + + - block (str): the type of residual block, Default: 'BASIC'. + - num_blocks (int): the number of blocks, Default: 20. + + offsets_kernel (int): the kernel of offset conv layer. 
+ deform_conv_kernel (int): the kernel of defomrable conv layer. + in_index (int|Sequence[int]): Input feature index. Default: 0 + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + Default: None. + + - 'resize_concat': Multiple feature maps will be resize to \ + the same size as first one and than concat together. \ + Usually used in FCN head of HRNet. + - 'multiple_select': Multiple feature maps will be bundle into \ + a list and passed into decode head. + - None: Only one select feature map is allowed. + + freeze_trans_layer (bool): Whether to freeze the transition layer + (stop grad and set eval mode). Default: True. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + im2col_step (int): the argument `im2col_step` in deformable conv, + Default: 80. + """ + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + minimum_mmcv_version = '1.3.17' + + def __init__(self, + in_channels, + out_channels, + inner_channels, + deform_groups=17, + dilations=(3, 6, 12, 18, 24), + trans_conv_kernel=1, + res_blocks_cfg=None, + offsets_kernel=3, + deform_conv_kernel=3, + in_index=0, + input_transform=None, + freeze_trans_layer=True, + norm_eval=False, + im2col_step=80): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.inner_channels = inner_channels + self.deform_groups = deform_groups + self.dilations = dilations + self.trans_conv_kernel = trans_conv_kernel + self.res_blocks_cfg = res_blocks_cfg + self.offsets_kernel = offsets_kernel + self.deform_conv_kernel = deform_conv_kernel + self.in_index = in_index + self.input_transform = input_transform + self.freeze_trans_layer = freeze_trans_layer + self.norm_eval = norm_eval + self.im2col_step = im2col_step + + identity_trans_layer = False + + assert trans_conv_kernel in [0, 1, 3] + kernel_size = trans_conv_kernel + if kernel_size == 3: + padding = 1 + elif kernel_size == 1: + padding = 0 + else: + # 0 for Identity mapping. + identity_trans_layer = True + + if identity_trans_layer: + self.trans_layer = nn.Identity() + else: + self.trans_layer = build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + + # build chain of residual blocks + if res_blocks_cfg is not None and not isinstance(res_blocks_cfg, dict): + raise TypeError('res_blocks_cfg should be dict or None.') + + if res_blocks_cfg is None: + block_type = 'BASIC' + num_blocks = 20 + else: + block_type = res_blocks_cfg.get('block', 'BASIC') + num_blocks = res_blocks_cfg.get('num_blocks', 20) + + block = self.blocks_dict[block_type] + + res_layers = [] + downsample = nn.Sequential( + build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=out_channels, + out_channels=inner_channels, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(dict(type='BN'), inner_channels)[1]) + res_layers.append( + block( + in_channels=out_channels, + out_channels=inner_channels, + downsample=downsample)) + + for _ in range(1, num_blocks): + res_layers.append(block(inner_channels, inner_channels)) + self.offset_feats = nn.Sequential(*res_layers) + + # build offset layers + self.num_offset_layers = len(dilations) + assert self.num_offset_layers > 0, 'Number of offset layers ' \ + 'should be larger than 0.' 
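# Note on the channel arithmetic below: mmcv's DeformConv2d expects an offset
# map with 2 * deform_groups * kernel_h * kernel_w channels, i.e. an (x, y)
# shift for every sampling location of every deformable group. With the
# defaults offsets_kernel == deform_conv_kernel == 3 and deform_groups == 17,
# this gives 2 * 3 * 3 * 17 = 306 offset channels per dilation branch.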
+ + target_offset_channels = 2 * offsets_kernel**2 * deform_groups + + offset_layers = [ + build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=inner_channels, + out_channels=target_offset_channels, + kernel_size=offsets_kernel, + stride=1, + dilation=dilations[i], + padding=dilations[i], + bias=False, + ) for i in range(self.num_offset_layers) + ] + self.offset_layers = nn.ModuleList(offset_layers) + + # build deformable conv layers + assert digit_version(mmcv.__version__) >= \ + digit_version(self.minimum_mmcv_version), \ + f'Current MMCV version: {mmcv.__version__}, ' \ + f'but MMCV >= {self.minimum_mmcv_version} is required, see ' \ + f'https://github.com/open-mmlab/mmcv/issues/1440, ' \ + f'Please install the latest MMCV.' + + if has_mmcv_full: + deform_conv_layers = [ + DeformConv2d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=deform_conv_kernel, + stride=1, + padding=int(deform_conv_kernel / 2) * dilations[i], + dilation=dilations[i], + deform_groups=deform_groups, + im2col_step=self.im2col_step, + ) for i in range(self.num_offset_layers) + ] + else: + raise ImportError('Please install the full version of mmcv ' + 'to use `DeformConv2d`.') + + self.deform_conv_layers = nn.ModuleList(deform_conv_layers) + + self.freeze_layers() + + def freeze_layers(self): + if self.freeze_trans_layer: + self.trans_layer.eval() + + for param in self.trans_layer.parameters(): + param.requires_grad = False + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.001) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + elif isinstance(m, DeformConv2d): + filler = torch.zeros([ + m.weight.size(0), + m.weight.size(1), + m.weight.size(2), + m.weight.size(3) + ], + dtype=torch.float32, + device=m.weight.device) + for k in range(m.weight.size(0)): + filler[k, k, + int(m.weight.size(2) / 2), + int(m.weight.size(3) / 2)] = 1.0 + m.weight = torch.nn.Parameter(filler) + m.weight.requires_grad = True + + # posewarper offset layer weight initialization + for m in self.offset_layers.modules(): + constant_init(m, 0) + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor] | Tensor): multi-level img features. + + Returns: + Tensor: The transformed inputs + """ + if not isinstance(inputs, list): + return inputs + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + def forward(self, inputs, frame_weight): + assert isinstance(inputs, (list, tuple)), 'PoseWarperNeck inputs ' \ + 'should be list or tuple, even though the length is 1, ' \ + 'for unified processing.' 
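# The two branches below cover the two supported input layouts:
# 1) `inputs` is a list/tuple with one feature (set) per frame: each frame is
#    warped towards the first (reference) frame using offsets predicted from
#    the feature difference, and the warped heatmaps are accumulated with
#    `frame_weight`;
# 2) `inputs` has a single entry whose batch stacks all frames: the reference
#    frame features are tiled, offsets are predicted in one pass, and the
#    per-frame warped heatmaps are sliced out and weighted.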
+ + output_heatmap = 0 + if len(inputs) > 1: + inputs = [self._transform_inputs(input) for input in inputs] + inputs = [self.trans_layer(input) for input in inputs] + + # calculate difference features + diff_features = [ + self.offset_feats(inputs[0] - input) for input in inputs + ] + + for i in range(len(inputs)): + if frame_weight[i] == 0: + continue + warped_heatmap = 0 + for j in range(self.num_offset_layers): + offset = (self.offset_layers[j](diff_features[i])) + warped_heatmap_tmp = self.deform_conv_layers[j](inputs[i], + offset) + warped_heatmap += warped_heatmap_tmp / \ + self.num_offset_layers + + output_heatmap += warped_heatmap * frame_weight[i] + + else: + inputs = inputs[0] + inputs = self._transform_inputs(inputs) + inputs = self.trans_layer(inputs) + + num_frames = len(frame_weight) + batch_size = inputs.size(0) // num_frames + ref_x = inputs[:batch_size] + ref_x_tiled = ref_x.repeat(num_frames, 1, 1, 1) + + offset_features = self.offset_feats(ref_x_tiled - inputs) + + warped_heatmap = 0 + for j in range(self.num_offset_layers): + offset = self.offset_layers[j](offset_features) + + warped_heatmap_tmp = self.deform_conv_layers[j](inputs, offset) + warped_heatmap += warped_heatmap_tmp / self.num_offset_layers + + for i in range(num_frames): + if frame_weight[i] == 0: + continue + output_heatmap += warped_heatmap[i * batch_size:(i + 1) * + batch_size] * frame_weight[i] + + return output_heatmap + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self.freeze_layers() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/necks/yolox_pafpn.py b/mmpose/models/necks/yolox_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..adc4cfffa304fce6c62d969f2bf66ef83b4f626d --- /dev/null +++ b/mmpose/models/necks/yolox_pafpn.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from ..utils import CSPLayer + + +@MODELS.register_module() +class YOLOXPAFPN(BaseModule): + """Path Aggregation Network used in YOLOX. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3 + use_depthwise (bool): Whether to depthwise separable convolution in + blocks. Default: False + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(scale_factor=2, mode='nearest')` + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_channels, + out_channels, + num_csp_blocks=3, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + super(YOLOXPAFPN, self).__init__(init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + # build top-down blocks + self.upsample = nn.Upsample(**upsample_cfg) + self.reduce_layers = nn.ModuleList() + self.top_down_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1, 0, -1): + self.reduce_layers.append( + ConvModule( + in_channels[idx], + in_channels[idx - 1], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.top_down_blocks.append( + CSPLayer( + in_channels[idx - 1] * 2, + in_channels[idx - 1], + num_blocks=num_csp_blocks, + add_identity=False, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + # build bottom-up blocks + self.downsamples = nn.ModuleList() + self.bottom_up_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1): + self.downsamples.append( + conv( + in_channels[idx], + in_channels[idx], + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottom_up_blocks.append( + CSPLayer( + in_channels[idx] * 2, + in_channels[idx + 1], + num_blocks=num_csp_blocks, + add_identity=False, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.out_convs = nn.ModuleList() + for i in range(len(in_channels)): + self.out_convs.append( + ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + """ + Args: + inputs (tuple[Tensor]): input features. + + Returns: + tuple[Tensor]: YOLOXPAFPN features. + """ + assert len(inputs) == len(self.in_channels) + + # top-down path + inner_outs = [inputs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_heigh = inner_outs[0] + feat_low = inputs[idx - 1] + feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx]( + feat_heigh) + inner_outs[0] = feat_heigh + + upsample_feat = self.upsample(feat_heigh) + + inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx]( + torch.cat([upsample_feat, feat_low], 1)) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_height = inner_outs[idx + 1] + downsample_feat = self.downsamples[idx](feat_low) + out = self.bottom_up_blocks[idx]( + torch.cat([downsample_feat, feat_height], 1)) + outs.append(out) + + # out convs + for idx, conv in enumerate(self.out_convs): + outs[idx] = conv(outs[idx]) + + return tuple(outs) diff --git a/mmpose/models/pose_estimators/__init__.py b/mmpose/models/pose_estimators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c5287e0c2caa617f88ec5a0ff538478e5e562a0b --- /dev/null +++ b/mmpose/models/pose_estimators/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .bottomup import BottomupPoseEstimator +from .pose_lifter import PoseLifter +from .topdown import TopdownPoseEstimator + +__all__ = ['TopdownPoseEstimator', 'BottomupPoseEstimator', 'PoseLifter'] diff --git a/mmpose/models/pose_estimators/base.py b/mmpose/models/pose_estimators/base.py new file mode 100644 index 0000000000000000000000000000000000000000..216f592fda1be26a6d7441aec339a96956ee19bb --- /dev/null +++ b/mmpose/models/pose_estimators/base.py @@ -0,0 +1,245 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Tuple, Union + +import torch +from mmengine.dist import get_world_size +from mmengine.logging import print_log +from mmengine.model import BaseModel +from torch import Tensor + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.models.utils import check_and_update_config +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, ForwardResults, OptConfigType, + Optional, OptMultiConfig, OptSampleList, + SampleList) + + +class BasePoseEstimator(BaseModel, metaclass=ABCMeta): + """Base class for pose estimators. + + Args: + data_preprocessor (dict | ConfigDict, optional): The pre-processing + config of :class:`BaseDataPreprocessor`. Defaults to ``None`` + init_cfg (dict | ConfigDict): The model initialization config. + Defaults to ``None`` + use_syncbn (bool): whether to use SyncBatchNorm. Defaults to False. + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. Defaults to ``None`` + """ + _version = 2 + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + use_syncbn: bool = False, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.metainfo = self._load_metainfo(metainfo) + self.train_cfg = train_cfg if train_cfg else {} + self.test_cfg = test_cfg if test_cfg else {} + + self.backbone = MODELS.build(backbone) + + # the PR #2108 and #2126 modified the interface of neck and head. + # The following function automatically detects outdated + # configurations and updates them accordingly, while also providing + # clear and concise information on the changes made. 
+ neck, head = check_and_update_config(neck, head) + + if neck is not None: + self.neck = MODELS.build(neck) + + if head is not None: + self.head = MODELS.build(head) + self.head.test_cfg = self.test_cfg.copy() + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + # TODO: Waiting for mmengine support + if use_syncbn and get_world_size() > 1: + torch.nn.SyncBatchNorm.convert_sync_batchnorm(self) + print_log('Using SyncBatchNorm()', 'current') + + def switch_to_deploy(self): + """Switch the sub-modules to deploy mode.""" + for name, layer in self.named_modules(): + if layer == self: + continue + if callable(getattr(layer, 'switch_to_deploy', None)): + print_log(f'module {name} has been switched to deploy mode', + 'current') + layer.switch_to_deploy(self.test_cfg) + + @property + def with_neck(self) -> bool: + """bool: whether the pose estimator has a neck.""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self) -> bool: + """bool: whether the pose estimator has a head.""" + return hasattr(self, 'head') and self.head is not None + + @staticmethod + def _load_metainfo(metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + return None + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + def forward(self, + inputs: torch.Tensor, + data_samples: OptSampleList, + mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: 'tensor', 'predict' and 'loss': + + - 'tensor': Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - 'predict': Forward and return the predictions, which are fully + processed to a list of :obj:`PoseDataSample`. + - 'loss': Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general + data_samples (list[:obj:`PoseDataSample`], optional): The + annotation of every sample. Defaults to ``None`` + mode (str): Set the forward mode and return value type. Defaults + to ``'tensor'`` + + Returns: + The return type depends on ``mode``. + + - If ``mode='tensor'``, return a tensor or a tuple of tensors + - If ``mode='predict'``, return a list of :obj:``PoseDataSample`` + that contains the pose predictions + - If ``mode='loss'``, return a dict of tensor(s) which is the loss + function value + """ + if isinstance(inputs, list): + inputs = torch.stack(inputs) + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + # use customed metainfo to override the default metainfo + if self.metainfo is not None: + for data_sample in data_samples: + data_sample.set_metainfo(self.metainfo) + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs) + else: + raise RuntimeError(f'Invalid mode "{mode}". 
' + 'Only supports loss, predict and tensor mode.') + + @abstractmethod + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + @abstractmethod + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing.""" + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None + ) -> Union[Tensor, Tuple[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + + Returns: + Union[Tensor | Tuple[Tensor]]: forward output of the network. + """ + + x = self.extract_feat(inputs) + if self.with_head: + x = self.head.forward(x) + + return x + + def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + inputs (Tensor): Image tensor with shape (N, C, H ,W). + + Returns: + tuple[Tensor]: Multi-level features that may have various + resolutions. + """ + x = self.backbone(inputs) + if self.with_neck: + x = self.neck(x) + + return x + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to. + + 1) convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. + + 2) remove the weights in data_preprocessor to avoid warning + `unexpected key in source state_dict: ...`. These weights are + initialized with given arguments and remain same during training + and inference. + + The hook will be automatically registered during initialization. + """ + + keys = list(state_dict.keys()) + + # remove the keys in data_preprocessor to avoid warning + for k in keys: + if k in ('data_preprocessor.mean', 'data_preprocessor.std'): + del state_dict[k] + + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + for k in keys: + if 'keypoint_head' in k: + v = state_dict.pop(k) + k = k.replace('keypoint_head', 'head') + state_dict[k] = v diff --git a/mmpose/models/pose_estimators/bottomup.py b/mmpose/models/pose_estimators/bottomup.py new file mode 100644 index 0000000000000000000000000000000000000000..7b82980a13a286f480f1616c9fc89d3ad2577196 --- /dev/null +++ b/mmpose/models/pose_estimators/bottomup.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import zip_longest +from typing import List, Optional, Union + +from mmengine.utils import is_list_of +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptMultiConfig, PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class BottomupPoseEstimator(BasePoseEstimator): + """Base class for bottom-up pose estimators. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + use_syncbn (bool): whether to use SyncBatchNorm. Defaults to False. 
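The `_load_state_dict_pre_hook` above only rewrites checkpoints whose stored meta version predates `_version`; a standalone toy run of the same key surgery (plain Python, dummy keys and placeholder values):

# Old-style (pre MMPose 1.0) checkpoint keys.
state_dict = {
    'backbone.conv1.weight': 0,
    'keypoint_head.final_layer.weight': 1,
    'data_preprocessor.mean': 2,
}

keys = list(state_dict.keys())
for k in keys:
    # data_preprocessor statistics are re-created from the config arguments,
    # so they are dropped to avoid "unexpected key" warnings
    if k in ('data_preprocessor.mean', 'data_preprocessor.std'):
        del state_dict[k]
for k in keys:
    # rename the old head prefix to the 1.x one
    if 'keypoint_head' in k:
        state_dict[k.replace('keypoint_head', 'head')] = state_dict.pop(k)

print(sorted(state_dict))
# ['backbone.conv1.weight', 'head.final_layer.weight']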
+ data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None``. + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + use_syncbn: bool = False, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + use_syncbn=use_syncbn, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = dict() + + if self.with_head: + losses.update( + self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) + + return losses + + def predict(self, inputs: Union[Tensor, List[Tensor]], + data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor | List[Tensor]): Input image in tensor or image + pyramid as a list of tensors. Each tensor is in shape + [B, C, H, W] + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + multiscale_test = self.test_cfg.get('multiscale_test', False) + flip_test = self.test_cfg.get('flip_test', False) + + # enable multi-scale test + aug_scales = data_samples[0].metainfo.get('aug_scales', None) + if multiscale_test: + assert isinstance(aug_scales, list) + assert is_list_of(inputs, Tensor) + # `inputs` includes images in original and augmented scales + assert len(inputs) == len(aug_scales) + 1 + else: + assert isinstance(inputs, Tensor) + # single-scale test + inputs = [inputs] + + feats = [] + for _inputs in inputs: + if flip_test: + _feats_orig = self.extract_feat(_inputs) + _feats_flip = self.extract_feat(_inputs.flip(-1)) + _feats = [_feats_orig, _feats_flip] + else: + _feats = self.extract_feat(_inputs) + + feats.append(_feats) + + if not multiscale_test: + feats = feats[0] + + preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) + + if isinstance(preds, tuple): + batch_pred_instances, batch_pred_fields = preds + else: + batch_pred_instances = preds + batch_pred_fields = None + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, data_samples) + + return results + + def add_pred_to_datasample(self, batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_data_samples: SampleList) -> SampleList: + """Add 
predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. + The length of the list is the batch size when ``merge==False``, or + 1 when ``merge==True``. + """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields = [] + + for pred_instances, pred_fields, data_sample in zip_longest( + batch_pred_instances, batch_pred_fields, batch_data_samples): + + input_size = data_sample.metainfo['input_size'] + input_center = data_sample.metainfo['input_center'] + input_scale = data_sample.metainfo['input_scale'] + + # convert keypoint coordinates from input space to image space + pred_instances.keypoints = pred_instances.keypoints / input_size \ + * input_scale + input_center - 0.5 * input_scale + if 'keypoints_visible' not in pred_instances: + pred_instances.keypoints_visible = \ + pred_instances.keypoint_scores + + # convert bbox coordinates from input space to image space + if 'bboxes' in pred_instances: + bboxes = pred_instances.bboxes.reshape( + pred_instances.bboxes.shape[0], 2, 2) + bboxes = bboxes / input_size * input_scale + input_center \ + - 0.5 * input_scale + pred_instances.bboxes = bboxes.reshape(bboxes.shape[0], 4) + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/pose_estimators/pose_lifter.py b/mmpose/models/pose_estimators/pose_lifter.py new file mode 100644 index 0000000000000000000000000000000000000000..ec8401d1a2bf2e425e2e106763d115a432de50fd --- /dev/null +++ b/mmpose/models/pose_estimators/pose_lifter.py @@ -0,0 +1,357 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import zip_longest +from typing import Tuple, Union + +import torch +from torch import Tensor + +from mmpose.models.utils import check_and_update_config +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + Optional, OptMultiConfig, OptSampleList, + PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class PoseLifter(BasePoseEstimator): + """Base class for pose lifter. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + traj_backbone (dict, optional): The backbone config for trajectory + model. Defaults to ``None`` + traj_neck (dict, optional): The neck config for trajectory model. + Defaults to ``None`` + traj_head (dict, optional): The head config for trajectory model. + Defaults to ``None`` + semi_loss (dict, optional): The semi-supervised loss config. + Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. 
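The rescaling in `add_pred_to_datasample` above maps predictions from the network input frame back to the original image; a small numeric sketch with made-up crop parameters:

import numpy as np

input_size = np.array([640, 640])     # network input resolution (w, h)
input_center = np.array([960, 540])   # crop center in the original image
input_scale = np.array([1280, 1280])  # crop extent in the original image

kpts_in_input = np.array([[320.0, 160.0]])          # predicted in input space
kpts_in_image = (kpts_in_input / input_size * input_scale
                 + input_center - 0.5 * input_scale)
print(kpts_in_image)  # [[960. 220.]]
# x: 320 / 640 * 1280 + 960 - 640 = 960
# y: 160 / 640 * 1280 + 540 - 640 = 220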
Defaults to + ``None`` + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + traj_backbone: OptConfigType = None, + traj_neck: OptConfigType = None, + traj_head: OptConfigType = None, + semi_loss: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg, + metainfo=metainfo) + + # trajectory model + self.share_backbone = False + if traj_head is not None: + if traj_backbone is not None: + self.traj_backbone = MODELS.build(traj_backbone) + else: + self.share_backbone = True + + # the PR #2108 and #2126 modified the interface of neck and head. + # The following function automatically detects outdated + # configurations and updates them accordingly, while also providing + # clear and concise information on the changes made. + traj_neck, traj_head = check_and_update_config( + traj_neck, traj_head) + + if traj_neck is not None: + self.traj_neck = MODELS.build(traj_neck) + + self.traj_head = MODELS.build(traj_head) + + # semi-supervised loss + self.semi_supervised = semi_loss is not None + if self.semi_supervised: + assert any([head, traj_head]) + self.semi_loss = MODELS.build(semi_loss) + + @property + def with_traj_backbone(self): + """bool: Whether the pose lifter has trajectory backbone.""" + return hasattr(self, 'traj_backbone') and \ + self.traj_backbone is not None + + @property + def with_traj_neck(self): + """bool: Whether the pose lifter has trajectory neck.""" + return hasattr(self, 'traj_neck') and self.traj_neck is not None + + @property + def with_traj(self): + """bool: Whether the pose lifter has trajectory head.""" + return hasattr(self, 'traj_head') + + @property + def causal(self): + """bool: Whether the pose lifter is causal.""" + if hasattr(self.backbone, 'causal'): + return self.backbone.causal + else: + raise AttributeError('A PoseLifter\'s backbone should have ' + 'the bool attribute "causal" to indicate if' + 'it performs causal inference.') + + def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + inputs (Tensor): Image tensor with shape (N, K, C, T). + + Returns: + tuple[Tensor]: Multi-level features that may have various + resolutions. + """ + # supervised learning + # pose model + feats = self.backbone(inputs) + if self.with_neck: + feats = self.neck(feats) + + # trajectory model + if self.with_traj: + if self.share_backbone: + traj_x = feats + else: + traj_x = self.traj_backbone(inputs) + + if self.with_traj_neck: + traj_x = self.traj_neck(traj_x) + return feats, traj_x + else: + return feats + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None + ) -> Union[Tensor, Tuple[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. 
+ + Args: + inputs (Tensor): Inputs with shape (N, K, C, T). + + Returns: + Union[Tensor | Tuple[Tensor]]: forward output of the network. + """ + feats = self.extract_feat(inputs) + + if self.with_traj: + # forward with trajectory model + x, traj_x = feats + if self.with_head: + x = self.head.forward(x) + + traj_x = self.traj_head.forward(traj_x) + return x, traj_x + else: + # forward without trajectory model + x = feats + if self.with_head: + x = self.head.forward(x) + return x + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, K, C, T). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = {} + + if self.with_traj: + x, traj_x = feats + # loss of trajectory model + losses.update( + self.traj_head.loss( + traj_x, data_samples, train_cfg=self.train_cfg)) + else: + x = feats + + if self.with_head: + # loss of pose model + losses.update( + self.head.loss(x, data_samples, train_cfg=self.train_cfg)) + + # TODO: support semi-supervised learning + if self.semi_supervised: + losses.update(semi_loss=self.semi_loss(inputs, data_samples)) + + return losses + + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Note: + - batch_size: B + - num_input_keypoints: K + - input_keypoint_dim: C + - input_sequence_len: T + + Args: + inputs (Tensor): Inputs with shape like (B, K, C, T). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. 
The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + if self.test_cfg.get('flip_test', False): + flip_indices = data_samples[0].metainfo['flip_indices'] + _feats = self.extract_feat(inputs) + _feats_flip = self.extract_feat( + torch.stack([ + flip_coordinates( + _input, + flip_indices=flip_indices, + shift_coords=self.test_cfg.get('shift_coords', True), + input_size=(1, 1)) for _input in inputs + ], + dim=0)) + + feats = [_feats, _feats_flip] + else: + feats = self.extract_feat(inputs) + + pose_preds, batch_pred_instances, batch_pred_fields = None, None, None + traj_preds, batch_traj_instances, batch_traj_fields = None, None, None + if self.with_traj: + x, traj_x = feats + traj_preds = self.traj_head.predict( + traj_x, data_samples, test_cfg=self.test_cfg) + else: + x = feats + + if self.with_head: + pose_preds = self.head.predict( + x, data_samples, test_cfg=self.test_cfg) + + if isinstance(pose_preds, tuple): + batch_pred_instances, batch_pred_fields = pose_preds + else: + batch_pred_instances = pose_preds + + if isinstance(traj_preds, tuple): + batch_traj_instances, batch_traj_fields = traj_preds + else: + batch_traj_instances = traj_preds + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, + batch_traj_instances, + batch_traj_fields, data_samples) + + return results + + def add_pred_to_datasample( + self, + batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_traj_instances: InstanceList, + batch_traj_fields: Optional[PixelDataList], + batch_data_samples: SampleList, + ) -> SampleList: + """Add predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_traj_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_traj_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. 
+ """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields, batch_traj_fields = [], [] + if batch_traj_instances is None: + batch_traj_instances = [] + output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', + None) + + for (pred_instances, pred_fields, traj_instances, traj_fields, + data_sample) in zip_longest(batch_pred_instances, + batch_pred_fields, + batch_traj_instances, + batch_traj_fields, + batch_data_samples): + + if output_keypoint_indices is not None: + # select output keypoints with given indices + num_keypoints = pred_instances.keypoints.shape[1] + for key, value in pred_instances.all_items(): + if key.startswith('keypoint'): + pred_instances.set_field( + value[:, output_keypoint_indices], key) + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + if output_keypoint_indices is not None: + # select output heatmap channels with keypoint indices + # when the number of heatmap channel matches num_keypoints + for key, value in pred_fields.all_items(): + if value.shape[0] != num_keypoints: + continue + pred_fields.set_field(value[output_keypoint_indices], + key) + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/pose_estimators/topdown.py b/mmpose/models/pose_estimators/topdown.py new file mode 100644 index 0000000000000000000000000000000000000000..ce458bc6cfc276e978537c5239bbd853a04e7686 --- /dev/null +++ b/mmpose/models/pose_estimators/topdown.py @@ -0,0 +1,196 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import zip_longest +from typing import Optional + +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptMultiConfig, PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class TopdownPoseEstimator(BasePoseEstimator): + """Base class for top-down pose estimators. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None`` + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. 
Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None, + freeze_backbone: bool = False): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg, + metainfo=metainfo) + + # Freeze all params of the backbone + if freeze_backbone: + print("Freezing backbone!") + for param in self.backbone.parameters(): + param.requires_grad = False + + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = dict() + + if self.with_head: + losses.update( + self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) + + return losses + + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W) + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + if self.test_cfg.get('flip_test', False): + _feats = self.extract_feat(inputs) + _feats_flip = self.extract_feat(inputs.flip(-1)) + feats = [_feats, _feats_flip] + else: + feats = self.extract_feat(inputs) + + preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) + + if isinstance(preds, tuple): + batch_pred_instances, batch_pred_fields = preds + else: + batch_pred_instances = preds + batch_pred_fields = None + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, data_samples) + + return results + + def add_pred_to_datasample(self, batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_data_samples: SampleList) -> SampleList: + """Add predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. 
+ """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields = [] + output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', + None) + + for pred_instances, pred_fields, data_sample in zip_longest( + batch_pred_instances, batch_pred_fields, batch_data_samples): + + if pred_instances is None: + continue + gt_instances = data_sample.gt_instances + + # convert keypoint coordinates from input space to image space + input_center = data_sample.metainfo['input_center'] + input_scale = data_sample.metainfo['input_scale'] + input_size = data_sample.metainfo['input_size'] + + pred_instances.keypoints[..., :2] = \ + pred_instances.keypoints[..., :2] / input_size * input_scale \ + + input_center - 0.5 * input_scale + if 'keypoints_visible' not in pred_instances: + pred_instances.keypoints_visible = \ + pred_instances.keypoint_scores + + if output_keypoint_indices is not None: + # select output keypoints with given indices + num_keypoints = pred_instances.keypoints.shape[1] + for key, value in pred_instances.all_items(): + if key.startswith('keypoint'): + pred_instances.set_field( + value[:, output_keypoint_indices], key) + + # add bbox information into pred_instances + pred_instances.bboxes = gt_instances.bboxes + pred_instances.bbox_scores = gt_instances.bbox_scores + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + if output_keypoint_indices is not None: + # select output heatmap channels with keypoint indices + # when the number of heatmap channel matches num_keypoints + for key, value in pred_fields.all_items(): + if value.shape[0] != num_keypoints: + continue + pred_fields.set_field(value[output_keypoint_indices], + key) + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/task_modules/__init__.py b/mmpose/models/task_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..caecfb9d330536717d65dc8cd42aae8bf5753400 --- /dev/null +++ b/mmpose/models/task_modules/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .assigners import * # noqa +from .prior_generators import * # noqa diff --git a/mmpose/models/task_modules/assigners/__init__.py b/mmpose/models/task_modules/assigners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6b006e389ccf9a8bd465d807670b9df5a60de3 --- /dev/null +++ b/mmpose/models/task_modules/assigners/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .metric_calculators import BBoxOverlaps2D, PoseOKS +from .sim_ota_assigner import SimOTAAssigner + +__all__ = ['SimOTAAssigner', 'PoseOKS', 'BBoxOverlaps2D'] diff --git a/mmpose/models/task_modules/assigners/metric_calculators.py b/mmpose/models/task_modules/assigners/metric_calculators.py new file mode 100644 index 0000000000000000000000000000000000000000..ebf4333b6646a2181f103f2a6f78138aa8ec6e93 --- /dev/null +++ b/mmpose/models/task_modules/assigners/metric_calculators.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional + +import torch +from torch import Tensor + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.registry import TASK_UTILS +from mmpose.structures.bbox import bbox_overlaps + + +def cast_tensor_type(x, scale=1., dtype=None): + if dtype == 'fp16': + # scale is for preventing overflows + x = (x / scale).half() + return x + + +@TASK_UTILS.register_module() +class BBoxOverlaps2D: + """2D Overlaps (e.g. IoUs, GIoUs) Calculator.""" + + def __init__(self, scale=1., dtype=None): + self.scale = scale + self.dtype = dtype + + @torch.no_grad() + def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate IoU between 2D bboxes. + + Args: + bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) + in format, or shape (m, 5) in format. + bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) + in format, shape (m, 5) in format, or be empty. If ``is_aligned `` is ``True``, + then m and n must be equal. + mode (str): "iou" (intersection over union), "iof" (intersection + over foreground), or "giou" (generalized intersection over + union). + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. + + Returns: + Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) + """ + assert bboxes1.size(-1) in [0, 4, 5] + assert bboxes2.size(-1) in [0, 4, 5] + if bboxes2.size(-1) == 5: + bboxes2 = bboxes2[..., :4] + if bboxes1.size(-1) == 5: + bboxes1 = bboxes1[..., :4] + + if self.dtype == 'fp16': + # change tensor type to save cpu and cuda memory and keep speed + bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) + bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) + overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) + if not overlaps.is_cuda and overlaps.dtype == torch.float16: + # resume cpu float32 + overlaps = overlaps.float() + return overlaps + + return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) + + def __repr__(self): + """str: a string describing the module""" + repr_str = self.__class__.__name__ + f'(' \ + f'scale={self.scale}, dtype={self.dtype})' + return repr_str + + +@TASK_UTILS.register_module() +class PoseOKS: + """OKS score Calculator.""" + + def __init__(self, + metainfo: Optional[str] = 'configs/_base_/datasets/coco.py'): + + if metainfo is not None: + metainfo = parse_pose_metainfo(dict(from_file=metainfo)) + sigmas = metainfo.get('sigmas', None) + if sigmas is not None: + self.sigmas = torch.as_tensor(sigmas) + + @torch.no_grad() + def __call__(self, + output: Tensor, + target: Tensor, + target_weights: Tensor, + areas: Tensor, + eps: float = 1e-8) -> Tensor: + + dist = torch.norm(output - target, dim=-1) + areas = areas.reshape(*((1, ) * (dist.ndim - 2)), -1, 1) + dist = dist / areas.pow(0.5).clip(min=eps) + + if hasattr(self, 'sigmas'): + if self.sigmas.device != dist.device: + self.sigmas = self.sigmas.to(dist.device) + sigmas = self.sigmas.reshape(*((1, ) * (dist.ndim - 1)), -1) + dist = dist / (sigmas * 2) + + target_weights = target_weights / target_weights.sum( + dim=-1, keepdims=True).clip(min=eps) + oks = (torch.exp(-dist.pow(2) / 2) * target_weights).sum(dim=-1) + return oks diff --git a/mmpose/models/task_modules/assigners/sim_ota_assigner.py b/mmpose/models/task_modules/assigners/sim_ota_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..b43851cf15a6e2116383304f6becd870fe9b4a27 --- /dev/null +++ b/mmpose/models/task_modules/assigners/sim_ota_assigner.py @@ -0,0 +1,302 @@ +# Copyright (c) 
OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.registry import TASK_UTILS +from mmpose.utils.typing import ConfigType + +INF = 100000.0 +EPS = 1.0e-7 + + +@TASK_UTILS.register_module() +class SimOTAAssigner: + """Computes matching between predictions and ground truth. + + Args: + center_radius (float): Radius of center area to determine + if a prior is in the center of a gt. Defaults to 2.5. + candidate_topk (int): Top-k ious candidates to calculate dynamic-k. + Defaults to 10. + iou_weight (float): Weight of bbox iou cost. Defaults to 3.0. + cls_weight (float): Weight of classification cost. Defaults to 1.0. + oks_weight (float): Weight of keypoint OKS cost. Defaults to 3.0. + vis_weight (float): Weight of keypoint visibility cost. Defaults to 0.0 + dynamic_k_indicator (str): Cost type for calculating dynamic-k, + either 'iou' or 'oks'. Defaults to 'iou'. + use_keypoints_for_center (bool): Whether to use keypoints to determine + if a prior is in the center of a gt. Defaults to False. + iou_calculator (dict): Config of IoU calculation method. + Defaults to dict(type='BBoxOverlaps2D'). + oks_calculator (dict): Config of OKS calculation method. + Defaults to dict(type='PoseOKS'). + """ + + def __init__(self, + center_radius: float = 2.5, + candidate_topk: int = 10, + iou_weight: float = 3.0, + cls_weight: float = 1.0, + oks_weight: float = 3.0, + vis_weight: float = 0.0, + dynamic_k_indicator: str = 'iou', + use_keypoints_for_center: bool = False, + iou_calculator: ConfigType = dict(type='BBoxOverlaps2D'), + oks_calculator: ConfigType = dict(type='PoseOKS')): + self.center_radius = center_radius + self.candidate_topk = candidate_topk + self.iou_weight = iou_weight + self.cls_weight = cls_weight + self.oks_weight = oks_weight + self.vis_weight = vis_weight + assert dynamic_k_indicator in ('iou', 'oks'), f'the argument ' \ + f'`dynamic_k_indicator` should be either \'iou\' or \'oks\', ' \ + f'but got {dynamic_k_indicator}' + self.dynamic_k_indicator = dynamic_k_indicator + + self.use_keypoints_for_center = use_keypoints_for_center + self.iou_calculator = TASK_UTILS.build(iou_calculator) + self.oks_calculator = TASK_UTILS.build(oks_calculator) + + def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, + **kwargs) -> dict: + """Assign gt to priors using SimOTA. + + Args: + pred_instances (:obj:`InstanceData`): Instances of model + predictions. It includes ``priors``, and the priors can + be anchors or points, or the bboxes predicted by the + previous stage, has shape (n, 4). The bboxes predicted by + the current model or stage will be named ``bboxes``, + ``labels``, and ``scores``, the same as the ``InstanceData`` + in other places. + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It usually includes ``bboxes``, with shape (k, 4), + and ``labels``, with shape (k, ). + Returns: + dict: Assignment result containing assigned gt indices, + max iou overlaps, assigned labels, etc. 
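A tiny numeric check of the `PoseOKS` calculator defined earlier in this diff, i.e. the quantity the assigner uses for its OKS cost. Passing `metainfo=None` is a simplification that skips the per-keypoint sigmas, so the score reduces to a plain Gaussian of the area-normalized distance; the tensors below are made up:

import torch
from mmpose.models.task_modules.assigners import PoseOKS

oks_fn = PoseOKS(metainfo=None)

target = torch.zeros(1, 3, 2)            # one instance, three keypoints
output = target.clone()
output[0, 0] = torch.tensor([3.0, 4.0])  # first keypoint is off by 5 px
target_weights = torch.ones(1, 3)        # all keypoints visible
areas = torch.tensor([100.0])            # instance area -> scale sqrt(100) = 10

print(oks_fn(output, target, target_weights, areas))
# dist = [5, 0, 0] / 10 = [0.5, 0, 0]
# oks  = (exp(-0.5**2 / 2) + 1 + 1) / 3 ≈ 0.961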
+ """ + gt_bboxes = gt_instances.bboxes + gt_labels = gt_instances.labels + gt_keypoints = gt_instances.keypoints + gt_keypoints_visible = gt_instances.keypoints_visible + gt_areas = gt_instances.areas + num_gt = gt_bboxes.size(0) + + decoded_bboxes = pred_instances.bboxes + pred_scores = pred_instances.scores + priors = pred_instances.priors + keypoints = pred_instances.keypoints + keypoints_visible = pred_instances.keypoints_visible + num_bboxes = decoded_bboxes.size(0) + + # assign 0 by default + assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), + 0, + dtype=torch.long) + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return dict( + num_gts=num_gt, + gt_inds=assigned_gt_inds, + max_overlaps=max_overlaps, + labels=assigned_labels) + + valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( + priors, gt_bboxes, gt_keypoints, gt_keypoints_visible) + valid_decoded_bbox = decoded_bboxes[valid_mask] + valid_pred_scores = pred_scores[valid_mask] + valid_pred_kpts = keypoints[valid_mask] + valid_pred_kpts_vis = keypoints_visible[valid_mask] + + num_valid = valid_decoded_bbox.size(0) + if num_valid == 0: + # No valid bboxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return dict( + num_gts=num_gt, + gt_inds=assigned_gt_inds, + max_overlaps=max_overlaps, + labels=assigned_labels) + + cost_matrix = (~is_in_boxes_and_center) * INF + + # calculate iou + pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) + if self.iou_weight > 0: + iou_cost = -torch.log(pairwise_ious + EPS) + cost_matrix = cost_matrix + iou_cost * self.iou_weight + + # calculate oks + if self.oks_weight > 0 or self.dynamic_k_indicator == 'oks': + pairwise_oks = self.oks_calculator( + valid_pred_kpts.unsqueeze(1), # [num_valid, 1, k, 2] + target=gt_keypoints.unsqueeze(0), # [1, num_gt, k, 2] + target_weights=gt_keypoints_visible.unsqueeze( + 0), # [1, num_gt, k] + areas=gt_areas.unsqueeze(0), # [1, num_gt] + ) # -> [num_valid, num_gt] + + oks_cost = -torch.log(pairwise_oks + EPS) + cost_matrix = cost_matrix + oks_cost * self.oks_weight + + # calculate cls + if self.cls_weight > 0: + gt_onehot_label = ( + F.one_hot(gt_labels.to(torch.int64), + pred_scores.shape[-1]).float().unsqueeze(0).repeat( + num_valid, 1, 1)) + valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat( + 1, num_gt, 1) + # disable AMP autocast to avoid overflow + with torch.cuda.amp.autocast(enabled=False): + cls_cost = ( + F.binary_cross_entropy( + valid_pred_scores.to(dtype=torch.float32), + gt_onehot_label, + reduction='none', + ).sum(-1).to(dtype=valid_pred_scores.dtype)) + cost_matrix = cost_matrix + cls_cost * self.cls_weight + # calculate vis + if self.vis_weight > 0: + valid_pred_kpts_vis = valid_pred_kpts_vis.unsqueeze(1).repeat( + 1, num_gt, 1) # [num_valid, 1, k] + gt_kpt_vis = gt_keypoints_visible.unsqueeze( + 0).float() # [1, num_gt, k] + with torch.cuda.amp.autocast(enabled=False): + vis_cost = ( + F.binary_cross_entropy( + valid_pred_kpts_vis.to(dtype=torch.float32), + gt_kpt_vis.repeat(num_valid, 1, 1), + reduction='none', + ).sum(-1).to(dtype=valid_pred_kpts_vis.dtype)) + cost_matrix = cost_matrix + vis_cost * self.vis_weight + + if self.dynamic_k_indicator == 'iou': + matched_pred_ious, 
matched_gt_inds = \ + self.dynamic_k_matching( + cost_matrix, pairwise_ious, num_gt, valid_mask) + elif self.dynamic_k_indicator == 'oks': + matched_pred_ious, matched_gt_inds = \ + self.dynamic_k_matching( + cost_matrix, pairwise_oks, num_gt, valid_mask) + + # convert to AssignResult format + assigned_gt_inds[valid_mask] = matched_gt_inds + 1 + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() + max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), + -INF, + dtype=torch.float32) + max_overlaps[valid_mask] = matched_pred_ious.to(max_overlaps) + return dict( + num_gts=num_gt, + gt_inds=assigned_gt_inds, + max_overlaps=max_overlaps, + labels=assigned_labels) + + def get_in_gt_and_in_center_info( + self, + priors: Tensor, + gt_bboxes: Tensor, + gt_keypoints: Optional[Tensor] = None, + gt_keypoints_visible: Optional[Tensor] = None, + ) -> Tuple[Tensor, Tensor]: + """Get the information of which prior is in gt bboxes and gt center + priors.""" + num_gt = gt_bboxes.size(0) + + repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) + repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) + repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) + repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) + + # is prior centers in gt bboxes, shape: [n_prior, n_gt] + l_ = repeated_x - gt_bboxes[:, 0] + t_ = repeated_y - gt_bboxes[:, 1] + r_ = gt_bboxes[:, 2] - repeated_x + b_ = gt_bboxes[:, 3] - repeated_y + + deltas = torch.stack([l_, t_, r_, b_], dim=1) + is_in_gts = deltas.min(dim=1).values > 0 + is_in_gts_all = is_in_gts.sum(dim=1) > 0 + + # is prior centers in gt centers + gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 + gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 + if self.use_keypoints_for_center and gt_keypoints_visible is not None: + gt_kpts_cts = (gt_keypoints * gt_keypoints_visible.unsqueeze(-1) + ).sum(dim=-2) / gt_keypoints_visible.sum( + dim=-1, keepdims=True).clip(min=0) + gt_kpts_cts = gt_kpts_cts.to(gt_bboxes) + valid_mask = gt_keypoints_visible.sum(-1) > 0 + gt_cxs[valid_mask] = gt_kpts_cts[valid_mask][..., 0] + gt_cys[valid_mask] = gt_kpts_cts[valid_mask][..., 1] + + ct_box_l = gt_cxs - self.center_radius * repeated_stride_x + ct_box_t = gt_cys - self.center_radius * repeated_stride_y + ct_box_r = gt_cxs + self.center_radius * repeated_stride_x + ct_box_b = gt_cys + self.center_radius * repeated_stride_y + + cl_ = repeated_x - ct_box_l + ct_ = repeated_y - ct_box_t + cr_ = ct_box_r - repeated_x + cb_ = ct_box_b - repeated_y + + ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) + is_in_cts = ct_deltas.min(dim=1).values > 0 + is_in_cts_all = is_in_cts.sum(dim=1) > 0 + + # in boxes or in centers, shape: [num_priors] + is_in_gts_or_centers = is_in_gts_all | is_in_cts_all + + # both in boxes and centers, shape: [num_fg, num_gt] + is_in_boxes_and_centers = ( + is_in_gts[is_in_gts_or_centers, :] + & is_in_cts[is_in_gts_or_centers, :]) + return is_in_gts_or_centers, is_in_boxes_and_centers + + def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, + num_gt: int, + valid_mask: Tensor) -> Tuple[Tensor, Tensor]: + """Use IoU and matching cost to calculate the dynamic top-k positive + targets.""" + matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) + # select candidate topk ious for dynamic-k calculation + candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) + topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) + # calculate dynamic k 
for each gt + dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) + matching_matrix[:, gt_idx][pos_idx] = 1 + + del topk_ious, dynamic_ks, pos_idx + + prior_match_gt_mask = matching_matrix.sum(1) > 1 + if prior_match_gt_mask.sum() > 0: + cost_min, cost_argmin = torch.min( + cost[prior_match_gt_mask, :], dim=1) + matching_matrix[prior_match_gt_mask, :] *= 0 + matching_matrix[prior_match_gt_mask, cost_argmin] = 1 + # get foreground mask inside box and center prior + fg_mask_inboxes = matching_matrix.sum(1) > 0 + valid_mask[valid_mask.clone()] = fg_mask_inboxes + + matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) + matched_pred_ious = (matching_matrix * + pairwise_ious).sum(1)[fg_mask_inboxes] + return matched_pred_ious, matched_gt_inds diff --git a/mmpose/models/task_modules/prior_generators/__init__.py b/mmpose/models/task_modules/prior_generators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e153da8447a807329589846f6c5d784a06bdd215 --- /dev/null +++ b/mmpose/models/task_modules/prior_generators/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .mlvl_point_generator import MlvlPointGenerator # noqa diff --git a/mmpose/models/task_modules/prior_generators/mlvl_point_generator.py b/mmpose/models/task_modules/prior_generators/mlvl_point_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..aed01af7342e152b35b0ba2accbceb076f1cffbe --- /dev/null +++ b/mmpose/models/task_modules/prior_generators/mlvl_point_generator.py @@ -0,0 +1,253 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import numpy as np +import torch +from torch import Tensor +from torch.nn.modules.utils import _pair + +from mmpose.registry import TASK_UTILS + +DeviceType = Union[str, torch.device] + + +@TASK_UTILS.register_module() +class MlvlPointGenerator: + """Standard points generator for multi-level (Mlvl) feature maps in 2D + points-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + offset (float): The offset of points, the value is normalized with + corresponding stride. Defaults to 0.5. + centralize_points (bool): Whether to centralize the points to + the center of anchors. Defaults to False. 
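A toy walk-through (made-up IoU and cost matrices) of the dynamic-k rule implemented in `dynamic_k_matching` above: each GT receives k = clamp(int(sum of its top candidate IoUs), min=1) positives, chosen as its k lowest-cost priors:

import torch

pairwise_ious = torch.tensor([[0.6, 0.1],
                              [0.5, 0.2],
                              [0.3, 0.7]])   # 3 priors x 2 GTs
cost = torch.tensor([[1.0, 9.0],
                     [2.0, 8.0],
                     [5.0, 1.5]])

candidate_topk = min(10, pairwise_ious.size(0))          # = 3 here
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks.tolist())  # [1, 1]: IoU sums 1.4 and 1.0 truncate to 1

matching = torch.zeros_like(cost, dtype=torch.uint8)
for gt_idx in range(cost.size(1)):
    _, pos_idx = torch.topk(
        cost[:, gt_idx], k=int(dynamic_ks[gt_idx]), largest=False)
    matching[pos_idx, gt_idx] = 1
print(matching)  # prior 0 -> GT 0, prior 2 -> GT 1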
+ """ + + def __init__(self, + strides: Union[List[int], List[Tuple[int, int]]], + offset: float = 0.5, + centralize_points: bool = False) -> None: + self.strides = [_pair(stride) for stride in strides] + self.centralize_points = centralize_points + self.offset = offset if not centralize_points else 0.0 + + @property + def num_levels(self) -> int: + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + @property + def num_base_priors(self) -> List[int]: + """list[int]: The number of priors (points) at a point + on the feature grid""" + return [1 for _ in range(len(self.strides))] + + def _meshgrid(self, + x: Tensor, + y: Tensor, + row_major: bool = True) -> Tuple[Tensor, Tensor]: + yy, xx = torch.meshgrid(y, x) + if row_major: + # warning .flatten() would cause error in ONNX exporting + # have to use reshape here + return xx.reshape(-1), yy.reshape(-1) + + else: + return yy.reshape(-1), xx.reshape(-1) + + def grid_priors(self, + featmap_sizes: List[Tuple], + dtype: torch.dtype = torch.float32, + device: DeviceType = 'cuda', + with_stride: bool = False) -> List[Tensor]: + """Generate grid points of multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32. + device (str | torch.device): The device where the anchors will be + put on. + with_stride (bool): Whether to concatenate the stride to + the last dimension of points. + + Return: + list[torch.Tensor]: Points of multiple feature levels. + The sizes of each tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + + assert self.num_levels == len(featmap_sizes) + multi_level_priors = [] + for i in range(self.num_levels): + priors = self.single_level_grid_priors( + featmap_sizes[i], + level_idx=i, + dtype=dtype, + device=device, + with_stride=with_stride) + multi_level_priors.append(priors) + return multi_level_priors + + def single_level_grid_priors(self, + featmap_size: Tuple[int], + level_idx: int, + dtype: torch.dtype = torch.float32, + device: DeviceType = 'cuda', + with_stride: bool = False) -> Tensor: + """Generate grid Points of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps, arrange as + (h, w). + level_idx (int): The index of corresponding feature map level. + dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32. + device (str | torch.device): The device the tensor will be put on. + Defaults to 'cuda'. + with_stride (bool): Concatenate the stride to the last dimension + of points. + + Return: + Tensor: Points of single feature levels. + The shape of tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). 
+ """ + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + shift_x = (torch.arange(0, feat_w, device=device) + + self.offset) * stride_w + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_x = shift_x.to(dtype) + + shift_y = (torch.arange(0, feat_h, device=device) + + self.offset) * stride_h + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_y = shift_y.to(dtype) + + if self.centralize_points: + shift_x = shift_x + float(stride_w - 1) / 2.0 + shift_y = shift_y + float(stride_h - 1) / 2.0 + + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + if not with_stride: + shifts = torch.stack([shift_xx, shift_yy], dim=-1) + else: + # use `shape[0]` instead of `len(shift_xx)` for ONNX export + stride_w = shift_xx.new_full((shift_xx.shape[0], ), + stride_w).to(dtype) + stride_h = shift_xx.new_full((shift_yy.shape[0], ), + stride_h).to(dtype) + shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], + dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, + featmap_sizes: List[Tuple[int, int]], + pad_shape: Tuple[int], + device: DeviceType = 'cuda') -> List[Tensor]: + """Generate valid flags of points of multiple feature levels. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + pad_shape (tuple(int)): The padded shape of the image, + arrange as (h, w). + device (str | torch.device): The device where the anchors will be + put on. + + Return: + list(torch.Tensor): Valid flags of points of multiple levels. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_flags = [] + for i in range(self.num_levels): + point_stride = self.strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w = pad_shape[:2] + valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) + valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) + flags = self.single_level_valid_flags((feat_h, feat_w), + (valid_feat_h, valid_feat_w), + device=device) + multi_level_flags.append(flags) + return multi_level_flags + + def single_level_valid_flags(self, + featmap_size: Tuple[int, int], + valid_size: Tuple[int, int], + device: DeviceType = 'cuda') -> Tensor: + """Generate the valid flags of points of a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps, arrange as + as (h, w). + valid_size (tuple[int]): The valid size of the feature maps. + The size arrange as as (h, w). + device (str | torch.device): The device where the flags will be + put on. Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each points in a single level \ + feature map. + """ + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + def sparse_priors(self, + prior_idxs: Tensor, + featmap_size: Tuple[int], + level_idx: int, + dtype: torch.dtype = torch.float32, + device: DeviceType = 'cuda') -> Tensor: + """Generate sparse points according to the ``prior_idxs``. + + Args: + prior_idxs (Tensor): The index of corresponding anchors + in the feature map. + featmap_size (tuple[int]): feature map size arrange as (w, h). 
+ level_idx (int): The level index of corresponding feature + map. + dtype (obj:`torch.dtype`): Date type of points. Defaults to + ``torch.float32``. + device (str | torch.device): The device where the points is + located. + Returns: + Tensor: Anchor with shape (N, 2), N should be equal to + the length of ``prior_idxs``. And last dimension + 2 represent (coord_x, coord_y). + """ + height, width = featmap_size + x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] + y = ((prior_idxs // width) % height + + self.offset) * self.strides[level_idx][1] + prioris = torch.stack([x, y], 1).to(dtype) + prioris = prioris.to(device) + return prioris diff --git a/mmpose/models/utils/__init__.py b/mmpose/models/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..92ad02b36f7da28edae56a76f33109f02d4b68cd --- /dev/null +++ b/mmpose/models/utils/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .check_and_update_config import check_and_update_config +from .ckpt_convert import pvt_convert +from .csp_layer import CSPLayer +from .misc import filter_scores_and_topk +from .ops import FrozenBatchNorm2d, inverse_sigmoid +from .reparam_layers import RepVGGBlock +from .rtmcc_block import RTMCCBlock, rope +from .transformer import (DetrTransformerEncoder, GAUEncoder, PatchEmbed, + SinePositionalEncoding, nchw_to_nlc, nlc_to_nchw) + +__all__ = [ + 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert', 'RTMCCBlock', + 'rope', 'check_and_update_config', 'filter_scores_and_topk', 'CSPLayer', + 'FrozenBatchNorm2d', 'inverse_sigmoid', 'GAUEncoder', + 'SinePositionalEncoding', 'RepVGGBlock', 'DetrTransformerEncoder' +] diff --git a/mmpose/models/utils/check_and_update_config.py b/mmpose/models/utils/check_and_update_config.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd1efa39b584a08055d470343549349907c1a5c --- /dev/null +++ b/mmpose/models/utils/check_and_update_config.py @@ -0,0 +1,230 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Dict, Optional, Tuple, Union + +from mmengine.config import Config, ConfigDict +from mmengine.dist import master_only +from mmengine.logging import MMLogger + +ConfigType = Union[Config, ConfigDict] + + +def process_input_transform(input_transform: str, head: Dict, head_new: Dict, + head_deleted_dict: Dict, head_append_dict: Dict, + neck_new: Dict, input_index: Tuple[int], + align_corners: bool) -> None: + """Process the input_transform field and update head and neck + dictionaries.""" + if input_transform == 'resize_concat': + in_channels = head_new.pop('in_channels') + head_deleted_dict['in_channels'] = str(in_channels) + in_channels = sum([in_channels[i] for i in input_index]) + head_new['in_channels'] = in_channels + head_append_dict['in_channels'] = str(in_channels) + + neck_new.update( + dict( + type='FeatureMapProcessor', + concat=True, + select_index=input_index, + )) + if align_corners: + neck_new['align_corners'] = align_corners + + elif input_transform == 'select': + if input_index != (-1, ): + neck_new.update( + dict(type='FeatureMapProcessor', select_index=input_index)) + if isinstance(head['in_channels'], tuple): + in_channels = head_new.pop('in_channels') + head_deleted_dict['in_channels'] = str(in_channels) + if isinstance(input_index, int): + in_channels = in_channels[input_index] + else: + in_channels = tuple([in_channels[i] for i in input_index]) + head_new['in_channels'] = in_channels + head_append_dict['in_channels'] = str(in_channels) + if align_corners: + neck_new['align_corners'] = align_corners + + else: + raise ValueError(f'model.head get invalid value for argument ' + f'input_transform: {input_transform}') + + +def process_extra_field(extra: Dict, head_new: Dict, head_deleted_dict: Dict, + head_append_dict: Dict, neck_new: Dict) -> None: + """Process the extra field and update head and neck dictionaries.""" + head_deleted_dict['extra'] = 'dict(' + for key, value in extra.items(): + head_deleted_dict['extra'] += f'{key}={value},' + head_deleted_dict['extra'] = head_deleted_dict['extra'][:-1] + ')' + if 'final_conv_kernel' in extra: + kernel_size = extra['final_conv_kernel'] + if kernel_size > 1: + padding = kernel_size // 2 + head_new['final_layer'] = dict( + kernel_size=kernel_size, padding=padding) + head_append_dict[ + 'final_layer'] = f'dict(kernel_size={kernel_size}, ' \ + f'padding={padding})' + else: + head_new['final_layer'] = dict(kernel_size=kernel_size) + head_append_dict[ + 'final_layer'] = f'dict(kernel_size={kernel_size})' + if 'upsample' in extra: + neck_new.update( + dict( + type='FeatureMapProcessor', + scale_factor=float(extra['upsample']), + apply_relu=True, + )) + + +def process_has_final_layer(has_final_layer: bool, head_new: Dict, + head_deleted_dict: Dict, + head_append_dict: Dict) -> None: + """Process the has_final_layer field and update the head dictionary.""" + head_deleted_dict['has_final_layer'] = str(has_final_layer) + if not has_final_layer: + if 'final_layer' not in head_new: + head_new['final_layer'] = None + head_append_dict['final_layer'] = 'None' + + +def check_and_update_config(neck: Optional[ConfigType], + head: ConfigType) -> Tuple[Optional[Dict], Dict]: + """Check and update the configuration of the head and neck components. + Args: + neck (Optional[ConfigType]): Configuration for the neck component. + head (ConfigType): Configuration for the head component. + + Returns: + Tuple[Optional[Dict], Dict]: Updated configurations for the neck + and head components. 
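+
+    Example (illustrative; the legacy head config below is hypothetical):
+        >>> head_cfg = dict(
+        ...     type='HeatmapHead', in_channels=(18, 36, 72, 144),
+        ...     out_channels=17, input_index=3)
+        >>> neck_cfg, head_cfg = check_and_update_config(None, head_cfg)
+        >>> # neck_cfg -> dict(type='FeatureMapProcessor', select_index=3)
+        >>> # head_cfg['in_channels'] -> 144 (the selected entry)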
+ """ + head_new, neck_new = head.copy(), neck.copy() if isinstance(neck, + dict) else {} + head_deleted_dict, head_append_dict = {}, {} + + if 'input_transform' in head: + input_transform = head_new.pop('input_transform') + head_deleted_dict['input_transform'] = f'\'{input_transform}\'' + else: + input_transform = 'select' + + if 'input_index' in head: + input_index = head_new.pop('input_index') + head_deleted_dict['input_index'] = str(input_index) + else: + input_index = (-1, ) + + if 'align_corners' in head: + align_corners = head_new.pop('align_corners') + head_deleted_dict['align_corners'] = str(align_corners) + else: + align_corners = False + + process_input_transform(input_transform, head, head_new, head_deleted_dict, + head_append_dict, neck_new, input_index, + align_corners) + + if 'extra' in head: + extra = head_new.pop('extra') + process_extra_field(extra, head_new, head_deleted_dict, + head_append_dict, neck_new) + + if 'has_final_layer' in head: + has_final_layer = head_new.pop('has_final_layer') + process_has_final_layer(has_final_layer, head_new, head_deleted_dict, + head_append_dict) + + display_modifications(head_deleted_dict, head_append_dict, neck_new) + + neck_new = neck_new if len(neck_new) else None + return neck_new, head_new + + +@master_only +def display_modifications(head_deleted_dict: Dict, head_append_dict: Dict, + neck: Dict) -> None: + """Display the modifications made to the head and neck configurations. + + Args: + head_deleted_dict (Dict): Dictionary of deleted fields in the head. + head_append_dict (Dict): Dictionary of appended fields in the head. + neck (Dict): Updated neck configuration. + """ + if len(head_deleted_dict) + len(head_append_dict) == 0: + return + + old_model_info, new_model_info = build_model_info(head_deleted_dict, + head_append_dict, neck) + + total_info = '\nThe config you are using is outdated. '\ + 'The following section of the config:\n```\n' + total_info += old_model_info + total_info += '```\nshould be updated to\n```\n' + total_info += new_model_info + total_info += '```\nFor more information, please refer to '\ + 'https://mmpose.readthedocs.io/en/latest/' \ + 'guide_to_framework.html#step3-model' + + logger: MMLogger = MMLogger.get_current_instance() + logger.warning(total_info) + + +def build_model_info(head_deleted_dict: Dict, head_append_dict: Dict, + neck: Dict) -> Tuple[str, str]: + """Build the old and new model information strings. + Args: + head_deleted_dict (Dict): Dictionary of deleted fields in the head. + head_append_dict (Dict): Dictionary of appended fields in the head. + neck (Dict): Updated neck configuration. + + Returns: + Tuple[str, str]: Old and new model information strings. + """ + old_head_info = build_head_info(head_deleted_dict) + new_head_info = build_head_info(head_append_dict) + neck_info = build_neck_info(neck) + + old_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' + old_head_info + new_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' \ + + neck_info + new_head_info + + return old_model_info, new_model_info + + +def build_head_info(head_dict: Dict) -> str: + """Build the head information string. + + Args: + head_dict (Dict): Dictionary of fields in the head configuration. + Returns: + str: Head information string. + """ + head_info = ' ' * 4 + 'head=dict(\n' + for key, value in head_dict.items(): + head_info += ' ' * 8 + f'{key}={value},\n' + head_info += ' ' * 8 + '...),\n' + return head_info + + +def build_neck_info(neck: Dict) -> str: + """Build the neck information string. 
+ Args: + neck (Dict): Updated neck configuration. + + Returns: + str: Neck information string. + """ + if len(neck) > 0: + neck = neck.copy() + neck_info = ' ' * 4 + 'neck=dict(\n' + ' ' * 8 + \ + f'type=\'{neck.pop("type")}\',\n' + for key, value in neck.items(): + neck_info += ' ' * 8 + f'{key}={str(value)},\n' + neck_info += ' ' * 4 + '),\n' + else: + neck_info = '' + return neck_info diff --git a/mmpose/models/utils/ckpt_convert.py b/mmpose/models/utils/ckpt_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..05f5cdb4a3cdf32ac2b6b7a8888c5a772e582f14 --- /dev/null +++ b/mmpose/models/utils/ckpt_convert.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +# This script consists of several convert functions which +# can modify the weights of model in original repo to be +# pre-trained weights. + +from collections import OrderedDict + +import torch + + +def pvt_convert(ckpt): + new_ckpt = OrderedDict() + # Process the concat between q linear weights and kv linear weights + use_abs_pos_embed = False + use_conv_ffn = False + for k in ckpt.keys(): + if k.startswith('pos_embed'): + use_abs_pos_embed = True + if k.find('dwconv') >= 0: + use_conv_ffn = True + for k, v in ckpt.items(): + if k.startswith('head'): + continue + if k.startswith('norm.'): + continue + if k.startswith('cls_token'): + continue + if k.startswith('pos_embed'): + stage_i = int(k.replace('pos_embed', '')) + new_k = k.replace(f'pos_embed{stage_i}', + f'layers.{stage_i - 1}.1.0.pos_embed') + if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 + new_v = v[:, 1:, :] # remove cls token + else: + new_v = v + elif k.startswith('patch_embed'): + stage_i = int(k.split('.')[0].replace('patch_embed', '')) + new_k = k.replace(f'patch_embed{stage_i}', + f'layers.{stage_i - 1}.0') + new_v = v + if 'proj.' in new_k: + new_k = new_k.replace('proj.', 'projection.') + elif k.startswith('block'): + stage_i = int(k.split('.')[0].replace('block', '')) + layer_i = int(k.split('.')[1]) + new_layer_i = layer_i + use_abs_pos_embed + new_k = k.replace(f'block{stage_i}.{layer_i}', + f'layers.{stage_i - 1}.1.{new_layer_i}') + new_v = v + if 'attn.q.' in new_k: + sub_item_k = k.replace('q.', 'kv.') + new_k = new_k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) + elif 'attn.kv.' in new_k: + continue + elif 'attn.proj.' in new_k: + new_k = new_k.replace('proj.', 'attn.out_proj.') + elif 'attn.sr.' in new_k: + new_k = new_k.replace('sr.', 'sr.') + elif 'mlp.' in new_k: + string = f'{new_k}-' + new_k = new_k.replace('mlp.', 'ffn.layers.') + if 'fc1.weight' in new_k or 'fc2.weight' in new_k: + new_v = v.reshape((*v.shape, 1, 1)) + new_k = new_k.replace('fc1.', '0.') + new_k = new_k.replace('dwconv.dwconv.', '1.') + if use_conv_ffn: + new_k = new_k.replace('fc2.', '4.') + else: + new_k = new_k.replace('fc2.', '3.') + string += f'{new_k} {v.shape}-{new_v.shape}' + elif k.startswith('norm'): + stage_i = int(k[4]) + new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') + new_v = v + else: + new_k = k + new_v = v + new_ckpt[new_k] = new_v + + return new_ckpt diff --git a/mmpose/models/utils/csp_layer.py b/mmpose/models/utils/csp_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..071e1209a2b4b0e1acb722063bfbd9b248fb8b5c --- /dev/null +++ b/mmpose/models/utils/csp_layer.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
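+
+# Illustrative input/output shapes for the blocks defined in this module
+# (the channel numbers are arbitrary):
+#
+#     ChannelAttention(64)         # (N, 64, H, W) -> (N, 64, H, W)
+#     DarknetBottleneck(64, 64)    # (N, 64, H, W) -> (N, 64, H, W)
+#     CSPLayer(64, 128)            # (N, 64, H, W) -> (N, 128, H, W)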
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule +from mmengine.utils import digit_version +from torch import Tensor + +from mmpose.utils.typing import ConfigType, OptConfigType, OptMultiConfig + + +class ChannelAttention(BaseModule): + """Channel attention Module. + + Args: + channels (int): The input (and output) channels of the attention layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None + """ + + def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True) + if digit_version(torch.__version__) < (1, 7, 0): + self.act = nn.Hardsigmoid() + else: + self.act = nn.Hardsigmoid(inplace=True) + + def forward(self, x: Tensor) -> Tensor: + """Forward function for ChannelAttention.""" + with torch.cuda.amp.autocast(enabled=False): + out = self.global_avgpool(x) + out = self.fc(out) + out = self.act(out) + return x * out + + +class DarknetBottleneck(BaseModule): + """The basic bottleneck block used in Darknet. + + Each ResBlock consists of two ConvModules and the input is added to the + final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. + The first convLayer has filter size of 1x1 and the second one has the + filter size of 3x3. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expansion (float): The kernel size of the convolution. + Defaults to 0.5. + add_identity (bool): Whether to add identity to the out. + Defaults to True. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish'). + """ + + def __init__(self, + in_channels: int, + out_channels: int, + expansion: float = 0.5, + add_identity: bool = True, + use_depthwise: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='Swish'), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + hidden_channels = int(out_channels * expansion) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + self.conv1 = ConvModule( + in_channels, + hidden_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = conv( + hidden_channels, + out_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.add_identity = \ + add_identity and in_channels == out_channels + + def forward(self, x: Tensor) -> Tensor: + """Forward function.""" + identity = x + out = self.conv1(x) + out = self.conv2(out) + + if self.add_identity: + return out + identity + else: + return out + + +class CSPNeXtBlock(BaseModule): + """The basic bottleneck block used in CSPNeXt. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expansion (float): Expand ratio of the hidden channel. Defaults to 0.5. + add_identity (bool): Whether to add identity to the out. Only works + when in_channels == out_channels. 
Defaults to True. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + kernel_size (int): The kernel size of the second convolution layer. + Defaults to 5. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU'). + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + expansion: float = 0.5, + add_identity: bool = True, + use_depthwise: bool = False, + kernel_size: int = 5, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU'), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + hidden_channels = int(out_channels * expansion) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + self.conv1 = conv( + in_channels, + hidden_channels, + 3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = DepthwiseSeparableConvModule( + hidden_channels, + out_channels, + kernel_size, + stride=1, + padding=kernel_size // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.add_identity = \ + add_identity and in_channels == out_channels + + def forward(self, x: Tensor) -> Tensor: + """Forward function.""" + identity = x + out = self.conv1(x) + out = self.conv2(out) + + if self.add_identity: + return out + identity + else: + return out + + +class CSPLayer(BaseModule): + """Cross Stage Partial Layer. + + Args: + in_channels (int): The input channels of the CSP layer. + out_channels (int): The output channels of the CSP layer. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Defaults to 0.5. + num_blocks (int): Number of blocks. Defaults to 1. + add_identity (bool): Whether to add identity in blocks. + Defaults to True. + use_cspnext_block (bool): Whether to use CSPNeXt block. + Defaults to False. + use_depthwise (bool): Whether to use depthwise separable convolution in + blocks. Defaults to False. + channel_attention (bool): Whether to add channel attention in each + stage. Defaults to True. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish') + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`], optional): Initialization config dict. + Defaults to None. 
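+
+    Example (illustrative; channel sizes and resolution are arbitrary):
+        >>> layer = CSPLayer(64, 128, num_blocks=2, channel_attention=True)
+        >>> out = layer(torch.rand(2, 64, 32, 32))
+        >>> out.shape
+        torch.Size([2, 128, 32, 32])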
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + expand_ratio: float = 0.5, + num_blocks: int = 1, + add_identity: bool = True, + use_depthwise: bool = False, + use_cspnext_block: bool = False, + channel_attention: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='Swish'), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + block = CSPNeXtBlock if use_cspnext_block else DarknetBottleneck + mid_channels = int(out_channels * expand_ratio) + self.channel_attention = channel_attention + self.main_conv = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.short_conv = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.final_conv = ConvModule( + 2 * mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.blocks = nn.Sequential(*[ + block( + mid_channels, + mid_channels, + 1.0, + add_identity, + use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) for _ in range(num_blocks) + ]) + if channel_attention: + self.attention = ChannelAttention(2 * mid_channels) + + def forward(self, x: Tensor) -> Tensor: + """Forward function.""" + x_short = self.short_conv(x) + + x_main = self.main_conv(x) + x_main = self.blocks(x_main) + + x_final = torch.cat((x_main, x_short), dim=1) + + if self.channel_attention: + x_final = self.attention(x_final) + return self.final_conv(x_final) diff --git a/mmpose/models/utils/geometry.py b/mmpose/models/utils/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..0ceadaec30cd2c9bb3fbada132e1ea674f2e8754 --- /dev/null +++ b/mmpose/models/utils/geometry.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.nn import functional as F + + +def rot6d_to_rotmat(x): + """Convert 6D rotation representation to 3x3 rotation matrix. + + Based on Zhou et al., "On the Continuity of Rotation + Representations in Neural Networks", CVPR 2019 + Input: + (B,6) Batch of 6-D rotation representations + Output: + (B,3,3) Batch of corresponding rotation matrices + """ + x = x.view(-1, 3, 2) + a1 = x[:, :, 0] + a2 = x[:, :, 1] + b1 = F.normalize(a1) + b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) + b3 = torch.cross(b1, b2) + return torch.stack((b1, b2, b3), dim=-1) + + +def batch_rodrigues(theta): + """Convert axis-angle representation to rotation matrix. + Args: + theta: size = [B, 3] + Returns: + Rotation matrix corresponding to the quaternion + -- size = [B, 3, 3] + """ + l2norm = torch.norm(theta + 1e-8, p=2, dim=1) + angle = torch.unsqueeze(l2norm, -1) + normalized = torch.div(theta, angle) + angle = angle * 0.5 + v_cos = torch.cos(angle) + v_sin = torch.sin(angle) + quat = torch.cat([v_cos, v_sin * normalized], dim=1) + return quat_to_rotmat(quat) + + +def quat_to_rotmat(quat): + """Convert quaternion coefficients to rotation matrix. 
+ Args: + quat: size = [B, 4] 4 <===>(w, x, y, z) + Returns: + Rotation matrix corresponding to the quaternion + -- size = [B, 3, 3] + """ + norm_quat = quat + norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) + w, x, y, z = norm_quat[:, 0], norm_quat[:, 1],\ + norm_quat[:, 2], norm_quat[:, 3] + + B = quat.size(0) + + w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) + wx, wy, wz = w * x, w * y, w * z + xy, xz, yz = x * y, x * z, y * z + + rotMat = torch.stack([ + w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, + w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, + w2 - x2 - y2 + z2 + ], + dim=1).view(B, 3, 3) + return rotMat diff --git a/mmpose/models/utils/misc.py b/mmpose/models/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..347c5217092b0feadaef6e0534b4d77b51d3b190 --- /dev/null +++ b/mmpose/models/utils/misc.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import torch +from six.moves import map, zip + + +def multi_apply(func, *args, **kwargs): + """Apply function to a list of arguments. + + Note: + This function applies the ``func`` to multiple inputs and + map the multiple outputs of the ``func`` into different + list. Each list contains the same type of outputs corresponding + to different inputs. + + Args: + func (Function): A function that will be applied to a list of + arguments + + Returns: + tuple(list): A tuple containing multiple list, each list contains + a kind of returned results by the function + """ + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +def filter_scores_and_topk(scores, score_thr, topk, results=None): + """Filter results using score threshold and topk candidates. + + Args: + scores (Tensor): The scores, shape (num_bboxes, K). + score_thr (float): The score filter threshold. + topk (int): The number of topk candidates. + results (dict or list or Tensor, Optional): The results to + which the filtering rule is to be applied. The shape + of each item is (num_bboxes, N). + + Returns: + tuple: Filtered results + + - scores (Tensor): The scores after being filtered, \ + shape (num_bboxes_filtered, ). + - labels (Tensor): The class labels, shape \ + (num_bboxes_filtered, ). + - anchor_idxs (Tensor): The anchor indexes, shape \ + (num_bboxes_filtered, ). + - filtered_results (dict or list or Tensor, Optional): \ + The filtered results. The shape of each item is \ + (num_bboxes_filtered, N). 
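+
+    Example (illustrative; the scores and threshold are made up):
+        >>> scores = torch.tensor([[0.9, 0.1], [0.3, 0.7], [0.05, 0.2]])
+        >>> new_scores, labels, keep_idxs, _ = filter_scores_and_topk(
+        ...     scores, score_thr=0.5, topk=10)
+        >>> new_scores
+        tensor([0.9000, 0.7000])
+        >>> labels, keep_idxs
+        (tensor([0, 1]), tensor([0, 1]))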
+ """ + valid_mask = scores > score_thr + scores = scores[valid_mask] + valid_idxs = torch.nonzero(valid_mask) + + num_topk = min(topk, valid_idxs.size(0)) + # torch.sort is actually faster than .topk (at least on GPUs) + scores, idxs = scores.sort(descending=True) + scores = scores[:num_topk] + topk_idxs = valid_idxs[idxs[:num_topk]] + keep_idxs, labels = topk_idxs.unbind(dim=1) + + filtered_results = None + if results is not None: + if isinstance(results, dict): + filtered_results = {k: v[keep_idxs] for k, v in results.items()} + elif isinstance(results, list): + filtered_results = [result[keep_idxs] for result in results] + elif isinstance(results, torch.Tensor): + filtered_results = results[keep_idxs] + else: + raise NotImplementedError(f'Only supports dict or list or Tensor, ' + f'but get {type(results)}.') + return scores, labels, keep_idxs, filtered_results diff --git a/mmpose/models/utils/ops.py b/mmpose/models/utils/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d1ba0cf37c3293e575d41ba47034ee08331a4fa9 --- /dev/null +++ b/mmpose/models/utils/ops.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.nn import functional as F + +from mmpose.registry import MODELS + + +def resize(input: torch.Tensor, + size: Optional[Union[Tuple[int, int], torch.Size]] = None, + scale_factor: Optional[float] = None, + mode: str = 'nearest', + align_corners: Optional[bool] = None, + warning: bool = True) -> torch.Tensor: + """Resize a given input tensor using specified size or scale_factor. + + Args: + input (torch.Tensor): The input tensor to be resized. + size (Optional[Union[Tuple[int, int], torch.Size]]): The desired + output size. Defaults to None. + scale_factor (Optional[float]): The scaling factor for resizing. + Defaults to None. + mode (str): The interpolation mode. Defaults to 'nearest'. + align_corners (Optional[bool]): Determines whether to align the + corners when using certain interpolation modes. Defaults to None. + warning (bool): Whether to display a warning when the input and + output sizes are not ideal for alignment. Defaults to True. + + Returns: + torch.Tensor: The resized tensor. + """ + # Check if a warning should be displayed regarding input and output sizes + if warning: + if size is not None and align_corners: + input_h, input_w = tuple(int(x) for x in input.shape[2:]) + output_h, output_w = tuple(int(x) for x in size) + if output_h > input_h or output_w > output_h: + if ((output_h > 1 and output_w > 1 and input_h > 1 + and input_w > 1) and (output_h - 1) % (input_h - 1) + and (output_w - 1) % (input_w - 1)): + warnings.warn( + f'When align_corners={align_corners}, ' + 'the output would be more aligned if ' + f'input size {(input_h, input_w)} is `x+1` and ' + f'out size {(output_h, output_w)} is `nx+1`') + + # Convert torch.Size to tuple if necessary + if isinstance(size, torch.Size): + size = tuple(int(x) for x in size) + + # Perform the resizing operation + return F.interpolate(input, size, scale_factor, mode, align_corners) + + +@MODELS.register_module() +class FrozenBatchNorm2d(torch.nn.Module): + """BatchNorm2d where the batch statistics and the affine parameters are + fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without + which any other models than torchvision.models.resnet[18,34,50,101] produce + nans. 
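+
+    Example (illustrative):
+        >>> bn = FrozenBatchNorm2d(16)
+        >>> out = bn(torch.rand(2, 16, 8, 8))  # stats and affine stay frozen
+        >>> out.shape
+        torch.Size([2, 16, 8, 8])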
+ """ + + def __init__(self, n, eps: int = 1e-5): + super(FrozenBatchNorm2d, self).__init__() + self.register_buffer('weight', torch.ones(n)) + self.register_buffer('bias', torch.zeros(n)) + self.register_buffer('running_mean', torch.zeros(n)) + self.register_buffer('running_var', torch.ones(n)) + self.eps = eps + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super(FrozenBatchNorm2d, + self)._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs) + + def forward(self, x): + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + rv = self.running_var.reshape(1, -1, 1, 1) + rm = self.running_mean.reshape(1, -1, 1, 1) + scale = w * (rv + self.eps).rsqrt() + bias = b - rm * scale + return x * scale + bias + + +def inverse_sigmoid(x: Tensor, eps: float = 1e-3) -> Tensor: + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the inverse. + eps (float): EPS avoid numerical overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse function of sigmoid, has the same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) diff --git a/mmpose/models/utils/realnvp.py b/mmpose/models/utils/realnvp.py new file mode 100644 index 0000000000000000000000000000000000000000..911953e8f9d1056d44a2d3538d750e89b9bd6a7a --- /dev/null +++ b/mmpose/models/utils/realnvp.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from torch import distributions + + +class RealNVP(nn.Module): + """RealNVP: a flow-based generative model + + `Density estimation using Real NVP + arXiv: `_. + + Code is modified from `the official implementation of RLE + `_. + + See also `real-nvp-pytorch + `_. 
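+
+    Example (illustrative; the 2-D samples are random):
+        >>> flow = RealNVP()
+        >>> x = torch.randn(4, 2)
+        >>> log_prob = flow.log_prob(x)  # log-density of each sample
+        >>> log_prob.shape
+        torch.Size([4])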
+ """ + + @staticmethod + def get_scale_net(): + """Get the scale model in a single invertable mapping.""" + return nn.Sequential( + nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), + nn.LeakyReLU(), nn.Linear(64, 2), nn.Tanh()) + + @staticmethod + def get_trans_net(): + """Get the translation model in a single invertable mapping.""" + return nn.Sequential( + nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), + nn.LeakyReLU(), nn.Linear(64, 2)) + + @property + def prior(self): + """The prior distribution.""" + return distributions.MultivariateNormal(self.loc, self.cov) + + def __init__(self): + super(RealNVP, self).__init__() + + self.register_buffer('loc', torch.zeros(2)) + self.register_buffer('cov', torch.eye(2)) + self.register_buffer( + 'mask', torch.tensor([[0, 1], [1, 0]] * 3, dtype=torch.float32)) + + self.s = torch.nn.ModuleList( + [self.get_scale_net() for _ in range(len(self.mask))]) + self.t = torch.nn.ModuleList( + [self.get_trans_net() for _ in range(len(self.mask))]) + self.init_weights() + + def init_weights(self): + """Initialization model weights.""" + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight, gain=0.01) + + def backward_p(self, x): + """Apply mapping form the data space to the latent space and calculate + the log determinant of the Jacobian matrix.""" + + log_det_jacob, z = x.new_zeros(x.shape[0]), x + for i in reversed(range(len(self.t))): + z_ = self.mask[i] * z + s = self.s[i](z_) * (1 - self.mask[i]) # torch.exp(s): betas + t = self.t[i](z_) * (1 - self.mask[i]) # gammas + z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_ + log_det_jacob -= s.sum(dim=1) + return z, log_det_jacob + + def log_prob(self, x): + """Calculate the log probability of given sample in data space.""" + + z, log_det = self.backward_p(x) + return self.prior.log_prob(z) + log_det diff --git a/mmpose/models/utils/regularizations.py b/mmpose/models/utils/regularizations.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c7449038066016f6efb60e126111ace962fe98 --- /dev/null +++ b/mmpose/models/utils/regularizations.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod, abstractproperty + +import torch + + +class PytorchModuleHook(metaclass=ABCMeta): + """Base class for PyTorch module hook registers. + + An instance of a subclass of PytorchModuleHook can be used to + register hook to a pytorch module using the `register` method like: + hook_register.register(module) + + Subclasses should add/overwrite the following methods: + - __init__ + - hook + - hook_type + """ + + @abstractmethod + def hook(self, *args, **kwargs): + """Hook function.""" + + @abstractproperty + def hook_type(self) -> str: + """Hook type Subclasses should overwrite this function to return a + string value in. + + {`forward`, `forward_pre`, `backward`} + """ + + def register(self, module): + """Register the hook function to the module. + + Args: + module (pytorch module): the module to register the hook. 
+ + Returns: + handle (torch.utils.hooks.RemovableHandle): a handle to remove + the hook by calling handle.remove() + """ + assert isinstance(module, torch.nn.Module) + + if self.hook_type == 'forward': + h = module.register_forward_hook(self.hook) + elif self.hook_type == 'forward_pre': + h = module.register_forward_pre_hook(self.hook) + elif self.hook_type == 'backward': + h = module.register_backward_hook(self.hook) + else: + raise ValueError(f'Invalid hook type {self.hook}') + + return h + + +class WeightNormClipHook(PytorchModuleHook): + """Apply weight norm clip regularization. + + The module's parameter will be clip to a given maximum norm before each + forward pass. + + Args: + max_norm (float): The maximum norm of the parameter. + module_param_names (str|list): The parameter name (or name list) to + apply weight norm clip. + """ + + def __init__(self, max_norm=1.0, module_param_names='weight'): + self.module_param_names = module_param_names if isinstance( + module_param_names, list) else [module_param_names] + self.max_norm = max_norm + + @property + def hook_type(self): + return 'forward_pre' + + def hook(self, module, _input): + for name in self.module_param_names: + assert name in module._parameters, f'{name} is not a parameter' \ + f' of the module {type(module)}' + param = module._parameters[name] + + with torch.no_grad(): + m = param.norm().item() + if m > self.max_norm: + param.mul_(self.max_norm / (m + 1e-6)) diff --git a/mmpose/models/utils/reparam_layers.py b/mmpose/models/utils/reparam_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba196294f3cefe7702d053db953a83bfbde8db4 --- /dev/null +++ b/mmpose/models/utils/reparam_layers.py @@ -0,0 +1,217 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import types +from typing import Dict, Optional + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer +from mmengine.model import BaseModule +from torch import Tensor + +from mmpose.utils.typing import OptConfigType + + +class RepVGGBlock(BaseModule): + """A block in RepVGG architecture, supporting optional normalization in the + identity branch. + + This block consists of 3x3 and 1x1 convolutions, with an optional identity + shortcut branch that includes normalization. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): The stride of the block. Defaults to 1. + padding (int): The padding of the block. Defaults to 1. + dilation (int): The dilation of the block. Defaults to 1. + groups (int): The groups of the block. Defaults to 1. + padding_mode (str): The padding mode of the block. Defaults to 'zeros'. + norm_cfg (dict): The config dict for normalization layers. + Defaults to dict(type='BN'). + act_cfg (dict): The config dict for activation layers. + Defaults to dict(type='ReLU'). + without_branch_norm (bool): Whether to skip branch_norm. + Defaults to True. + init_cfg (dict): The config dict for initialization. Defaults to None. 
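+
+    Example (illustrative; channel and input sizes are arbitrary):
+        >>> block = RepVGGBlock(32, 32)
+        >>> out = block(torch.rand(1, 32, 16, 16))  # multi-branch form
+        >>> block.switch_to_deploy()  # fuse branches into one 3x3 conv
+        >>> out = block(torch.rand(1, 32, 16, 16))  # same interface
+        >>> out.shape
+        torch.Size([1, 32, 16, 16])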
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + stride: int = 1, + padding: int = 1, + dilation: int = 1, + groups: int = 1, + padding_mode: str = 'zeros', + norm_cfg: OptConfigType = dict(type='BN'), + act_cfg: OptConfigType = dict(type='ReLU'), + without_branch_norm: bool = True, + init_cfg: OptConfigType = None): + super(RepVGGBlock, self).__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + # judge if input shape and output shape are the same. + # If true, add a normalized identity shortcut. + self.branch_norm = None + if out_channels == in_channels and stride == 1 and \ + padding == dilation and not without_branch_norm: + self.branch_norm = build_norm_layer(norm_cfg, in_channels)[1] + + self.branch_3x3 = ConvModule( + self.in_channels, + self.out_channels, + 3, + stride=self.stride, + padding=self.padding, + groups=self.groups, + dilation=self.dilation, + norm_cfg=self.norm_cfg, + act_cfg=None) + + self.branch_1x1 = ConvModule( + self.in_channels, + self.out_channels, + 1, + groups=self.groups, + norm_cfg=self.norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + def forward(self, x: Tensor) -> Tensor: + """Forward pass through the RepVGG block. + + The output is the sum of 3x3 and 1x1 convolution outputs, + along with the normalized identity branch output, followed by + activation. + + Args: + x (Tensor): The input tensor. + + Returns: + Tensor: The output tensor. + """ + + if self.branch_norm is None: + branch_norm_out = 0 + else: + branch_norm_out = self.branch_norm(x) + + out = self.branch_3x3(x) + self.branch_1x1(x) + branch_norm_out + + out = self.act(out) + + return out + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + """Pad 1x1 tensor to 3x3. + Args: + kernel1x1 (Tensor): The input 1x1 kernel need to be padded. + + Returns: + Tensor: 3x3 kernel after padded. + """ + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch: nn.Module) -> Tensor: + """Derives the equivalent kernel and bias of a specific branch layer. + + Args: + branch (nn.Module): The layer that needs to be equivalently + transformed, which can be nn.Sequential or nn.Batchnorm2d + + Returns: + tuple: Equivalent kernel and bias + """ + if branch is None: + return 0, 0 + + if isinstance(branch, ConvModule): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, (nn.SyncBatchNorm, nn.BatchNorm2d)) + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), + dtype=np.float32) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to( + branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def get_equivalent_kernel_bias(self): + """Derives the equivalent kernel and bias in a differentiable way. 
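+
+        Each branch's BatchNorm is folded into its convolution
+        (kernel scaled by ``gamma / std`` and bias set to
+        ``beta - running_mean * gamma / std``), the 1x1 kernel is
+        zero-padded to 3x3, and the per-branch kernels and biases are
+        summed, so the whole block reduces to a single 3x3 convolution.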
+ + Returns: + tuple: Equivalent kernel and bias + """ + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.branch_3x3) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.branch_1x1) + kernelid, biasid = (0, 0) if self.branch_norm is None else \ + self._fuse_bn_tensor(self.branch_norm) + + return (kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, + bias3x3 + bias1x1 + biasid) + + def switch_to_deploy(self, test_cfg: Optional[Dict] = None): + """Switches the block to deployment mode. + + In deployment mode, the block uses a single convolution operation + derived from the equivalent kernel and bias, replacing the original + branches. This reduces computational complexity during inference. + """ + if getattr(self, 'deploy', False): + return + + kernel, bias = self.get_equivalent_kernel_bias() + self.conv_reparam = nn.Conv2d( + in_channels=self.branch_3x3.conv.in_channels, + out_channels=self.branch_3x3.conv.out_channels, + kernel_size=self.branch_3x3.conv.kernel_size, + stride=self.branch_3x3.conv.stride, + padding=self.branch_3x3.conv.padding, + dilation=self.branch_3x3.conv.dilation, + groups=self.branch_3x3.conv.groups, + bias=True) + self.conv_reparam.weight.data = kernel + self.conv_reparam.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('branch_3x3') + self.__delattr__('branch_1x1') + if hasattr(self, 'branch_norm'): + self.__delattr__('branch_norm') + + def _forward(self, x): + return self.act(self.conv_reparam(x)) + + self.forward = types.MethodType(_forward, self) + + self.deploy = True diff --git a/mmpose/models/utils/rtmcc_block.py b/mmpose/models/utils/rtmcc_block.py new file mode 100644 index 0000000000000000000000000000000000000000..0a16701c0f753d7e60dd02d081f377c9dcf74108 --- /dev/null +++ b/mmpose/models/utils/rtmcc_block.py @@ -0,0 +1,277 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks import DropPath +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + +from .transformer import ScaleNorm + + +def rope(x, dim): + """Applies Rotary Position Embedding to input tensor. + + Args: + x (torch.Tensor): Input tensor. + dim (int | list[int]): The spatial dimension(s) to apply + rotary position embedding. + + Returns: + torch.Tensor: The tensor after applying rotary position + embedding. + + Reference: + `RoFormer: Enhanced Transformer with Rotary + Position Embedding `_ + """ + shape = x.shape + if isinstance(dim, int): + dim = [dim] + + spatial_shape = [shape[i] for i in dim] + total_len = 1 + for i in spatial_shape: + total_len *= i + + position = torch.reshape( + torch.arange(total_len, dtype=torch.int, device=x.device), + spatial_shape) + + for i in range(dim[-1] + 1, len(shape) - 1, 1): + position = torch.unsqueeze(position, dim=-1) + + half_size = shape[-1] // 2 + freq_seq = -torch.arange( + half_size, dtype=torch.int, device=x.device) / float(half_size) + inv_freq = 10000**-freq_seq + + sinusoid = position[..., None] * inv_freq[None, None, :] + + sin = torch.sin(sinusoid) + cos = torch.cos(sinusoid) + x1, x2 = torch.chunk(x, 2, dim=-1) + + return torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=-1) + + +class Scale(nn.Module): + """Scale vector by element multiplications. + + Args: + dim (int): The dimension of the scale vector. + init_value (float, optional): The initial value of the scale vector. + Defaults to 1.0. 
+ trainable (bool, optional): Whether the scale vector is trainable. + Defaults to True. + """ + + def __init__(self, dim, init_value=1., trainable=True): + super().__init__() + self.scale = nn.Parameter( + init_value * torch.ones(dim), requires_grad=trainable) + + def forward(self, x): + """Forward function.""" + + return x * self.scale + + +class RTMCCBlock(nn.Module): + """Gated Attention Unit (GAU) in RTMBlock. + + Args: + num_token (int): The number of tokens. + in_token_dims (int): The input token dimension. + out_token_dims (int): The output token dimension. + expansion_factor (int, optional): The expansion factor of the + intermediate token dimension. Defaults to 2. + s (int, optional): The self-attention feature dimension. + Defaults to 128. + eps (float, optional): The minimum value in clamp. Defaults to 1e-5. + dropout_rate (float, optional): The dropout rate. Defaults to 0.0. + drop_path (float, optional): The drop path rate. Defaults to 0.0. + attn_type (str, optional): Type of attention which should be one of + the following options: + + - 'self-attn': Self-attention. + - 'cross-attn': Cross-attention. + + Defaults to 'self-attn'. + act_fn (str, optional): The activation function which should be one + of the following options: + + - 'ReLU': ReLU activation. + - 'SiLU': SiLU activation. + + Defaults to 'SiLU'. + bias (bool, optional): Whether to use bias in linear layers. + Defaults to False. + use_rel_bias (bool, optional): Whether to use relative bias. + Defaults to True. + pos_enc (bool, optional): Whether to use rotary position + embedding. Defaults to False. + + Reference: + `Transformer Quality in Linear Time + `_ + """ + + def __init__(self, + num_token, + in_token_dims, + out_token_dims, + expansion_factor=2, + s=128, + eps=1e-5, + dropout_rate=0., + drop_path=0., + attn_type='self-attn', + act_fn='SiLU', + bias=False, + use_rel_bias=True, + pos_enc=False): + + super(RTMCCBlock, self).__init__() + self.s = s + self.num_token = num_token + self.use_rel_bias = use_rel_bias + self.attn_type = attn_type + self.pos_enc = pos_enc + self.drop_path = DropPath(drop_path) \ + if drop_path > 0. 
else nn.Identity() + + self.e = int(in_token_dims * expansion_factor) + if use_rel_bias: + if attn_type == 'self-attn': + self.w = nn.Parameter( + torch.rand([2 * num_token - 1], dtype=torch.float)) + else: + self.a = nn.Parameter(torch.rand([1, s], dtype=torch.float)) + self.b = nn.Parameter(torch.rand([1, s], dtype=torch.float)) + self.o = nn.Linear(self.e, out_token_dims, bias=bias) + + if attn_type == 'self-attn': + self.uv = nn.Linear(in_token_dims, 2 * self.e + self.s, bias=bias) + self.gamma = nn.Parameter(torch.rand((2, self.s))) + self.beta = nn.Parameter(torch.rand((2, self.s))) + else: + self.uv = nn.Linear(in_token_dims, self.e + self.s, bias=bias) + self.k_fc = nn.Linear(in_token_dims, self.s, bias=bias) + self.v_fc = nn.Linear(in_token_dims, self.e, bias=bias) + nn.init.xavier_uniform_(self.k_fc.weight) + nn.init.xavier_uniform_(self.v_fc.weight) + + self.ln = ScaleNorm(in_token_dims, eps=eps) + + nn.init.xavier_uniform_(self.uv.weight) + + if act_fn == 'SiLU' or act_fn == nn.SiLU: + assert digit_version(TORCH_VERSION) >= digit_version('1.7.0'), \ + 'SiLU activation requires PyTorch version >= 1.7' + + self.act_fn = nn.SiLU(True) + elif act_fn == 'ReLU' or act_fn == nn.ReLU: + self.act_fn = nn.ReLU(True) + else: + raise NotImplementedError + + if in_token_dims == out_token_dims: + self.shortcut = True + self.res_scale = Scale(in_token_dims) + else: + self.shortcut = False + + self.sqrt_s = math.sqrt(s) + + self.dropout_rate = dropout_rate + + if dropout_rate > 0.: + self.dropout = nn.Dropout(dropout_rate) + + def rel_pos_bias(self, seq_len, k_len=None): + """Add relative position bias.""" + + if self.attn_type == 'self-attn': + t = F.pad(self.w[:2 * seq_len - 1], [0, seq_len]).repeat(seq_len) + t = t[..., :-seq_len].reshape(-1, seq_len, 3 * seq_len - 2) + r = (2 * seq_len - 1) // 2 + t = t[..., r:-r] + else: + a = rope(self.a.repeat(seq_len, 1), dim=0) + b = rope(self.b.repeat(k_len, 1), dim=0) + t = torch.bmm(a, b.permute(0, 2, 1)) + return t + + def _forward(self, inputs): + """GAU Forward function.""" + + if self.attn_type == 'self-attn': + x = inputs + else: + x, k, v = inputs + + x = self.ln(x) + + # [B, K, in_token_dims] -> [B, K, e + e + s] + uv = self.uv(x) + uv = self.act_fn(uv) + + if self.attn_type == 'self-attn': + # [B, K, e + e + s] -> [B, K, e], [B, K, e], [B, K, s] + u, v, base = torch.split(uv, [self.e, self.e, self.s], dim=2) + # [B, K, 1, s] * [1, 1, 2, s] + [2, s] -> [B, K, 2, s] + base = base.unsqueeze(2) * self.gamma[None, None, :] + self.beta + + if self.pos_enc: + base = rope(base, dim=1) + # [B, K, 2, s] -> [B, K, s], [B, K, s] + q, k = torch.unbind(base, dim=2) + + else: + # [B, K, e + s] -> [B, K, e], [B, K, s] + u, q = torch.split(uv, [self.e, self.s], dim=2) + + k = self.k_fc(k) # -> [B, K, s] + v = self.v_fc(v) # -> [B, K, e] + + if self.pos_enc: + q = rope(q, 1) + k = rope(k, 1) + + # [B, K, s].permute() -> [B, s, K] + # [B, K, s] x [B, s, K] -> [B, K, K] + qk = torch.bmm(q, k.permute(0, 2, 1)) + + if self.use_rel_bias: + if self.attn_type == 'self-attn': + bias = self.rel_pos_bias(q.size(1)) + else: + bias = self.rel_pos_bias(q.size(1), k.size(1)) + qk += bias[:, :q.size(1), :k.size(1)] + # [B, K, K] + kernel = torch.square(F.relu(qk / self.sqrt_s)) + + if self.dropout_rate > 0.: + kernel = self.dropout(kernel) + # [B, K, K] x [B, K, e] -> [B, K, e] + x = u * torch.bmm(kernel, v) + # [B, K, e] -> [B, K, out_token_dims] + x = self.o(x) + + return x + + def forward(self, x): + """Forward function.""" + + if self.shortcut: + if self.attn_type == 
'cross-attn': + res_shortcut = x[0] + else: + res_shortcut = x + main_branch = self.drop_path(self._forward(x)) + return self.res_scale(res_shortcut) + main_branch + else: + return self.drop_path(self._forward(x)) diff --git a/mmpose/models/utils/transformer.py b/mmpose/models/utils/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..987b8658083dd03e97e908f9700405911357d2f9 --- /dev/null +++ b/mmpose/models/utils/transformer.py @@ -0,0 +1,902 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional, Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmengine.model import BaseModule, ModuleList +from mmengine.utils import digit_version, to_2tuple +from mmengine.utils.dl_utils import TORCH_VERSION +from torch import Tensor + +from mmpose.utils.typing import ConfigType, OptConfigType + +try: + from fairscale.nn.checkpoint import checkpoint_wrapper +except ImportError: + checkpoint_wrapper = None + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len does not match H, W' + return x.transpose(1, 2).reshape(B, C, H, W).contiguous() + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". 
+ Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + """Get horizontal and vertical padding shapes.""" + + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + """Forward function.""" + + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d. + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
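+
+    Example (illustrative; input size and patch size are arbitrary):
+        >>> patch_embed = PatchEmbed(embed_dims=768, kernel_size=16, stride=16)
+        >>> tokens, out_size = patch_embed(torch.rand(1, 3, 224, 224))
+        >>> tokens.shape, out_size
+        (torch.Size([1, 196, 768]), (14, 14))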
+ """ + + def __init__( + self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None, + ): + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. 
+ norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size + + +class ScaleNorm(nn.Module): + """Scale Norm. + + Args: + dim (int): The dimension of the scale vector. + eps (float, optional): The minimum value in clamp. Defaults to 1e-5. + + Reference: + `Transformers without Tears: Improving the Normalization + of Self-Attention `_ + """ + + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim**-0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): Input tensor. + + Returns: + torch.Tensor: The tensor after applying scale norm. 
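+
+        Example (illustrative sketch; batch and sequence sizes are
+            arbitrary assumptions):
+            >>> norm = ScaleNorm(dim=64)
+            >>> x = torch.randn(2, 10, 64)
+            >>> out = norm(x)
+            >>> assert out.shape == x.shape
+            >>> # each output vector is rescaled to L2 norm sqrt(dim) = 8
+            >>> assert torch.allclose(
+            >>>     out.norm(dim=-1), torch.full((2, 10), 8.0), atol=1e-4)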
+ """ + + if torch.onnx.is_in_onnx_export() and \ + digit_version(TORCH_VERSION) >= digit_version('1.12'): + + norm = torch.linalg.norm(x, dim=-1, keepdim=True) + + else: + norm = torch.norm(x, dim=-1, keepdim=True) + norm = norm * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class SinePositionalEncoding(nn.Module): + """Sine Positional Encoding Module. This module implements sine positional + encoding, which is commonly used in transformer-based models to add + positional information to the input sequences. It uses sine and cosine + functions to create positional embeddings for each element in the input + sequence. + + Args: + out_channels (int): The number of features in the input sequence. + temperature (int): A temperature parameter used to scale + the positional encodings. Defaults to 10000. + spatial_dim (int): The number of spatial dimension of input + feature. 1 represents sequence data and 2 represents grid data. + Defaults to 1. + learnable (bool): Whether to optimize the frequency base. Defaults + to False. + eval_size (int, tuple[int], optional): The fixed spatial size of + input features. Defaults to None. + """ + + def __init__( + self, + out_channels: int, + spatial_dim: int = 1, + temperature: int = 1e5, + learnable: bool = False, + eval_size: Optional[Union[int, Sequence[int]]] = None, + ) -> None: + + super().__init__() + + assert out_channels % 2 == 0 + assert temperature > 0 + + self.spatial_dim = spatial_dim + self.out_channels = out_channels + self.temperature = temperature + self.eval_size = eval_size + self.learnable = learnable + + pos_dim = out_channels // 2 + dim_t = torch.arange(pos_dim, dtype=torch.float32) / pos_dim + dim_t = self.temperature**(dim_t) + + if not learnable: + self.register_buffer('dim_t', dim_t) + else: + self.dim_t = nn.Parameter(dim_t.detach()) + + # set parameters + if eval_size: + if hasattr(self, f'pos_enc_{eval_size}'): + delattr(self, f'pos_enc_{eval_size}') + pos_enc = self.generate_pos_encoding(size=eval_size) + self.register_buffer(f'pos_enc_{eval_size}', pos_enc) + + def forward(self, *args, **kwargs): + return self.generate_pos_encoding(*args, **kwargs) + + def generate_pos_encoding(self, + size: Union[int, Sequence[int]] = None, + position: Optional[Tensor] = None): + """Generate positional encoding for input features. + + Args: + size (int or tuple[int]): Size of the input features. Required + if position is None. + position (Tensor, optional): Position tensor. Required if size + is None. 
+ """ + + assert (size is not None) ^ (position is not None) + + if (not (self.learnable + and self.training)) and size is not None and hasattr( + self, f'pos_enc_{size}'): + return getattr(self, f'pos_enc_{size}') + + if self.spatial_dim == 1: + if size is not None: + if isinstance(size, (tuple, list)): + size = size[0] + position = torch.arange( + size, dtype=torch.float32, device=self.dim_t.device) + + dim_t = self.dim_t.reshape(*((1, ) * position.ndim), -1) + freq = position.unsqueeze(-1) / dim_t + pos_enc = torch.cat((freq.cos(), freq.sin()), dim=-1) + + elif self.spatial_dim == 2: + if size is not None: + if isinstance(size, (tuple, list)): + h, w = size[:2] + elif isinstance(size, (int, float)): + h, w = int(size), int(size) + else: + raise ValueError(f'got invalid type {type(size)} for size') + grid_h, grid_w = torch.meshgrid( + torch.arange( + int(h), dtype=torch.float32, device=self.dim_t.device), + torch.arange( + int(w), dtype=torch.float32, device=self.dim_t.device)) + grid_h, grid_w = grid_h.flatten(), grid_w.flatten() + else: + assert position.size(-1) == 2 + grid_h, grid_w = torch.unbind(position, dim=-1) + + dim_t = self.dim_t.reshape(*((1, ) * grid_h.ndim), -1) + freq_h = grid_h.unsqueeze(-1) / dim_t + freq_w = grid_w.unsqueeze(-1) / dim_t + pos_enc_h = torch.cat((freq_h.cos(), freq_h.sin()), dim=-1) + pos_enc_w = torch.cat((freq_w.cos(), freq_w.sin()), dim=-1) + pos_enc = torch.stack((pos_enc_h, pos_enc_w), dim=-1) + + return pos_enc + + @staticmethod + def apply_additional_pos_enc(feature: Tensor, + pos_enc: Tensor, + spatial_dim: int = 1): + """Apply additional positional encoding to input features. + + Args: + feature (Tensor): Input feature tensor. + pos_enc (Tensor): Positional encoding tensor. + spatial_dim (int): Spatial dimension of input features. + """ + + assert spatial_dim in (1, 2), f'the argument spatial_dim must be ' \ + f'either 1 or 2, but got {spatial_dim}' + if spatial_dim == 2: + pos_enc = pos_enc.flatten(-2) + for _ in range(feature.ndim - pos_enc.ndim): + pos_enc = pos_enc.unsqueeze(0) + return feature + pos_enc + + @staticmethod + def apply_rotary_pos_enc(feature: Tensor, + pos_enc: Tensor, + spatial_dim: int = 1): + """Apply rotary positional encoding to input features. + + Args: + feature (Tensor): Input feature tensor. + pos_enc (Tensor): Positional encoding tensor. + spatial_dim (int): Spatial dimension of input features. + """ + + assert spatial_dim in (1, 2), f'the argument spatial_dim must be ' \ + f'either 1 or 2, but got {spatial_dim}' + + for _ in range(feature.ndim - pos_enc.ndim + spatial_dim - 1): + pos_enc = pos_enc.unsqueeze(0) + + x1, x2 = torch.chunk(feature, 2, dim=-1) + if spatial_dim == 1: + cos, sin = torch.chunk(pos_enc, 2, dim=-1) + feature = torch.cat((x1 * cos - x2 * sin, x2 * cos + x1 * sin), + dim=-1) + elif spatial_dim == 2: + pos_enc_h, pos_enc_w = torch.unbind(pos_enc, dim=-1) + cos_h, sin_h = torch.chunk(pos_enc_h, 2, dim=-1) + cos_w, sin_w = torch.chunk(pos_enc_w, 2, dim=-1) + feature = torch.cat( + (x1 * cos_h - x2 * sin_h, x1 * cos_w + x2 * sin_w), dim=-1) + + return feature + + +class ChannelWiseScale(nn.Module): + """Scale vector by element multiplications. + + Args: + dim (int): The dimension of the scale vector. + init_value (float, optional): The initial value of the scale vector. + Defaults to 1.0. + trainable (bool, optional): Whether the scale vector is trainable. + Defaults to True. 
+ """ + + def __init__(self, dim, init_value=1., trainable=True): + super().__init__() + self.scale = nn.Parameter( + init_value * torch.ones(dim), requires_grad=trainable) + + def forward(self, x): + """Forward function.""" + + return x * self.scale + + +class GAUEncoder(BaseModule): + """Gated Attention Unit (GAU) Encoder. + + Args: + in_token_dims (int): The input token dimension. + out_token_dims (int): The output token dimension. + expansion_factor (int, optional): The expansion factor of the + intermediate token dimension. Defaults to 2. + s (int, optional): The self-attention feature dimension. + Defaults to 128. + eps (float, optional): The minimum value in clamp. Defaults to 1e-5. + dropout_rate (float, optional): The dropout rate. Defaults to 0.0. + drop_path (float, optional): The drop path rate. Defaults to 0.0. + act_fn (str, optional): The activation function which should be one + of the following options: + + - 'ReLU': ReLU activation. + - 'SiLU': SiLU activation. + + Defaults to 'SiLU'. + bias (bool, optional): Whether to use bias in linear layers. + Defaults to False. + pos_enc (bool, optional): Whether to use rotary position + embedding. Defaults to False. + spatial_dim (int, optional): The spatial dimension of inputs + + Reference: + `Transformer Quality in Linear Time + `_ + """ + + def __init__(self, + in_token_dims, + out_token_dims, + expansion_factor=2, + s=128, + eps=1e-5, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + bias=False, + pos_enc: str = 'none', + spatial_dim: int = 1): + + super(GAUEncoder, self).__init__() + self.s = s + self.bias = bias + self.pos_enc = pos_enc + self.in_token_dims = in_token_dims + self.spatial_dim = spatial_dim + self.drop_path = DropPath(drop_path) \ + if drop_path > 0. else nn.Identity() + + self.e = int(in_token_dims * expansion_factor) + self.o = nn.Linear(self.e, out_token_dims, bias=bias) + + self._build_layers() + + self.ln = ScaleNorm(in_token_dims, eps=eps) + + nn.init.xavier_uniform_(self.uv.weight) + + if act_fn == 'SiLU': + assert digit_version(TORCH_VERSION) >= digit_version('1.7.0'), \ + 'SiLU activation requires PyTorch version >= 1.7' + + self.act_fn = nn.SiLU(True) + else: + self.act_fn = nn.ReLU(True) + + if in_token_dims == out_token_dims: + self.shortcut = True + self.res_scale = ChannelWiseScale(in_token_dims) + else: + self.shortcut = False + + self.sqrt_s = math.sqrt(s) + self.dropout_rate = dropout_rate + + if dropout_rate > 0.: + self.dropout = nn.Dropout(dropout_rate) + + def _build_layers(self): + self.uv = nn.Linear( + self.in_token_dims, 2 * self.e + self.s, bias=self.bias) + self.gamma = nn.Parameter(torch.rand((2, self.s))) + self.beta = nn.Parameter(torch.rand((2, self.s))) + + def _forward(self, x, mask=None, pos_enc=None): + """GAU Forward function.""" + + x = self.ln(x) + + # [B, K, in_token_dims] -> [B, K, e + e + s] + uv = self.uv(x) + uv = self.act_fn(uv) + + # [B, K, e + e + s] -> [B, K, e], [B, K, e], [B, K, s] + u, v, base = torch.split(uv, [self.e, self.e, self.s], dim=-1) + # [B, K, 1, s] * [1, 1, 2, s] + [2, s] -> [B, K, 2, s] + dim = base.ndim - self.gamma.ndim + 1 + gamma = self.gamma.view(*((1, ) * dim), *self.gamma.size()) + beta = self.beta.view(*((1, ) * dim), *self.beta.size()) + base = base.unsqueeze(-2) * gamma + beta + # [B, K, 2, s] -> [B, K, s], [B, K, s] + q, k = torch.unbind(base, dim=-2) + + if self.pos_enc == 'rope': + q = SinePositionalEncoding.apply_rotary_pos_enc( + q, pos_enc, self.spatial_dim) + k = SinePositionalEncoding.apply_rotary_pos_enc( + k, pos_enc, 
self.spatial_dim) + elif self.pos_enc == 'add': + pos_enc = pos_enc.reshape(*((1, ) * (q.ndim - 2)), q.size(-2), + q.size(-1)) + q = q + pos_enc + k = k + pos_enc + + # [B, K, s].transpose(-1, -2) -> [B, s, K] + # [B, K, s] x [B, s, K] -> [B, K, K] + qk = torch.matmul(q, k.transpose(-1, -2)) + + # [B, K, K] + kernel = torch.square(F.relu(qk / self.sqrt_s)) + + if mask is not None: + kernel = kernel * mask + + if self.dropout_rate > 0.: + kernel = self.dropout(kernel) + + # [B, K, K] x [B, K, e] -> [B, K, e] + x = u * torch.matmul(kernel, v) + # [B, K, e] -> [B, K, out_token_dims] + x = self.o(x) + + return x + + def forward(self, x, mask=None, pos_enc=None): + """Forward function.""" + out = self.drop_path(self._forward(x, mask=mask, pos_enc=pos_enc)) + if self.shortcut: + return self.res_scale(x) + out + else: + return out + + +class DetrTransformerEncoder(BaseModule): + """Encoder of DETR. + + Args: + num_layers (int): Number of encoder layers. + layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder + layer. All the layers will share the same config. + num_cp (int): Number of checkpointing blocks in encoder layer. + Default to -1. + init_cfg (:obj:`ConfigDict` or dict, optional): the config to control + the initialization. Defaults to None. + """ + + def __init__(self, + num_layers: int, + layer_cfg: ConfigType, + num_cp: int = -1, + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + self.num_layers = num_layers + self.layer_cfg = layer_cfg + self.num_cp = num_cp + assert self.num_cp <= self.num_layers + self._init_layers() + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + self.layers = ModuleList([ + DetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + + if self.num_cp > 0: + if checkpoint_wrapper is None: + raise NotImplementedError( + 'If you want to reduce GPU memory usage, \ + please install fairscale by executing the \ + following command: pip install fairscale.') + for i in range(self.num_cp): + self.layers[i] = checkpoint_wrapper(self.layers[i]) + + self.embed_dims = self.layers[0].embed_dims + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of encoder. + + Args: + query (Tensor): Input queries of encoder, has shape + (bs, num_queries, dim). + query_pos (Tensor): The positional embeddings of the queries, has + shape (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + + Returns: + Tensor: Has shape (bs, num_queries, dim) if `batch_first` is + `True`, otherwise (num_queries, bs, dim). + """ + for layer in self.layers: + query = layer(query, query_pos, key_padding_mask, **kwargs) + return query + + +class DetrTransformerEncoderLayer(BaseModule): + """Implements encoder layer in DETR transformer. + + Args: + self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self + attention. + ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config for + normalization layers. All the layers will share the same + config. Defaults to `LN`. + init_cfg (:obj:`ConfigDict` or dict, optional): Config to control + the initialization. Defaults to None. 
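+
+    Example (illustrative sketch using the default 256-dim config; batch
+        size and query number are arbitrary assumptions):
+        >>> layer = DetrTransformerEncoderLayer()
+        >>> query = torch.randn(2, 100, 256)
+        >>> query_pos = torch.randn(2, 100, 256)
+        >>> key_padding_mask = torch.zeros(2, 100, dtype=torch.bool)
+        >>> out = layer(query, query_pos, key_padding_mask)
+        >>> assert out.shape == (2, 100, 256)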
+ """ + + def __init__(self, + self_attn_cfg: OptConfigType = dict( + embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg: OptConfigType = dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True)), + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.self_attn_cfg = self_attn_cfg + if 'batch_first' not in self.self_attn_cfg: + self.self_attn_cfg['batch_first'] = True + else: + assert self.self_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' + + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize self-attention, FFN, and normalization.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of an encoder layer. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, with + the same shape as `query`. + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor. has shape (bs, num_queries). + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[0](query) + query = self.ffn(query) + query = self.norms[1](query) + + return query diff --git a/mmpose/models/utils/tta.py b/mmpose/models/utils/tta.py new file mode 100644 index 0000000000000000000000000000000000000000..41d2f2fd47986797aeef3b688ac519e15de1a674 --- /dev/null +++ b/mmpose/models/utils/tta.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import Tensor + + +def flip_heatmaps(heatmaps: Tensor, + flip_indices: Optional[List[int]] = None, + flip_mode: str = 'heatmap', + shift_heatmap: bool = True): + """Flip heatmaps for test-time augmentation. + + Args: + heatmaps (Tensor): The heatmaps to flip. Should be a tensor in shape + [B, C, H, W] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint. Defaults to ``None`` + flip_mode (str): Specify the flipping mode. Options are: + + - ``'heatmap'``: horizontally flip the heatmaps and swap heatmaps + of symmetric keypoints according to ``flip_indices`` + - ``'udp_combined'``: similar to ``'heatmap'`` mode but further + flip the x_offset values + - ``'offset'``: horizontally flip the offset fields and swap + heatmaps of symmetric keypoints according to + ``flip_indices``. x_offset values are also reversed + shift_heatmap (bool): Shift the flipped heatmaps to align with the + original heatmaps and improve accuracy. 
Defaults to ``True`` + + Returns: + Tensor: flipped heatmaps in shape [B, C, H, W] + """ + + if flip_mode == 'heatmap': + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == heatmaps.shape[1] + heatmaps = heatmaps[:, flip_indices] + elif flip_mode == 'udp_combined': + B, C, H, W = heatmaps.shape + heatmaps = heatmaps.view(B, C // 3, 3, H, W) + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == C // 3 + heatmaps = heatmaps[:, flip_indices] + heatmaps[:, :, 1] = -heatmaps[:, :, 1] + heatmaps = heatmaps.view(B, C, H, W) + + elif flip_mode == 'offset': + B, C, H, W = heatmaps.shape + heatmaps = heatmaps.view(B, C // 2, -1, H, W) + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == C // 2 + heatmaps = heatmaps[:, flip_indices] + heatmaps[:, :, 0] = -heatmaps[:, :, 0] + heatmaps = heatmaps.view(B, C, H, W) + + else: + raise ValueError(f'Invalid flip_mode value "{flip_mode}"') + + if shift_heatmap: + # clone data to avoid unexpected in-place operation when using CPU + heatmaps[..., 1:] = heatmaps[..., :-1].clone() + + return heatmaps + + +def flip_vectors(x_labels: Tensor, y_labels: Tensor, flip_indices: List[int]): + """Flip instance-level labels in specific axis for test-time augmentation. + + Args: + x_labels (Tensor): The vector labels in x-axis to flip. Should be + a tensor in shape [B, C, Wx] + y_labels (Tensor): The vector labels in y-axis to flip. Should be + a tensor in shape [B, C, Wy] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + """ + assert x_labels.ndim == 3 and y_labels.ndim == 3 + assert len(flip_indices) == x_labels.shape[1] and len( + flip_indices) == y_labels.shape[1] + x_labels = x_labels[:, flip_indices].flip(-1) + y_labels = y_labels[:, flip_indices] + + return x_labels, y_labels + + +def flip_coordinates(coords: Tensor, flip_indices: List[int], + shift_coords: bool, input_size: Tuple[int, int]): + """Flip normalized coordinates for test-time augmentation. + + Args: + coords (Tensor): The coordinates to flip. Should be a tensor in shape + [B, K, D] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + shift_coords (bool): Shift the flipped coordinates to align with the + original coordinates and improve accuracy. Defaults to ``True`` + input_size (Tuple[int, int]): The size of input image in [w, h] + """ + assert coords.ndim == 3 + assert len(flip_indices) == coords.shape[1] + + coords[:, :, 0] = 1.0 - coords[:, :, 0] + + if shift_coords: + img_width = input_size[0] + coords[:, :, 0] -= 1.0 / img_width + + coords = coords[:, flip_indices] + return coords + + +def flip_visibility(vis: Tensor, flip_indices: List[int]): + """Flip keypoints visibility for test-time augmentation. + + Args: + vis (Tensor): The keypoints visibility to flip. Should be a tensor + in shape [B, K] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + """ + assert vis.ndim == 2 + + vis = vis[:, flip_indices] + return vis + + +def aggregate_heatmaps(heatmaps: List[Tensor], + size: Optional[Tuple[int, int]], + align_corners: bool = False, + mode: str = 'average'): + """Aggregate multiple heatmaps. + + Args: + heatmaps (List[Tensor]): Multiple heatmaps to aggregate. Each should + be in shape (B, C, H, W) + size (Tuple[int, int], optional): The target size in (w, h). All + heatmaps will be resized to the target size. 
If not given, the + first heatmap tensor's width and height will be used as the target + size. Defaults to ``None`` + align_corners (bool): Whether align corners when resizing heatmaps. + Defaults to ``False`` + mode (str): Aggregation mode in one of the following: + + - ``'average'``: Get average of heatmaps. All heatmaps mush have + the same channel number + - ``'concat'``: Concate the heatmaps at the channel dim + """ + + if mode not in {'average', 'concat'}: + raise ValueError(f'Invalid aggregation mode `{mode}`') + + if size is None: + h, w = heatmaps[0].shape[2:4] + else: + w, h = size + + for i, _heatmaps in enumerate(heatmaps): + assert _heatmaps.ndim == 4 + if mode == 'average': + assert _heatmaps.shape[:2] == heatmaps[0].shape[:2] + else: + assert _heatmaps.shape[0] == heatmaps[0].shape[0] + + if _heatmaps.shape[2:4] != (h, w): + heatmaps[i] = F.interpolate( + _heatmaps, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + + if mode == 'average': + output = sum(heatmaps).div(len(heatmaps)) + elif mode == 'concat': + output = torch.cat(heatmaps, dim=1) + else: + raise ValueError() + + return output diff --git a/mmpose/registry.py b/mmpose/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..84903eaf2deab3711b1ff87cf93fc11fcda88730 --- /dev/null +++ b/mmpose/registry.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""MMPose provides following registry nodes to support using modules across +projects. + +Each node is a child of the root registry in MMEngine. +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. +""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import INFERENCERS as MMENGINE_INFERENCERS +from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +# Registries For Runner and the related +# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS) +# manage runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS) +# manage all kinds of loops like `EpochBasedTrainLoop` +LOOPS = 
Registry('loop', parent=MMENGINE_LOOPS) +# manage all kinds of hooks like `CheckpointHook` +HOOKS = Registry( + 'hook', parent=MMENGINE_HOOKS, locations=['mmpose.engine.hooks']) + +# Registries For Data and the related +# manage data-related modules +DATASETS = Registry( + 'dataset', parent=MMENGINE_DATASETS, locations=['mmpose.datasets']) +DATA_SAMPLERS = Registry( + 'data sampler', + parent=MMENGINE_DATA_SAMPLERS, + locations=['mmpose.datasets.samplers']) +TRANSFORMS = Registry( + 'transform', + parent=MMENGINE_TRANSFORMS, + locations=['mmpose.datasets.transforms']) + +# manage all kinds of modules inheriting `nn.Module` +MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmpose.models']) +# manage all kinds of model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry( + 'model_wrapper', + parent=MMENGINE_MODEL_WRAPPERS, + locations=['mmpose.models']) +# manage all kinds of weight initialization modules like `Uniform` +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', + parent=MMENGINE_WEIGHT_INITIALIZERS, + locations=['mmpose.models']) +# manage all kinds of batch augmentations like Mixup and CutMix. +BATCH_AUGMENTS = Registry('batch augment', locations=['mmpose.models']) + +# Registries For Optimizer and the related +# manage all kinds of optimizers like `SGD` and `Adam` +OPTIMIZERS = Registry( + 'optimizer', parent=MMENGINE_OPTIMIZERS, locations=['mmpose.engine']) +# manage optimizer wrapper +OPTIM_WRAPPERS = Registry( + 'optimizer_wrapper', + parent=MMENGINE_OPTIM_WRAPPERS, + locations=['mmpose.engine']) +# manage constructors that customize the optimization hyperparameters. +OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer wrapper constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, + locations=['mmpose.engine.optim_wrappers']) +# manage all kinds of parameter schedulers like `MultiStepLR` +PARAM_SCHEDULERS = Registry( + 'parameter scheduler', + parent=MMENGINE_PARAM_SCHEDULERS, + locations=['mmpose.engine.schedulers']) + +# manage all kinds of metrics +METRICS = Registry( + 'metric', parent=MMENGINE_METRICS, locations=['mmpose.evaluation.metrics']) +# manage all kinds of evaluators +EVALUATORS = Registry( + 'evaluator', + parent=MMENGINE_EVALUATOR, + locations=['mmpose.evaluation.evaluators']) + +# manage task-specific modules like anchor generators and box coders +TASK_UTILS = Registry( + 'task util', + parent=MMENGINE_TASK_UTILS, + locations=['mmpose.models.task_modules']) + +# Registries For Visualizer and the related +# manage visualizer +VISUALIZERS = Registry( + 'visualizer', + parent=MMENGINE_VISUALIZERS, + locations=['mmpose.visualization']) +# manage visualizer backend +VISBACKENDS = Registry( + 'vis_backend', + parent=MMENGINE_VISBACKENDS, + locations=['mmpose.visualization']) + +# manage all kinds log processors +LOG_PROCESSORS = Registry( + 'log processor', + parent=MMENGINE_LOG_PROCESSORS, + locations=['mmpose.visualization']) + +# manager keypoint encoder/decoder +KEYPOINT_CODECS = Registry('KEYPOINT_CODECS', locations=['mmpose.codecs']) + +# manage inferencer +INFERENCERS = Registry( + 'inferencer', + parent=MMENGINE_INFERENCERS, + locations=['mmpose.apis.inferencers']) diff --git a/mmpose/structures/__init__.py b/mmpose/structures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56a7dd725e06943c45a047b4da2f2ddd386ab16f --- /dev/null +++ b/mmpose/structures/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
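+# Usage sketch for the registries defined in mmpose/registry.py (illustrative
+# only; ``MyCustomHead`` is a hypothetical module, not part of the codebase):
+#
+#     from mmpose.registry import MODELS
+#
+#     @MODELS.register_module()
+#     class MyCustomHead(nn.Module):
+#         ...
+#
+#     head = MODELS.build(dict(type='MyCustomHead'))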
+from .bbox import (bbox_clip_border, bbox_corner2xyxy, bbox_cs2xywh, + bbox_cs2xyxy, bbox_xywh2cs, bbox_xywh2xyxy, + bbox_xyxy2corner, bbox_xyxy2cs, bbox_xyxy2xywh, flip_bbox, + get_pers_warp_matrix, get_udp_warp_matrix, get_warp_matrix) +from .keypoint import flip_keypoints, keypoint_clip_border, find_min_padding_exact, fix_bbox_aspect_ratio +from .multilevel_pixel_data import MultilevelPixelData +from .pose_data_sample import PoseDataSample +from .utils import merge_data_samples, revert_heatmap, split_instances + +__all__ = [ + 'PoseDataSample', 'MultilevelPixelData', 'bbox_cs2xywh', 'bbox_cs2xyxy', + 'bbox_xywh2cs', 'bbox_xywh2xyxy', 'bbox_xyxy2cs', 'bbox_xyxy2xywh', + 'flip_bbox', 'get_udp_warp_matrix', 'get_warp_matrix', 'flip_keypoints', + 'merge_data_samples', 'revert_heatmap', 'split_instances', + 'keypoint_clip_border', 'bbox_clip_border', 'bbox_xyxy2corner', + 'bbox_corner2xyxy', 'get_pers_warp_matrix', + 'find_min_padding_exact', 'fix_bbox_aspect_ratio' +] diff --git a/mmpose/structures/bbox/__init__.py b/mmpose/structures/bbox/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..abd3d5f2d9534842327cfd3a5b8a4fb225fda68d --- /dev/null +++ b/mmpose/structures/bbox/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox_overlaps import bbox_overlaps +from .transforms import (bbox_clip_border, bbox_corner2xyxy, bbox_cs2xywh, + bbox_cs2xyxy, bbox_xywh2cs, bbox_xywh2xyxy, + bbox_xyxy2corner, bbox_xyxy2cs, bbox_xyxy2xywh, + flip_bbox, get_pers_warp_matrix, get_udp_warp_matrix, + get_warp_matrix) + +__all__ = [ + 'bbox_cs2xywh', 'bbox_cs2xyxy', 'bbox_xywh2cs', 'bbox_xywh2xyxy', + 'bbox_xyxy2cs', 'bbox_xyxy2xywh', 'flip_bbox', 'get_udp_warp_matrix', + 'get_warp_matrix', 'bbox_overlaps', 'bbox_clip_border', 'bbox_xyxy2corner', + 'bbox_corner2xyxy', 'get_pers_warp_matrix' +] diff --git a/mmpose/structures/bbox/bbox_overlaps.py b/mmpose/structures/bbox/bbox_overlaps.py new file mode 100644 index 0000000000000000000000000000000000000000..682008c3378833dff45f99b41a00b56ad0d24710 --- /dev/null +++ b/mmpose/structures/bbox/bbox_overlaps.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def fp16_clamp(x, min_val=None, max_val=None): + if not x.is_cuda and x.dtype == torch.float16: + return x.float().clamp(min_val, max_val).half() + return x.clamp(min_val, max_val) + + +def bbox_overlaps(bboxes1, + bboxes2, + mode='iou', + is_aligned=False, + eps=1e-6) -> torch.Tensor: + """Calculate overlap between two sets of bounding boxes. + + Args: + bboxes1 (torch.Tensor): Bounding boxes of shape (..., m, 4) or empty. + bboxes2 (torch.Tensor): Bounding boxes of shape (..., n, 4) or empty. + mode (str): "iou" (intersection over union), + "iof" (intersection over foreground), + or "giou" (generalized intersection over union). + Defaults to "iou". + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. + eps (float, optional): A small constant added to the denominator for + numerical stability. Default 1e-6. + + Returns: + torch.Tensor: Overlap values of shape (..., m, n) if is_aligned is + False, else shape (..., m). 
+ + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + """ + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + if bboxes1.ndim == 1: + bboxes1 = bboxes1.unsqueeze(0) + if bboxes2.ndim == 1: + bboxes2 = bboxes2.unsqueeze(0) + + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) + wh = fp16_clamp(rb - lt, min_val=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) + rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) + wh = fp16_clamp(rb - lt, min_val=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps_tensor = union.new_tensor([eps]) + union = torch.max(union, eps_tensor) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + elif mode == 'giou': + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min_val=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps_tensor) + gious = ious - (enclose_area - union) / enclose_area + return gious diff --git a/mmpose/structures/bbox/transforms.py b/mmpose/structures/bbox/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..88db311c274a92ca36bdc085fe43d694d50c0cf1 --- /dev/null +++ b/mmpose/structures/bbox/transforms.py @@ -0,0 +1,525 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Tuple + +import cv2 +import numpy as np + + +def bbox_xyxy2xywh(bbox_xyxy: np.ndarray) -> np.ndarray: + """Transform the bbox format from x1y1x2y2 to xywh. + + Args: + bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or + (n, 5). (left, top, right, bottom, [score]) + + Returns: + np.ndarray: Bounding boxes (with scores), + shaped (n, 4) or (n, 5). 
(left, top, width, height, [score]) + """ + bbox_xywh = bbox_xyxy.copy() + bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + + return bbox_xywh + + +def bbox_xywh2xyxy(bbox_xywh: np.ndarray) -> np.ndarray: + """Transform the bbox format from xywh to x1y1x2y2. + + Args: + bbox_xywh (ndarray): Bounding boxes (with scores), + shaped (n, 4) or (n, 5). (left, top, width, height, [score]) + Returns: + np.ndarray: Bounding boxes (with scores), shaped (n, 4) or + (n, 5). (left, top, right, bottom, [score]) + """ + bbox_xyxy = bbox_xywh.copy() + bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] + bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] + + return bbox_xyxy + + +def bbox_xyxy2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (left, top, right, bottom) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + scale = (bbox[..., 2:] - bbox[..., :2]) * padding + center = (bbox[..., 2:] + bbox[..., :2]) * 0.5 + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def bbox_xywh2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (x, y, h, w) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + x, y, w, h = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x + w * 0.5, y + h * 0.5]) + scale = np.hstack([w, h]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def bbox_cs2xyxy(center: np.ndarray, + scale: np.ndarray, + padding: float = 1.) -> np.ndarray: + """Transform the bbox format from (center, scale) to (x1,y1,x2,y2). + + Args: + center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) + scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + ndarray[float32]: BBox (x1, y1, x2, y2) in shape (4, ) or (n, 4) + """ + + dim = center.ndim + assert scale.ndim == dim + + if dim == 1: + center = center[None, :] + scale = scale[None, :] + + wh = scale / padding + xy = center - 0.5 * wh + bbox = np.hstack((xy, xy + wh)) + + if dim == 1: + bbox = bbox[0] + + return bbox + + +def bbox_cs2xywh(center: np.ndarray, + scale: np.ndarray, + padding: float = 1.) -> np.ndarray: + """Transform the bbox format from (center, scale) to (x,y,w,h). 
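+
+    Example (illustrative sketch with the default padding=1.0):
+        >>> center = np.array([30., 60.])
+        >>> scale = np.array([40., 80.])
+        >>> bbox_cs2xywh(center, scale).tolist()
+        [10.0, 20.0, 40.0, 80.0]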
+ + Args: + center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) + scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + ndarray[float32]: BBox (x, y, w, h) in shape (4, ) or (n, 4) + """ + + dim = center.ndim + assert scale.ndim == dim + + if dim == 1: + center = center[None, :] + scale = scale[None, :] + + wh = scale / padding + xy = center - 0.5 * wh + bbox = np.hstack((xy, wh)) + + if dim == 1: + bbox = bbox[0] + + return bbox + + +def bbox_xyxy2corner(bbox: np.ndarray): + """Convert bounding boxes from xyxy format to corner format. + + Given a numpy array containing bounding boxes in the format + (xmin, ymin, xmax, ymax), this function converts the bounding + boxes to the corner format, where each box is represented by four + corner points (top-left, top-right, bottom-right, bottom-left). + + Args: + bbox (numpy.ndarray): Input array of shape (N, 4) representing + N bounding boxes. + + Returns: + numpy.ndarray: An array of shape (N, 4, 2) containing the corner + points of the bounding boxes. + + Example: + bbox = np.array([[0, 0, 100, 50], [10, 20, 200, 150]]) + corners = bbox_xyxy2corner(bbox) + """ + dim = bbox.ndim + if dim == 1: + bbox = bbox[None] + + bbox = np.tile(bbox, 2).reshape(-1, 4, 2) + bbox[:, 1:3, 0] = bbox[:, 0:2, 0] + + if dim == 1: + bbox = bbox[0] + + return bbox + + +def bbox_corner2xyxy(bbox: np.ndarray): + """Convert bounding boxes from corner format to xyxy format. + + Given a numpy array containing bounding boxes in the corner + format (four corner points for each box), this function converts + the bounding boxes to the (xmin, ymin, xmax, ymax) format. + + Args: + bbox (numpy.ndarray): Input array of shape (N, 4, 2) representing + N bounding boxes. + + Returns: + numpy.ndarray: An array of shape (N, 4) containing the bounding + boxes in xyxy format. + + Example: + corners = np.array([[[0, 0], [100, 0], [100, 50], [0, 50]], + [[10, 20], [200, 20], [200, 150], [10, 150]]]) + bbox = bbox_corner2xyxy(corners) + """ + if bbox.shape[-1] == 8: + bbox = bbox.reshape(*bbox.shape[:-1], 4, 2) + + dim = bbox.ndim + if dim == 2: + bbox = bbox[None] + + bbox = np.concatenate((bbox.min(axis=1), bbox.max(axis=1)), axis=1) + + if dim == 2: + bbox = bbox[0] + + return bbox + + +def bbox_clip_border(bbox: np.ndarray, shape: Tuple[int, int]) -> np.ndarray: + """Clip bounding box coordinates to fit within a specified shape. + + Args: + bbox (np.ndarray): Bounding box coordinates of shape (..., 4) + or (..., 2). + shape (Tuple[int, int]): Shape of the image to which bounding + boxes are being clipped in the format of (w, h) + + Returns: + np.ndarray: Clipped bounding box coordinates. + + Example: + >>> bbox = np.array([[10, 20, 30, 40], [40, 50, 80, 90]]) + >>> shape = (50, 50) # Example image shape + >>> clipped_bbox = bbox_clip_border(bbox, shape) + """ + width, height = shape[:2] + + if bbox.shape[-1] == 2: + bbox[..., 0] = np.clip(bbox[..., 0], a_min=0, a_max=width) + bbox[..., 1] = np.clip(bbox[..., 1], a_min=0, a_max=height) + else: + bbox[..., ::2] = np.clip(bbox[..., ::2], a_min=0, a_max=width) + bbox[..., 1::2] = np.clip(bbox[..., 1::2], a_min=0, a_max=height) + + return bbox + + +def flip_bbox(bbox: np.ndarray, + image_size: Tuple[int, int], + bbox_format: str = 'xywh', + direction: str = 'horizontal') -> np.ndarray: + """Flip the bbox in the given direction. + + Args: + bbox (np.ndarray): The bounding boxes. 
The shape should be (..., 4) + if ``bbox_format`` is ``'xyxy'`` or ``'xywh'``, and (..., 2) if + ``bbox_format`` is ``'center'`` + image_size (tuple): The image shape in [w, h] + bbox_format (str): The bbox format. Options are ``'xywh'``, ``'xyxy'`` + and ``'center'``. + direction (str): The flip direction. Options are ``'horizontal'``, + ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` + + Returns: + np.ndarray: The flipped bounding boxes. + """ + direction_options = {'horizontal', 'vertical', 'diagonal'} + assert direction in direction_options, ( + f'Invalid flipping direction "{direction}". ' + f'Options are {direction_options}') + + format_options = {'xywh', 'xyxy', 'center'} + assert bbox_format in format_options, ( + f'Invalid bbox format "{bbox_format}". ' + f'Options are {format_options}') + + bbox_flipped = bbox.copy() + w, h = image_size + + # TODO: consider using "integer corner" coordinate system + if direction == 'horizontal': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., 0] = w - bbox[..., 0] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[..., ::2] = w - bbox[..., -2::-2] - 1 + elif direction == 'vertical': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., 1] = h - bbox[..., 1] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[..., 1::2] = h - bbox[..., ::-2] - 1 + elif direction == 'diagonal': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., :2] = [w, h] - bbox[..., :2] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[...] = [w, h, w, h] - bbox - 1 + bbox_flipped = np.concatenate( + (bbox_flipped[..., 2:], bbox_flipped[..., :2]), axis=-1) + + return bbox_flipped + + +def get_udp_warp_matrix( + center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], +) -> np.ndarray: + """Calculate the affine transformation matrix under the unbiased + constraint. See `UDP (CVPR 2020)`_ for details. + + Note: + + - The bbox number: N + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). + scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (tuple): Size ([w, h]) of the output image + + Returns: + np.ndarray: A 2x3 transformation matrix + + .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + assert len(center) == 2 + assert len(scale) == 2 + assert len(output_size) == 2 + + input_size = center * 2 + rot_rad = np.deg2rad(rot) + warp_mat = np.zeros((2, 3), dtype=np.float32) + scale_x = (output_size[0] - 1) / scale[0] + scale_y = (output_size[1] - 1) / scale[1] + warp_mat[0, 0] = math.cos(rot_rad) * scale_x + warp_mat[0, 1] = -math.sin(rot_rad) * scale_x + warp_mat[0, 2] = scale_x * (-0.5 * input_size[0] * math.cos(rot_rad) + + 0.5 * input_size[1] * math.sin(rot_rad) + + 0.5 * scale[0]) + warp_mat[1, 0] = math.sin(rot_rad) * scale_y + warp_mat[1, 1] = math.cos(rot_rad) * scale_y + warp_mat[1, 2] = scale_y * (-0.5 * input_size[0] * math.sin(rot_rad) - + 0.5 * input_size[1] * math.cos(rot_rad) + + 0.5 * scale[1]) + return warp_mat + + +def get_warp_matrix( + center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], + shift: Tuple[float, float] = (0., 0.), + inv: bool = False, + fix_aspect_ratio: bool = True, +) -> np.ndarray: + """Calculate the affine transformation matrix that can warp the bbox area + in the input image to the output size. + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). 
+ scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (np.ndarray[2, ] | list(2,)): Size of the + destination heatmaps. + shift (0-100%): Shift translation ratio wrt the width/height. + Default (0., 0.). + inv (bool): Option to inverse the affine transform direction. + (inv=False: src->dst or inv=True: dst->src) + fix_aspect_ratio (bool): Whether to fix aspect ratio during transform. + Defaults to True. + + Returns: + np.ndarray: A 2x3 transformation matrix + """ + assert len(center) == 2 + assert len(scale) == 2 + assert len(output_size) == 2 + assert len(shift) == 2 + + shift = np.array(shift) + src_w, src_h = scale[:2] + dst_w, dst_h = output_size[:2] + + rot_rad = np.deg2rad(rot) + src_dir = _rotate_point(np.array([src_w * -0.5, 0.]), rot_rad) + dst_dir = np.array([dst_w * -0.5, 0.]) + + src = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale * shift + src[1, :] = center + src_dir + scale * shift + + dst = np.zeros((3, 2), dtype=np.float32) + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + + if fix_aspect_ratio: + src[2, :] = _get_3rd_point(src[0, :], src[1, :]) + dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) + else: + src_dir_2 = _rotate_point(np.array([0., src_h * -0.5]), rot_rad) + dst_dir_2 = np.array([0., dst_h * -0.5]) + src[2, :] = center + src_dir_2 + scale * shift + dst[2, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir_2 + + if inv: + warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + return warp_mat + + +def get_pers_warp_matrix(center: np.ndarray, translate: np.ndarray, + scale: float, rot: float, + shear: np.ndarray) -> np.ndarray: + """Compute a perspective warp matrix based on specified transformations. + + Args: + center (np.ndarray): Center of the transformation. + translate (np.ndarray): Translation vector. + scale (float): Scaling factor. + rot (float): Rotation angle in degrees. + shear (np.ndarray): Shearing angles in degrees along x and y axes. + + Returns: + np.ndarray: Perspective warp matrix. + + Example: + >>> center = np.array([0, 0]) + >>> translate = np.array([10, 20]) + >>> scale = 1.2 + >>> rot = 30.0 + >>> shear = np.array([15.0, 0.0]) + >>> warp_matrix = get_pers_warp_matrix(center, translate, + scale, rot, shear) + """ + translate_mat = np.array([[1, 0, translate[0] + center[0]], + [0, 1, translate[1] + center[1]], [0, 0, 1]], + dtype=np.float32) + + shear_x = math.radians(shear[0]) + shear_y = math.radians(shear[1]) + shear_mat = np.array([[1, np.tan(shear_x), 0], [np.tan(shear_y), 1, 0], + [0, 0, 1]], + dtype=np.float32) + + rotate_angle = math.radians(rot) + rotate_mat = np.array([[np.cos(rotate_angle), -np.sin(rotate_angle), 0], + [np.sin(rotate_angle), + np.cos(rotate_angle), 0], [0, 0, 1]], + dtype=np.float32) + + scale_mat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]], + dtype=np.float32) + + recover_center_mat = np.array([[1, 0, -center[0]], [0, 1, -center[1]], + [0, 0, 1]], + dtype=np.float32) + + warp_matrix = np.dot( + np.dot( + np.dot(np.dot(translate_mat, shear_mat), rotate_mat), scale_mat), + recover_center_mat) + + return warp_matrix + + +def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: + """Rotate a point by an angle. 
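+
+    Example (illustrative sketch):
+        >>> pt = np.array([1., 0.])
+        >>> _rotate_point(pt, np.pi / 2).round(6).tolist()
+        [0.0, 1.0]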
+ + Args: + pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) + angle_rad (float): rotation angle in radian + + Returns: + np.ndarray: Rotated point in shape (2, ) + """ + + sn, cs = np.sin(angle_rad), np.cos(angle_rad) + rot_mat = np.array([[cs, -sn], [sn, cs]]) + return rot_mat @ pt + + +def _get_3rd_point(a: np.ndarray, b: np.ndarray): + """To calculate the affine matrix, three pairs of points are required. This + function is used to get the 3rd point, given 2D points a & b. + + The 3rd point is defined by rotating vector `a - b` by 90 degrees + anticlockwise, using b as the rotation center. + + Args: + a (np.ndarray): The 1st point (x,y) in shape (2, ) + b (np.ndarray): The 2nd point (x,y) in shape (2, ) + + Returns: + np.ndarray: The 3rd point. + """ + direction = a - b + c = b + np.r_[-direction[1], direction[0]] + return c diff --git a/mmpose/structures/keypoint/__init__.py b/mmpose/structures/keypoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e662a9287a1cb0ad3098a1b0187f7512aafa281e --- /dev/null +++ b/mmpose/structures/keypoint/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .transforms import (flip_keypoints, flip_keypoints_custom_center, + keypoint_clip_border) +from .keypoints_min_padding import fix_bbox_aspect_ratio, find_min_padding_exact + +__all__ = [ + 'flip_keypoints', 'flip_keypoints_custom_center', 'keypoint_clip_border', + 'fix_bbox_aspect_ratio', 'find_min_padding_exact' +] diff --git a/mmpose/structures/keypoint/keypoints_min_padding.py b/mmpose/structures/keypoint/keypoints_min_padding.py new file mode 100644 index 0000000000000000000000000000000000000000..df93646716de71c5c86c1b10bd0be80cc938e66b --- /dev/null +++ b/mmpose/structures/keypoint/keypoints_min_padding.py @@ -0,0 +1,107 @@ +import numpy as np + +def find_min_padding_exact(bbox, kpts, aspect_ratio=3/4, bbox_format='xywh'): + '''Find the minimum padding to make keypoint inside bbox''' + assert bbox_format.lower() in ['xywh', 'xyxy'], f"Invalid bbox format {bbox_format}. Only 'xyxy' or 'xywh' are supported." + + if kpts.size % 2 == 0: + kpts = kpts.reshape(-1, 2) + vis = np.ones(kpts.shape[0]) + elif kpts.size % 3 == 0: + kpts = kpts.reshape(-1, 3) + vis = kpts[:, 2].flatten() + kpts = kpts[:, :2] + else: + raise ValueError('Keypoints should have 2 or 3 values each') + + if bbox_format.lower() == 'xyxy': + bbox = np.array([ + bbox[0], + bbox[1], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + ]) + + if aspect_ratio is not None: + # Fix the aspect ratio of the bounding box + bbox = fix_bbox_aspect_ratio(bbox, aspect_ratio=aspect_ratio, padding=1.0, bbox_format='xywh') + + x0, y0, w, h = np.hsplit(bbox, [1, 2, 3]) + + x1 = x0 + w + y1 = y0 + h + x_bbox_distances = np.max(np.stack([ + np.clip(x0 - kpts[:, 0], a_min=0, a_max=None), + np.clip(kpts[:, 0] - x1, a_min=0, a_max=None), + ]), axis=0) + y_bbox_distances = np.max(np.stack([ + np.clip(y0 - kpts[:, 1], a_min=0, a_max=None), + np.clip(kpts[:, 1] - y1, a_min=0, a_max=None), + ]), axis=0) + + padding_x = 2 * x_bbox_distances / w + padding_y = 2 * y_bbox_distances / h + padding = 1 + np.maximum(padding_x, padding_y) + padding = np.array(padding).flatten() + + padding[vis <= 0] = -1.0 + + return padding + +def fix_bbox_aspect_ratio(bbox, aspect_ratio=3/4, padding=1.25, bbox_format='xywh'): + assert bbox_format.lower() in ['xywh', 'xyxy'], f"Invalid bbox format {bbox_format}. Only 'xyxy' or 'xywh' are supported." 
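+
+    # Example (illustrative): with aspect_ratio=3/4 and padding=1.25, a
+    # square 'xywh' box [0, 0, 100, 100] keeps its center (50, 50) and is
+    # expanded to roughly 125 x 166.7 (width x height).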
+ + in_shape = bbox.shape + bbox = bbox.reshape((-1, 4)) + + if bbox_format.lower() == 'xywh': + bbox_xyxy = np.array([ + bbox[:, 0], + bbox[:, 1], + bbox[:, 0] + bbox[:, 2], + bbox[:, 1] + bbox[:, 3], + ]).T + else: + bbox_xyxy = np.array(bbox) + + centers = bbox_xyxy[:, :2] + (bbox_xyxy[:, 2:] - bbox_xyxy[:, :2]) / 2 + widths = bbox_xyxy[:, 2] - bbox_xyxy[:, 0] + heights = bbox_xyxy[:, 3] - bbox_xyxy[:, 1] + + new_widths = widths.copy().astype(np.float32) + new_heights = heights.copy().astype(np.float32) + + for i in range(bbox_xyxy.shape[0]): + if widths[i] == 0: + widths[i] =+ 1 + if heights[i] == 0: + heights[i] =+ 1 + + if widths[i] / heights[i] > aspect_ratio: + new_heights[i] = widths[i] / aspect_ratio + else: + new_widths[i] = heights[i] * aspect_ratio + new_widths *= padding + new_heights *= padding + + new_bbox_xyxy = np.array([ + centers[:, 0] - new_widths / 2, + centers[:, 1] - new_heights / 2, + centers[:, 0] + new_widths / 2, + centers[:, 1] + new_heights / 2, + ]).T + + if bbox_format.lower() == 'xywh': + new_bbox = np.array([ + new_bbox_xyxy[:, 0], + new_bbox_xyxy[:, 1], + new_bbox_xyxy[:, 2] - new_bbox_xyxy[:, 0], + new_bbox_xyxy[:, 3] - new_bbox_xyxy[:, 1], + ]).T + else: + new_bbox = new_bbox_xyxy + + + new_bbox = new_bbox.reshape(in_shape) + + return new_bbox \ No newline at end of file diff --git a/mmpose/structures/keypoint/transforms.py b/mmpose/structures/keypoint/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7cebda907a7156d733944579790a871560ad2a --- /dev/null +++ b/mmpose/structures/keypoint/transforms.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import numpy as np + + +def flip_keypoints(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray], + image_size: Tuple[int, int], + flip_indices: List[int], + direction: str = 'horizontal' + ) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Flip keypoints in the given direction. + + Note: + + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): Keypoints in shape (..., K, D) + keypoints_visible (np.ndarray, optional): The visibility of keypoints + in shape (..., K, 1) or (..., K, 2). Set ``None`` if the keypoint + visibility is unavailable + image_size (tuple): The image shape in [w, h] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + direction (str): The flip direction. Options are ``'horizontal'``, + ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` + + Returns: + tuple: + - keypoints_flipped (np.ndarray): Flipped keypoints in shape + (..., K, D) + - keypoints_visible_flipped (np.ndarray, optional): Flipped keypoints' + visibility in shape (..., K, 1) or (..., K, 2). Return ``None`` if + the input ``keypoints_visible`` is ``None`` + """ + + ndim = keypoints.ndim + assert keypoints.shape[:-1] == keypoints_visible.shape[:ndim - 1], ( + f'Mismatched shapes of keypoints {keypoints.shape} and ' + f'keypoints_visible {keypoints_visible.shape}') + + direction_options = {'horizontal', 'vertical', 'diagonal'} + assert direction in direction_options, ( + f'Invalid flipping direction "{direction}". 
' + f'Options are {direction_options}') + + # swap the symmetric keypoint pairs + if direction == 'horizontal' or direction == 'vertical': + keypoints = keypoints.take(flip_indices, axis=ndim - 2) + if keypoints_visible is not None: + keypoints_visible = keypoints_visible.take( + flip_indices, axis=ndim - 2) + + # flip the keypoints + w, h = image_size + if direction == 'horizontal': + keypoints[..., 0] = w - 1 - keypoints[..., 0] + elif direction == 'vertical': + keypoints[..., 1] = h - 1 - keypoints[..., 1] + else: + keypoints = [w, h] - keypoints - 1 + + return keypoints, keypoints_visible + + +def flip_keypoints_custom_center(keypoints: np.ndarray, + keypoints_visible: np.ndarray, + flip_indices: List[int], + center_mode: str = 'static', + center_x: float = 0.5, + center_index: Union[int, List] = 0): + """Flip human joints horizontally. + + Note: + - num_keypoint: K + - dimension: D + + Args: + keypoints (np.ndarray([..., K, D])): Coordinates of keypoints. + keypoints_visible (np.ndarray([..., K])): Visibility item of keypoints. + flip_indices (list[int]): The indices to flip the keypoints. + center_mode (str): The mode to set the center location on the x-axis + to flip around. Options are: + + - static: use a static x value (see center_x also) + - root: use a root joint (see center_index also) + + Defaults: ``'static'``. + center_x (float): Set the x-axis location of the flip center. Only used + when ``center_mode`` is ``'static'``. Defaults: 0.5. + center_index (Union[int, List]): Set the index of the root joint, whose + x location will be used as the flip center. Only used when + ``center_mode`` is ``'root'``. Defaults: 0. + + Returns: + np.ndarray([..., K, C]): Flipped joints. + """ + + assert keypoints.ndim >= 2, f'Invalid pose shape {keypoints.shape}' + + allowed_center_mode = {'static', 'root'} + assert center_mode in allowed_center_mode, 'Get invalid center_mode ' \ + f'{center_mode}, allowed choices are {allowed_center_mode}' + + if center_mode == 'static': + x_c = center_x + elif center_mode == 'root': + center_index = [center_index] if isinstance(center_index, int) else \ + center_index + assert keypoints.shape[-2] > max(center_index) + x_c = keypoints[..., center_index, 0].mean(axis=-1) + + keypoints_flipped = keypoints.copy() + keypoints_visible_flipped = keypoints_visible.copy() + # Swap left-right parts + for left, right in enumerate(flip_indices): + keypoints_flipped[..., left, :] = keypoints[..., right, :] + keypoints_visible_flipped[..., left] = keypoints_visible[..., right] + + # Flip horizontally + keypoints_flipped[..., 0] = x_c * 2 - keypoints_flipped[..., 0] + return keypoints_flipped, keypoints_visible_flipped + + +def keypoint_clip_border(keypoints: np.ndarray, keypoints_visible: np.ndarray, + shape: Tuple[int, + int]) -> Tuple[np.ndarray, np.ndarray]: + """Set the visibility values for keypoints outside the image border. + + Args: + keypoints (np.ndarray): Input keypoints coordinates. + keypoints_visible (np.ndarray): Visibility values of keypoints. + shape (Tuple[int, int]): Shape of the image to which keypoints are + being clipped in the format of (w, h). + + Note: + This function sets the visibility values of keypoints that fall outside + the specified frame border to zero (0.0). 
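+
+    Example (illustrative values):
+        >>> kpts = np.array([[[10., 10.], [300., 50.]]])
+        >>> vis = np.ones((1, 2))
+        >>> _, vis = keypoint_clip_border(kpts, vis, (256, 192))
+        >>> vis
+        array([[1., 0.]])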
+ """ + width, height = shape[:2] + + # Create a mask for keypoints outside the frame + outside_mask = ((keypoints[..., 0] > width) | (keypoints[..., 0] < 0) | + (keypoints[..., 1] > height) | (keypoints[..., 1] < 0)) + + # Update visibility values for keypoints outside the frame + if keypoints_visible.ndim == 2: + keypoints_visible[outside_mask] = 0.0 + elif keypoints_visible.ndim == 3: + keypoints_visible[outside_mask, 0] = 0.0 + + return keypoints, keypoints_visible diff --git a/mmpose/structures/multilevel_pixel_data.py b/mmpose/structures/multilevel_pixel_data.py new file mode 100644 index 0000000000000000000000000000000000000000..bea191e7297c233cc129f2da09ab5a4c6793fa0f --- /dev/null +++ b/mmpose/structures/multilevel_pixel_data.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import abc +from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union + +import numpy as np +import torch +from mmengine.structures import BaseDataElement, PixelData +from mmengine.utils import is_list_of + +IndexType = Union[str, slice, int, list, torch.LongTensor, + torch.cuda.LongTensor, torch.BoolTensor, + torch.cuda.BoolTensor, np.ndarray] + + +class MultilevelPixelData(BaseDataElement): + """Data structure for multi-level pixel-wise annotations or predictions. + + All data items in ``data_fields`` of ``MultilevelPixelData`` are lists + of np.ndarray or torch.Tensor, and should meet the following requirements: + + - Have the same length, which is the number of levels + - At each level, the data should have 3 dimensions in order of channel, + height and weight + - At each level, the data should have the same height and weight + + Examples: + >>> metainfo = dict(num_keypoints=17) + >>> sizes = [(64, 48), (128, 96), (256, 192)] + >>> heatmaps = [np.random.rand(17, h, w) for h, w in sizes] + >>> masks = [torch.rand(1, h, w) for h, w in sizes] + >>> data = MultilevelPixelData(metainfo=metainfo, + ... heatmaps=heatmaps, + ... masks=masks) + + >>> # get data item + >>> heatmaps = data.heatmaps # A list of 3 numpy.ndarrays + >>> masks = data.masks # A list of 3 torch.Tensors + + >>> # get level + >>> data_l0 = data[0] # PixelData with fields 'heatmaps' and 'masks' + >>> data.nlevel + 3 + + >>> # get shape + >>> data.shape + ((64, 48), (128, 96), (256, 192)) + + >>> # set + >>> offset_maps = [torch.rand(2, h, w) for h, w in sizes] + >>> data.offset_maps = offset_maps + """ + + def __init__(self, *, metainfo: Optional[dict] = None, **kwargs) -> None: + object.__setattr__(self, '_nlevel', None) + super().__init__(metainfo=metainfo, **kwargs) + + @property + def nlevel(self): + """Return the level number. + + Returns: + Optional[int]: The level number, or ``None`` if the data has not + been assigned. 
+ """ + return self._nlevel + + def __getitem__(self, item: Union[int, str, list, + slice]) -> Union[PixelData, Sequence]: + if isinstance(item, int): + if self.nlevel is None or item >= self.nlevel: + raise IndexError( + f'Lcale index {item} out of range ({self.nlevel})') + return self.get(f'_level_{item}') + + if isinstance(item, str): + if item not in self: + raise KeyError(item) + return getattr(self, item) + + # TODO: support indexing by list and slice over levels + raise NotImplementedError( + f'{self.__class__.__name__} does not support index type ' + f'{type(item)}') + + def levels(self) -> List[PixelData]: + if self.nlevel: + return list(self[i] for i in range(self.nlevel)) + return [] + + @property + def shape(self) -> Optional[Tuple[Tuple]]: + """Get the shape of multi-level pixel data. + + Returns: + Optional[tuple]: A tuple of data shape at each level, or ``None`` + if the data has not been assigned. + """ + if self.nlevel is None: + return None + + return tuple(level.shape for level in self.levels()) + + def set_data(self, data: dict) -> None: + """Set or change key-value pairs in ``data_field`` by parameter + ``data``. + + Args: + data (dict): A dict contains annotations of image or + model predictions. + """ + assert isinstance(data, + dict), f'meta should be a `dict` but got {data}' + for k, v in data.items(): + self.set_field(v, k, field_type='data') + + def set_field(self, + value: Any, + name: str, + dtype: Optional[Union[Type, Tuple[Type, ...]]] = None, + field_type: str = 'data') -> None: + """Special method for set union field, used as property.setter + functions.""" + assert field_type in ['metainfo', 'data'] + if dtype is not None: + assert isinstance( + value, + dtype), f'{value} should be a {dtype} but got {type(value)}' + + if name.startswith('_level_'): + raise AttributeError( + f'Cannot set {name} to be a field because the pattern ' + '<_level_{n}> is reserved for inner data field') + + if field_type == 'metainfo': + if name in self._data_fields: + raise AttributeError( + f'Cannot set {name} to be a field of metainfo ' + f'because {name} is already a data field') + self._metainfo_fields.add(name) + + else: + if name in self._metainfo_fields: + raise AttributeError( + f'Cannot set {name} to be a field of data ' + f'because {name} is already a metainfo field') + + if not isinstance(value, abc.Sequence): + raise TypeError( + 'The value should be a sequence (of numpy.ndarray or' + f'torch.Tesnor), but got a {type(value)}') + + if len(value) == 0: + raise ValueError('Setting empty value is not allowed') + + if not isinstance(value[0], (torch.Tensor, np.ndarray)): + raise TypeError( + 'The value should be a sequence of numpy.ndarray or' + f'torch.Tesnor, but got a sequence of {type(value[0])}') + + if self.nlevel is not None: + assert len(value) == self.nlevel, ( + f'The length of the value ({len(value)}) should match the' + f'number of the levels ({self.nlevel})') + else: + object.__setattr__(self, '_nlevel', len(value)) + for i in range(self.nlevel): + object.__setattr__(self, f'_level_{i}', PixelData()) + + for i, v in enumerate(value): + self[i].set_field(v, name, field_type='data') + + self._data_fields.add(name) + + object.__setattr__(self, name, value) + + def __delattr__(self, item: str): + """delete the item in dataelement. + + Args: + item (str): The key to delete. + """ + if item in ('_metainfo_fields', '_data_fields'): + raise AttributeError(f'{item} has been used as a ' + 'private attribute, which is immutable. 
') + + if item in self._metainfo_fields: + super().__delattr__(item) + else: + for level in self.levels(): + level.__delattr__(item) + self._data_fields.remove(item) + + def __getattr__(self, name): + if name in {'_data_fields', '_metainfo_fields' + } or name not in self._data_fields: + raise AttributeError( + f'\'{self.__class__.__name__}\' object has no attribute ' + f'\'{name}\'') + + return [getattr(level, name) for level in self.levels()] + + def pop(self, *args) -> Any: + """pop property in data and metainfo as the same as python.""" + assert len(args) < 3, '``pop`` get more than 2 arguments' + name = args[0] + if name in self._metainfo_fields: + self._metainfo_fields.remove(name) + return self.__dict__.pop(*args) + + elif name in self._data_fields: + self._data_fields.remove(name) + return [level.pop(*args) for level in self.levels()] + + # with default value + elif len(args) == 2: + return args[1] + else: + # don't just use 'self.__dict__.pop(*args)' for only popping key in + # metainfo or data + raise KeyError(f'{args[0]} is not contained in metainfo or data') + + def _convert(self, apply_to: Type, + func: Callable[[Any], Any]) -> 'MultilevelPixelData': + """Convert data items with the given function. + + Args: + apply_to (Type): The type of data items to apply the conversion + func (Callable): The conversion function that takes a data item + as the input and return the converted result + + Returns: + MultilevelPixelData: the converted data element. + """ + new_data = self.new() + for k, v in self.items(): + if is_list_of(v, apply_to): + v = [func(_v) for _v in v] + data = {k: v} + new_data.set_data(data) + return new_data + + def cpu(self) -> 'MultilevelPixelData': + """Convert all tensors to CPU in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.cpu()) + + def cuda(self) -> 'MultilevelPixelData': + """Convert all tensors to GPU in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.cuda()) + + def detach(self) -> 'MultilevelPixelData': + """Detach all tensors in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.detach()) + + def numpy(self) -> 'MultilevelPixelData': + """Convert all tensor to np.narray in data.""" + return self._convert( + apply_to=torch.Tensor, func=lambda x: x.detach().cpu().numpy()) + + def to_tensor(self) -> 'MultilevelPixelData': + """Convert all tensor to np.narray in data.""" + return self._convert( + apply_to=np.ndarray, func=lambda x: torch.from_numpy(x)) + + # Tensor-like methods + def to(self, *args, **kwargs) -> 'MultilevelPixelData': + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if hasattr(v[0], 'to'): + v = [v_.to(*args, **kwargs) for v_ in v] + data = {k: v} + new_data.set_data(data) + return new_data diff --git a/mmpose/structures/pose_data_sample.py b/mmpose/structures/pose_data_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..53f6e8990e96206844b18b4983e416a534fd5afc --- /dev/null +++ b/mmpose/structures/pose_data_sample.py @@ -0,0 +1,104 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +from mmengine.structures import BaseDataElement, InstanceData, PixelData + +from mmpose.structures import MultilevelPixelData + + +class PoseDataSample(BaseDataElement): + """The base data structure of MMPose that is used as the interface between + modules. 
+ + The attributes of ``PoseDataSample`` includes: + + - ``gt_instances``(InstanceData): Ground truth of instances with + keypoint annotations + - ``pred_instances``(InstanceData): Instances with keypoint + predictions + - ``gt_fields``(PixelData): Ground truth of spatial distribution + annotations like keypoint heatmaps and part affine fields (PAF) + - ``pred_fields``(PixelData): Predictions of spatial distributions + + Examples: + >>> import torch + >>> from mmengine.structures import InstanceData, PixelData + >>> from mmpose.structures import PoseDataSample + + >>> pose_meta = dict(img_shape=(800, 1216), + ... crop_size=(256, 192), + ... heatmap_size=(64, 48)) + >>> gt_instances = InstanceData() + >>> gt_instances.bboxes = torch.rand((1, 4)) + >>> gt_instances.keypoints = torch.rand((1, 17, 2)) + >>> gt_instances.keypoints_visible = torch.rand((1, 17, 1)) + >>> gt_fields = PixelData() + >>> gt_fields.heatmaps = torch.rand((17, 64, 48)) + + >>> data_sample = PoseDataSample(gt_instances=gt_instances, + ... gt_fields=gt_fields, + ... metainfo=pose_meta) + >>> assert 'img_shape' in data_sample + >>> len(data_sample.gt_instances) + 1 + """ + + @property + def gt_instances(self) -> InstanceData: + return self._gt_instances + + @gt_instances.setter + def gt_instances(self, value: InstanceData): + self.set_field(value, '_gt_instances', dtype=InstanceData) + + @gt_instances.deleter + def gt_instances(self): + del self._gt_instances + + @property + def gt_instance_labels(self) -> InstanceData: + return self._gt_instance_labels + + @gt_instance_labels.setter + def gt_instance_labels(self, value: InstanceData): + self.set_field(value, '_gt_instance_labels', dtype=InstanceData) + + @gt_instance_labels.deleter + def gt_instance_labels(self): + del self._gt_instance_labels + + @property + def pred_instances(self) -> InstanceData: + return self._pred_instances + + @pred_instances.setter + def pred_instances(self, value: InstanceData): + self.set_field(value, '_pred_instances', dtype=InstanceData) + + @pred_instances.deleter + def pred_instances(self): + del self._pred_instances + + @property + def gt_fields(self) -> Union[PixelData, MultilevelPixelData]: + return self._gt_fields + + @gt_fields.setter + def gt_fields(self, value: Union[PixelData, MultilevelPixelData]): + self.set_field(value, '_gt_fields', dtype=type(value)) + + @gt_fields.deleter + def gt_fields(self): + del self._gt_fields + + @property + def pred_fields(self) -> PixelData: + return self._pred_heatmaps + + @pred_fields.setter + def pred_fields(self, value: PixelData): + self.set_field(value, '_pred_heatmaps', dtype=PixelData) + + @pred_fields.deleter + def pred_fields(self): + del self._pred_heatmaps diff --git a/mmpose/structures/utils.py b/mmpose/structures/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..11abd9ee6c0627093d29212fceb645575b6f022f --- /dev/null +++ b/mmpose/structures/utils.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import List + +import cv2 +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_list_of + +from .bbox.transforms import get_warp_matrix +from .pose_data_sample import PoseDataSample + +from pycocotools import mask as Mask + + +def merge_data_samples(data_samples: List[PoseDataSample]) -> PoseDataSample: + """Merge the given data samples into a single data sample. 
+ + This function can be used to merge the top-down predictions with + bboxes from the same image. The merged data sample will contain all + instances from the input data samples, and the identical metainfo with + the first input data sample. + + Args: + data_samples (List[:obj:`PoseDataSample`]): The data samples to + merge + + Returns: + PoseDataSample: The merged data sample. + """ + + if not is_list_of(data_samples, PoseDataSample): + raise ValueError('Invalid input type, should be a list of ' + ':obj:`PoseDataSample`') + + if len(data_samples) == 0: + warnings.warn('Try to merge an empty list of data samples.') + return PoseDataSample() + + merged = PoseDataSample(metainfo=data_samples[0].metainfo) + + if 'gt_instances' in data_samples[0]: + merged.gt_instances = InstanceData.cat( + [d.gt_instances for d in data_samples]) + + if 'pred_instances' in data_samples[0]: + merged.pred_instances = InstanceData.cat( + [d.pred_instances for d in data_samples]) + + if 'pred_fields' in data_samples[0] and 'heatmaps' in data_samples[ + 0].pred_fields: + reverted_heatmaps = [ + revert_heatmap(data_sample.pred_fields.heatmaps, + data_sample.input_center, data_sample.input_scale, + data_sample.ori_shape) + for data_sample in data_samples + ] + + merged_heatmaps = np.max(reverted_heatmaps, axis=0) + pred_fields = PixelData() + pred_fields.set_data(dict(heatmaps=merged_heatmaps)) + merged.pred_fields = pred_fields + + if 'gt_fields' in data_samples[0] and 'heatmaps' in data_samples[ + 0].gt_fields: + reverted_heatmaps = [ + revert_heatmap(data_sample.gt_fields.heatmaps, + data_sample.input_center, data_sample.input_scale, + data_sample.ori_shape) + for data_sample in data_samples + ] + + merged_heatmaps = np.max(reverted_heatmaps, axis=0) + gt_fields = PixelData() + gt_fields.set_data(dict(heatmaps=merged_heatmaps)) + merged.gt_fields = gt_fields + + return merged + + +def revert_heatmap(heatmap, input_center, input_scale, img_shape): + """Revert predicted heatmap on the original image. + + Args: + heatmap (np.ndarray or torch.tensor): predicted heatmap. + input_center (np.ndarray): bounding box center coordinate. + input_scale (np.ndarray): bounding box scale. + img_shape (tuple or list): size of original image. 
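+
+    Returns:
+        np.ndarray: The heatmap warped back onto the original image plane
+        with spatial size (img_h, img_w); a (K, H, W) input yields a
+        (K, img_h, img_w) output.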
+ """ + if torch.is_tensor(heatmap): + heatmap = heatmap.cpu().detach().numpy() + + ndim = heatmap.ndim + # [K, H, W] -> [H, W, K] + if ndim == 3: + heatmap = heatmap.transpose(1, 2, 0) + + hm_h, hm_w = heatmap.shape[:2] + img_h, img_w = img_shape + warp_mat = get_warp_matrix( + input_center.reshape((2, )), + input_scale.reshape((2, )), + rot=0, + output_size=(hm_w, hm_h), + inv=True) + + heatmap = cv2.warpAffine( + heatmap, warp_mat, (img_w, img_h), flags=cv2.INTER_LINEAR) + + # [H, W, K] -> [K, H, W] + if ndim == 3: + heatmap = heatmap.transpose(2, 0, 1) + + return heatmap + + +def split_instances(instances: InstanceData) -> List[InstanceData]: + """Convert instances into a list where each element is a dict that contains + information about one instance.""" + results = [] + + # return an empty list if there is no instance detected by the model + if instances is None: + return results + + for i in range(len(instances.keypoints)): + result = dict( + keypoints=instances.keypoints[i].tolist(), + keypoint_scores=instances.keypoint_scores[i].tolist(), + ) + if 'bboxes' in instances: + result['bbox'] = instances.bboxes[i].flatten().tolist() + if 'bbox_scores' in instances: + result['bbox_score'] = instances.bbox_scores[i] + if 'masks' in instances: + # Conver mask from binary to COCO polygon format + mask = instances.masks[i].astype(np.uint8) + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + # breakpoint() + segmentation = [] + for contour in contours: + # Valid polygons have >= 6 coordinates (3 points) + if contour.size >= 6: + segmentation.append(contour.flatten().tolist()) + result['segmentation'] = segmentation + results.append(result) + + return results diff --git a/mmpose/testing/__init__.py b/mmpose/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5612dac6c66e3bf7c2bad86154ac62c9d5e9529a --- /dev/null +++ b/mmpose/testing/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ._utils import (get_coco_sample, get_config_file, get_packed_inputs, + get_pose_estimator_cfg, get_repo_dir) + +__all__ = [ + 'get_packed_inputs', 'get_coco_sample', 'get_config_file', + 'get_pose_estimator_cfg', 'get_repo_dir' +] diff --git a/mmpose/testing/_utils.py b/mmpose/testing/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2a2dd023484cf1a3ed54e8115b3694f9dd8cb9c8 --- /dev/null +++ b/mmpose/testing/_utils.py @@ -0,0 +1,252 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from copy import deepcopy +from typing import Optional + +import numpy as np +import torch +from mmengine.config import Config +from mmengine.dataset import pseudo_collate +from mmengine.structures import InstanceData, PixelData + +from mmpose.structures import MultilevelPixelData, PoseDataSample +from mmpose.structures.bbox import bbox_xyxy2cs + + +def get_coco_sample( + img_shape=(240, 320), + img_fill: Optional[int] = None, + num_instances=1, + with_bbox_cs=True, + with_img_mask=False, + random_keypoints_visible=False, + non_occlusion=False): + """Create a dummy data sample in COCO style.""" + rng = np.random.RandomState(0) + h, w = img_shape + if img_fill is None: + img = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) + else: + img = np.full((h, w, 3), img_fill, dtype=np.uint8) + + if non_occlusion: + bbox = _rand_bboxes(rng, num_instances, w / num_instances, h) + for i in range(num_instances): + bbox[i, 0::2] += w / num_instances * i + else: + bbox = _rand_bboxes(rng, num_instances, w, h) + + keypoints = _rand_keypoints(rng, bbox, 17) + if random_keypoints_visible: + keypoints_visible = np.random.randint(0, 2, (num_instances, + 17)).astype(np.float32) + else: + keypoints_visible = np.full((num_instances, 17), 1, dtype=np.float32) + + upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + lower_body_ids = [11, 12, 13, 14, 15, 16] + flip_pairs = [[2, 1], [1, 2], [4, 3], [3, 4], [6, 5], [5, 6], [8, 7], + [7, 8], [10, 9], [9, 10], [12, 11], [11, 12], [14, 13], + [13, 14], [16, 15], [15, 16]] + flip_indices = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + dataset_keypoint_weights = np.array([ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ]).astype(np.float32) + + data = { + 'img': img, + 'img_shape': img_shape, + 'ori_shape': img_shape, + 'bbox': bbox, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'upper_body_ids': upper_body_ids, + 'lower_body_ids': lower_body_ids, + 'flip_pairs': flip_pairs, + 'flip_indices': flip_indices, + 'dataset_keypoint_weights': dataset_keypoint_weights, + 'invalid_segs': [], + } + + if with_bbox_cs: + data['bbox_center'], data['bbox_scale'] = bbox_xyxy2cs(data['bbox']) + + if with_img_mask: + data['img_mask'] = np.random.randint(0, 2, (h, w), dtype=np.uint8) + + return data + + +def get_packed_inputs(batch_size=2, + num_instances=1, + num_keypoints=17, + num_levels=1, + img_shape=(256, 192), + input_size=(192, 256), + heatmap_size=(48, 64), + simcc_split_ratio=2.0, + with_heatmap=True, + with_reg_label=True, + with_simcc_label=True): + """Create a dummy batch of model inputs and data samples.""" + rng = np.random.RandomState(0) + + inputs_list = [] + for idx in range(batch_size): + inputs = dict() + + # input + h, w = img_shape + image = rng.randint(0, 255, size=(3, h, w), dtype=np.uint8) + inputs['inputs'] = torch.from_numpy(image) + + # attributes + bboxes = _rand_bboxes(rng, num_instances, w, h) + bbox_centers, bbox_scales = bbox_xyxy2cs(bboxes) + + keypoints = _rand_keypoints(rng, bboxes, num_keypoints) + keypoints_visible = np.ones((num_instances, num_keypoints), + dtype=np.float32) + + # meta + img_meta = { + 'id': idx, + 'img_id': idx, + 'img_path': '.png', + 'img_shape': img_shape, + 'input_size': input_size, + 'input_center': bbox_centers, + 'input_scale': bbox_scales, + 'flip': False, + 'flip_direction': None, + 'flip_indices': list(range(num_keypoints)) + } + + np.random.shuffle(img_meta['flip_indices']) + data_sample = 
PoseDataSample(metainfo=img_meta) + + # gt_instance + gt_instances = InstanceData() + gt_instance_labels = InstanceData() + + # [N, K] -> [N, num_levels, K] + # keep the first dimension as the num_instances + if num_levels > 1: + keypoint_weights = np.tile(keypoints_visible[:, None], + (1, num_levels, 1)) + else: + keypoint_weights = keypoints_visible.copy() + + gt_instances.bboxes = bboxes + gt_instances.bbox_centers = bbox_centers + gt_instances.bbox_scales = bbox_scales + gt_instances.bbox_scores = np.ones((num_instances, ), dtype=np.float32) + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = keypoints_visible + + gt_instance_labels.keypoint_weights = torch.FloatTensor( + keypoint_weights) + + if with_reg_label: + gt_instance_labels.keypoint_labels = torch.FloatTensor(keypoints / + input_size) + + if with_simcc_label: + len_x = np.around(input_size[0] * simcc_split_ratio) + len_y = np.around(input_size[1] * simcc_split_ratio) + gt_instance_labels.keypoint_x_labels = torch.FloatTensor( + _rand_simcc_label(rng, num_instances, num_keypoints, len_x)) + gt_instance_labels.keypoint_y_labels = torch.FloatTensor( + _rand_simcc_label(rng, num_instances, num_keypoints, len_y)) + + # gt_fields + if with_heatmap: + if num_levels == 1: + gt_fields = PixelData() + # generate single-level heatmaps + W, H = heatmap_size + heatmaps = rng.rand(num_keypoints, H, W) + gt_fields.heatmaps = torch.FloatTensor(heatmaps) + else: + # generate multilevel heatmaps + heatmaps = [] + for _ in range(num_levels): + W, H = heatmap_size + heatmaps_ = rng.rand(num_keypoints, H, W) + heatmaps.append(torch.FloatTensor(heatmaps_)) + # [num_levels*K, H, W] + gt_fields = MultilevelPixelData() + gt_fields.heatmaps = heatmaps + data_sample.gt_fields = gt_fields + + data_sample.gt_instances = gt_instances + data_sample.gt_instance_labels = gt_instance_labels + + inputs['data_samples'] = data_sample + inputs_list.append(inputs) + + packed_inputs = pseudo_collate(inputs_list) + return packed_inputs + + +def _rand_keypoints(rng, bboxes, num_keypoints): + n = bboxes.shape[0] + relative_pos = rng.rand(n, num_keypoints, 2) + keypoints = relative_pos * bboxes[:, None, :2] + ( + 1 - relative_pos) * bboxes[:, None, 2:4] + + return keypoints + + +def _rand_simcc_label(rng, num_instances, num_keypoints, len_feats): + simcc_label = rng.rand(num_instances, num_keypoints, int(len_feats)) + return simcc_label + + +def _rand_bboxes(rng, num_instances, img_w, img_h): + cx, cy = rng.rand(num_instances, 2).T + bw, bh = 0.2 + 0.8 * rng.rand(num_instances, 2).T + + tl_x = ((cx * img_w) - (img_w * bw / 2)).clip(0, img_w) + tl_y = ((cy * img_h) - (img_h * bh / 2)).clip(0, img_h) + br_x = ((cx * img_w) + (img_w * bw / 2)).clip(0, img_w) + br_y = ((cy * img_h) + (img_h * bh / 2)).clip(0, img_h) + + bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T + return bboxes + + +def get_repo_dir(): + """Return the path of the MMPose repo directory.""" + try: + # Assume the function in invoked is the source mmpose repo + repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) + except NameError: + # For IPython development when __file__ is not defined + import mmpose + repo_dir = osp.dirname(osp.dirname(mmpose.__file__)) + + return repo_dir + + +def get_config_file(fn: str): + """Return full path of a config file from the given relative path.""" + repo_dir = get_repo_dir() + if fn.startswith('configs'): + fn_config = osp.join(repo_dir, fn) + else: + fn_config = osp.join(repo_dir, 'configs', fn) + + if not osp.isfile(fn_config): + raise 
FileNotFoundError(f'Cannot find config file {fn_config}') + + return fn_config + + +def get_pose_estimator_cfg(fn: str): + """Load model config from a config file.""" + + fn_config = get_config_file(fn) + config = Config.fromfile(fn_config) + return deepcopy(config.model) diff --git a/mmpose/utils/__init__.py b/mmpose/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb9c018ed0c48c56d3ef17a3783c15f37f0292a4 --- /dev/null +++ b/mmpose/utils/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .camera import SimpleCamera, SimpleCameraTorch +from .collect_env import collect_env +from .config_utils import adapt_mmdet_pipeline +from .dist_utils import reduce_mean +from .logger import get_root_logger +from .setup_env import register_all_modules, setup_multi_processes +from .timer import StopWatch + +__all__ = [ + 'get_root_logger', 'collect_env', 'StopWatch', 'setup_multi_processes', + 'register_all_modules', 'SimpleCamera', 'SimpleCameraTorch', + 'adapt_mmdet_pipeline', 'reduce_mean' +] diff --git a/mmpose/utils/camera.py b/mmpose/utils/camera.py new file mode 100644 index 0000000000000000000000000000000000000000..a7759d308f38fda99fcf56910b09251d24eccbed --- /dev/null +++ b/mmpose/utils/camera.py @@ -0,0 +1,280 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch +from mmengine.registry import Registry + +CAMERAS = Registry('camera') + + +class SingleCameraBase(metaclass=ABCMeta): + """Base class for single camera model. + + Args: + param (dict): Camera parameters + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_world: Project points from camera coordinates to world + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + @abstractmethod + def __init__(self, param): + """Load camera parameters and check validity.""" + + def world_to_camera(self, X): + """Project points from world coordinates to camera coordinates.""" + raise NotImplementedError + + def camera_to_world(self, X): + """Project points from camera coordinates to world coordinates.""" + raise NotImplementedError + + def camera_to_pixel(self, X): + """Project points from camera coordinates to pixel coordinates.""" + raise NotImplementedError + + def world_to_pixel(self, X): + """Project points from world coordinates to pixel coordinates.""" + _X = self.world_to_camera(X) + return self.camera_to_pixel(_X) + + +@CAMERAS.register_module() +class SimpleCamera(SingleCameraBase): + """Camera model to calculate coordinate transformation with given + intrinsic/extrinsic camera parameters. + + Note: + The keypoint coordinate should be an np.ndarray with a shape of + [...,J, C] where J is the keypoint number of an instance, and C is + the coordinate dimension. For example: + + [J, C]: shape of joint coordinates of a person with J joints. + [N, J, C]: shape of a batch of person joint coordinates. + [N, T, J, C]: shape of a batch of pose sequences. 
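+
+    Example (illustrative parameters only):
+        >>> param = dict(R=np.eye(3), T=np.zeros((3, 1)),
+        ...              f=np.full((2, 1), 1000.), c=np.full((2, 1), 320.))
+        >>> cam = SimpleCamera(param)
+        >>> pixels = cam.world_to_pixel(np.random.rand(17, 3))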
+ + Args: + param (dict): camera parameters including: + - R: 3x3, camera rotation matrix (camera-to-world) + - T: 3x1, camera translation (camera-to-world) + - K: (optional) 2x3, camera intrinsic matrix + - k: (optional) nx1, camera radial distortion coefficients + - p: (optional) mx1, camera tangential distortion coefficients + - f: (optional) 2x1, camera focal length + - c: (optional) 2x1, camera center + if K is not provided, it will be calculated from f and c. + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + def __init__(self, param): + + self.param = {} + # extrinsic param + R = np.array(param['R'], dtype=np.float32) + T = np.array(param['T'], dtype=np.float32) + assert R.shape == (3, 3) + assert T.shape == (3, 1) + # The camera matrices are transposed in advance because the joint + # coordinates are stored as row vectors. + self.param['R_c2w'] = R.T + self.param['T_c2w'] = T.T + self.param['R_w2c'] = R + self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] + + # intrinsic param + if 'K' in param: + K = np.array(param['K'], dtype=np.float32) + assert K.shape == (2, 3) + self.param['K'] = K.T + self.param['f'] = np.array([K[0, 0], K[1, 1]])[:, np.newaxis] + self.param['c'] = np.array([K[0, 2], K[1, 2]])[:, np.newaxis] + elif 'f' in param and 'c' in param: + f = np.array(param['f'], dtype=np.float32) + c = np.array(param['c'], dtype=np.float32) + assert f.shape == (2, 1) + assert c.shape == (2, 1) + self.param['K'] = np.concatenate((np.diagflat(f), c), axis=-1).T + self.param['f'] = f + self.param['c'] = c + else: + raise ValueError('Camera intrinsic parameters are missing. 
' + 'Either "K" or "f"&"c" should be provided.') + + # distortion param + if 'k' in param and 'p' in param: + self.undistortion = True + self.param['k'] = np.array(param['k'], dtype=np.float32).flatten() + self.param['p'] = np.array(param['p'], dtype=np.float32).flatten() + assert self.param['k'].size in {3, 6} + assert self.param['p'].size == 2 + else: + self.undistortion = False + + def world_to_camera(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_w2c'] + self.param['T_w2c'] + + def camera_to_world(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_c2w'] + self.param['T_c2w'] + + def camera_to_pixel(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + + _X = X / X[..., 2:] + + if self.undistortion: + k = self.param['k'] + p = self.param['p'] + _X_2d = _X[..., :2] + r2 = (_X_2d**2).sum(-1) + radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) + if k.size == 6: + radial /= 1 + sum( + (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) + + tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) + + _X[..., :2] = _X_2d * (radial + tangential)[..., None] + np.outer( + r2, p[::-1]).reshape(_X_2d.shape) + return _X @ self.param['K'] + + def pixel_to_camera(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + _X = X.copy() + _X[:, :2] = (X[:, :2] - self.param['c'].T) / self.param['f'].T * X[:, + [2]] + return _X + + +@CAMERAS.register_module() +class SimpleCameraTorch(SingleCameraBase): + """Camera model to calculate coordinate transformation with given + intrinsic/extrinsic camera parameters. + + Notes: + The keypoint coordinate should be an np.ndarray with a shape of + [...,J, C] where J is the keypoint number of an instance, and C is + the coordinate dimension. For example: + + [J, C]: shape of joint coordinates of a person with J joints. + [N, J, C]: shape of a batch of person joint coordinates. + [N, T, J, C]: shape of a batch of pose sequences. + + Args: + param (dict): camera parameters including: + - R: 3x3, camera rotation matrix (camera-to-world) + - T: 3x1, camera translation (camera-to-world) + - K: (optional) 2x3, camera intrinsic matrix + - k: (optional) nx1, camera radial distortion coefficients + - p: (optional) mx1, camera tangential distortion coefficients + - f: (optional) 2x1, camera focal length + - c: (optional) 2x1, camera center + if K is not provided, it will be calculated from f and c. + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + def __init__(self, param, device): + + self.param = {} + # extrinsic param + R = torch.tensor(param['R'], device=device) + T = torch.tensor(param['T'], device=device) + + assert R.shape == (3, 3) + assert T.shape == (3, 1) + # The camera matrices are transposed in advance because the joint + # coordinates are stored as row vectors. 
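+        # Points therefore transform as ``X @ R + T`` (row vectors) rather
+        # than ``R @ X + T`` (column vectors).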
+ self.param['R_c2w'] = R.T + self.param['T_c2w'] = T.T + self.param['R_w2c'] = R + self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] + + # intrinsic param + if 'K' in param: + K = torch.tensor(param['K'], device=device) + assert K.shape == (2, 3) + self.param['K'] = K.T + self.param['f'] = torch.tensor([[K[0, 0]], [K[1, 1]]], + device=device) + self.param['c'] = torch.tensor([[K[0, 2]], [K[1, 2]]], + device=device) + elif 'f' in param and 'c' in param: + f = torch.tensor(param['f'], device=device) + c = torch.tensor(param['c'], device=device) + assert f.shape == (2, 1) + assert c.shape == (2, 1) + self.param['K'] = torch.cat([torch.diagflat(f), c], dim=-1).T + self.param['f'] = f + self.param['c'] = c + else: + raise ValueError('Camera intrinsic parameters are missing. ' + 'Either "K" or "f"&"c" should be provided.') + + # distortion param + if 'k' in param and 'p' in param: + self.undistortion = True + self.param['k'] = torch.tensor(param['k'], device=device).view(-1) + self.param['p'] = torch.tensor(param['p'], device=device).view(-1) + assert len(self.param['k']) in {3, 6} + assert len(self.param['p']) == 2 + else: + self.undistortion = False + + def world_to_camera(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_w2c'] + self.param['T_w2c'] + + def camera_to_world(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_c2w'] + self.param['T_c2w'] + + def camera_to_pixel(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + + _X = X / X[..., 2:] + + if self.undistortion: + k = self.param['k'] + p = self.param['p'] + _X_2d = _X[..., :2] + r2 = (_X_2d**2).sum(-1) + radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) + if k.size == 6: + radial /= 1 + sum( + (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) + + tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) + + _X[..., :2] = _X_2d * (radial + tangential)[..., None] + torch.ger( + r2, p.flip([0])).reshape(_X_2d.shape) + return _X @ self.param['K'] diff --git a/mmpose/utils/collect_env.py b/mmpose/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fb5f35e10fe6535b49b7eb7def1459b28835e3 --- /dev/null +++ b/mmpose/utils/collect_env.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.utils import get_git_hash +from mmengine.utils.dl_utils import collect_env as collect_base_env + +import mmpose + + +def collect_env(): + env_info = collect_base_env() + env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7)) + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/mmpose/utils/config_utils.py b/mmpose/utils/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2f54d2ef24093a77933dbf8026465e3cdaf5e839 --- /dev/null +++ b/mmpose/utils/config_utils.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.utils.typing import ConfigDict + + +def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict: + """Converts pipeline types in MMDetection's test dataloader to use the + 'mmdet' namespace. + + Args: + cfg (ConfigDict): Configuration dictionary for MMDetection. + + Returns: + ConfigDict: Configuration dictionary with updated pipeline types. 
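+
+    Example (the config path below is hypothetical):
+        >>> from mmengine.config import Config
+        >>> det_cfg = Config.fromfile('path/to/a_mmdet_config.py')
+        >>> det_cfg = adapt_mmdet_pipeline(det_cfg)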
+ """ + # use lazy import to avoid hard dependence on mmdet + from mmdet.datasets import transforms + + if 'test_dataloader' not in cfg: + return cfg + + pipeline = cfg.test_dataloader.dataset.pipeline + for trans in pipeline: + if trans['type'] in dir(transforms): + trans['type'] = 'mmdet.' + trans['type'] + + return cfg diff --git a/mmpose/utils/dist_utils.py b/mmpose/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..915f92585a3780576490e92199a0baebe0cb7e7d --- /dev/null +++ b/mmpose/utils/dist_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.distributed as dist + + +def reduce_mean(tensor): + """"Obtain the mean of tensor on different GPUs.""" + if not (dist.is_available() and dist.is_initialized()): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) + return tensor diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..4a2eb8aea29646d5de3587a43f2746bd8a64e30f --- /dev/null +++ b/mmpose/utils/hooks.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + + +class OutputHook: + + def __init__(self, module, outputs=None, as_tensor=False): + self.outputs = outputs + self.as_tensor = as_tensor + self.layer_outputs = {} + self.register(module) + + def register(self, module): + + def hook_wrapper(name): + + def hook(model, input, output): + if self.as_tensor: + self.layer_outputs[name] = output + else: + if isinstance(output, list): + self.layer_outputs[name] = [ + out.detach().cpu().numpy() for out in output + ] + else: + self.layer_outputs[name] = output.detach().cpu().numpy( + ) + + return hook + + self.handles = [] + if isinstance(self.outputs, (list, tuple)): + for name in self.outputs: + try: + layer = rgetattr(module, name) + h = layer.register_forward_hook(hook_wrapper(name)) + except ModuleNotFoundError as module_not_found: + raise ModuleNotFoundError( + f'Module {name} not found') from module_not_found + self.handles.append(h) + + def remove(self): + for h in self.handles: + h.remove() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.remove() + + +# using wonder's beautiful simplification: +# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects +def rsetattr(obj, attr, val): + """Set the value of a nested attribute of an object. + + This function splits the attribute path and sets the value of the + nested attribute. If the attribute path is nested (e.g., 'x.y.z'), it + traverses through each attribute until it reaches the last one and sets + its value. + + Args: + obj (object): The object whose attribute needs to be set. + attr (str): The attribute path in dot notation (e.g., 'x.y.z'). + val (any): The value to set at the specified attribute path. + """ + pre, _, post = attr.rpartition('.') + return setattr(rgetattr(obj, pre) if pre else obj, post, val) + + +def rgetattr(obj, attr, *args): + """Recursively get a nested attribute of an object. + + This function splits the attribute path and retrieves the value of the + nested attribute. If the attribute path is nested (e.g., 'x.y.z'), it + traverses through each attribute. If an attribute in the path does not + exist, it returns the value specified as the third argument. + + Args: + obj (object): The object whose attribute needs to be retrieved. + attr (str): The attribute path in dot notation (e.g., 'x.y.z'). 
+ *args (any): Optional default value to return if the attribute + does not exist. + """ + + def _getattr(obj, attr): + return getattr(obj, attr, *args) + + return functools.reduce(_getattr, [obj] + attr.split('.')) diff --git a/mmpose/utils/logger.py b/mmpose/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..f67e56efeb998cf966e3729c90791b4a70f2bb84 --- /dev/null +++ b/mmpose/utils/logger.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +from mmengine.logging import MMLogger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Use `MMLogger` class in mmengine to get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmpose". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. + """ + return MMLogger('MMLogger', __name__.split('.')[0], log_file, log_level) diff --git a/mmpose/utils/setup_env.py b/mmpose/utils/setup_env.py new file mode 100644 index 0000000000000000000000000000000000000000..ff299539ef8cc83a17a24e41498c01ff4f26667f --- /dev/null +++ b/mmpose/utils/setup_env.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import os +import platform +import warnings + +import cv2 +import torch.multiprocessing as mp +from mmengine import DefaultScope + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + # set multi-process start method as `fork` to speed up the training + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', 'fork') + current_method = mp.get_start_method(allow_none=True) + if current_method is not None and current_method != mp_start_method: + warnings.warn( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' + f'It will be force set to `{mp_start_method}`. 
You can change ' + f'this behavior by changing `mp_start_method` in your config.') + mp.set_start_method(mp_start_method, force=True) + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', 0) + cv2.setNumThreads(opencv_num_threads) + + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + omp_num_threads = 1 + warnings.warn( + f'Setting OMP_NUM_THREADS environment variable for each process ' + f'to be {omp_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + mkl_num_threads = 1 + warnings.warn( + f'Setting MKL_NUM_THREADS environment variable for each process ' + f'to be {mkl_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmpose into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmpose default scope. + When `init_default_scope=True`, the global default scope will be + set to `mmpose`, and all registries will build modules from mmpose's + registry node. To understand more about the registry, please refer + to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa + + import mmpose.codecs # noqa: F401, F403 + import mmpose.datasets # noqa: F401,F403 + import mmpose.engine # noqa: F401,F403 + import mmpose.evaluation # noqa: F401,F403 + import mmpose.models # noqa: F401,F403 + import mmpose.visualization # noqa: F401,F403 + + if init_default_scope: + never_created = DefaultScope.get_current_instance() is None \ + or not DefaultScope.check_instance_created('mmpose') + if never_created: + DefaultScope.get_instance('mmpose', scope_name='mmpose') + return + current_scope = DefaultScope.get_current_instance() + if current_scope.scope_name != 'mmpose': + warnings.warn('The current default scope ' + f'"{current_scope.scope_name}" is not "mmpose", ' + '`register_all_modules` will force the current' + 'default scope to be "mmpose". If this is not ' + 'expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmpose-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmpose') diff --git a/mmpose/utils/tensor_utils.py b/mmpose/utils/tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..755e26854cb379d6218d1ac2bfad15039330df42 --- /dev/null +++ b/mmpose/utils/tensor_utils.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Any, Optional, Sequence, Union + +import numpy as np +import torch +from mmengine.utils import is_seq_of +from torch import Tensor + + +def to_numpy(x: Union[Tensor, Sequence[Tensor]], + return_device: bool = False, + unzip: bool = False) -> Union[np.ndarray, tuple]: + """Convert torch tensor to numpy.ndarray. 
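+
+    Note:
+        With ``unzip=True``, a sequence input is reorganized per sample,
+        roughly ``(A, B) -> [(A[0], B[0]), (A[1], B[1]), ...]`` (see the
+        inline comment in the implementation below).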
+ + Args: + x (Tensor | Sequence[Tensor]): A single tensor or a sequence of + tensors + return_device (bool): Whether return the tensor device. Defaults to + ``False`` + unzip (bool): Whether unzip the input sequence. Defaults to ``False`` + + Returns: + np.ndarray | tuple: If ``return_device`` is ``True``, return a tuple + of converted numpy array(s) and the device indicator; otherwise only + return the numpy array(s) + """ + + if isinstance(x, Tensor): + arrays = x.detach().cpu().numpy() + device = x.device + elif isinstance(x, np.ndarray) or is_seq_of(x, np.ndarray): + arrays = x + device = 'cpu' + elif is_seq_of(x, Tensor): + if unzip: + # convert (A, B) -> [(A[0], B[0]), (A[1], B[1]), ...] + arrays = [ + tuple(to_numpy(_x[None, :]) for _x in _each) + for _each in zip(*x) + ] + else: + arrays = [to_numpy(_x) for _x in x] + + device = x[0].device + + else: + raise ValueError(f'Invalid input type {type(x)}') + + if return_device: + return arrays, device + else: + return arrays + + +def to_tensor(x: Union[np.ndarray, Sequence[np.ndarray]], + device: Optional[Any] = None) -> Union[Tensor, Sequence[Tensor]]: + """Convert numpy.ndarray to torch tensor. + + Args: + x (np.ndarray | Sequence[np.ndarray]): A single np.ndarray or a + sequence of tensors + tensor (Any, optional): The device indicator. Defaults to ``None`` + + Returns: + tuple: + - Tensor | Sequence[Tensor]: The converted Tensor or Tensor sequence + """ + if isinstance(x, np.ndarray): + return torch.tensor(x, device=device) + elif is_seq_of(x, np.ndarray): + return [to_tensor(_x, device=device) for _x in x] + else: + raise ValueError(f'Invalid input type {type(x)}') diff --git a/mmpose/utils/timer.py b/mmpose/utils/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..c219c04069d239605a7854b06a370876dbe8fd58 --- /dev/null +++ b/mmpose/utils/timer.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from contextlib import contextmanager +from functools import partial + +import numpy as np +from mmengine import Timer + + +class RunningAverage(): + r"""A helper class to calculate running average in a sliding window. + + Args: + window (int): The size of the sliding window. + """ + + def __init__(self, window: int = 1): + self.window = window + self._data = [] + + def update(self, value): + """Update a new data sample.""" + self._data.append(value) + self._data = self._data[-self.window:] + + def average(self): + """Get the average value of current window.""" + return np.mean(self._data) + + +class StopWatch: + r"""A helper class to measure FPS and detailed time consuming of each phase + in a video processing loop or similar scenarios. + + Args: + window (int): The sliding window size to calculate the running average + of the time consuming. + + Example: + >>> from mmpose.utils import StopWatch + >>> import time + >>> stop_watch = StopWatch(window=10) + >>> with stop_watch.timeit('total'): + >>> time.sleep(0.1) + >>> # 'timeit' support nested use + >>> with stop_watch.timeit('phase1'): + >>> time.sleep(0.1) + >>> with stop_watch.timeit('phase2'): + >>> time.sleep(0.2) + >>> time.sleep(0.2) + >>> report = stop_watch.report() + """ + + def __init__(self, window=1): + self.window = window + self._record = defaultdict(partial(RunningAverage, window=self.window)) + self._timer_stack = [] + + @contextmanager + def timeit(self, timer_name='_FPS_'): + """Timing a code snippet with an assigned name. 
+ + Args: + timer_name (str): The unique name of the interested code snippet to + handle multiple timers and generate reports. Note that '_FPS_' + is a special key that the measurement will be in `fps` instead + of `millisecond`. Also see `report` and `report_strings`. + Default: '_FPS_'. + Note: + This function should always be used in a `with` statement, as shown + in the example. + """ + self._timer_stack.append((timer_name, Timer())) + try: + yield + finally: + timer_name, timer = self._timer_stack.pop() + self._record[timer_name].update(timer.since_start()) + + def report(self, key=None): + """Report timing information. + + Returns: + dict: The key is the timer name and the value is the \ + corresponding average time consuming. + """ + result = { + name: r.average() * 1000. + for name, r in self._record.items() + } + + if '_FPS_' in result: + result['_FPS_'] = 1000. / result.pop('_FPS_') + + if key is None: + return result + return result[key] + + def report_strings(self): + """Report timing information in texture strings. + + Returns: + list(str): Each element is the information string of a timed \ + event, in format of '{timer_name}: {time_in_ms}'. \ + Specially, if timer_name is '_FPS_', the result will \ + be converted to fps. + """ + result = self.report() + strings = [] + if '_FPS_' in result: + strings.append(f'FPS: {result["_FPS_"]:>5.1f}') + strings += [f'{name}: {val:>3.0f}' for name, val in result.items()] + return strings + + def reset(self): + self._record = defaultdict(list) + self._active_timer_stack = [] diff --git a/mmpose/utils/typing.py b/mmpose/utils/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..557891b3b92e657de43eb50d4b5fbce7d369e7ee --- /dev/null +++ b/mmpose/utils/typing.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple, Union + +from mmengine.config import ConfigDict +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.structures import PoseDataSample + +# Type hint of config data +ConfigType = Union[ConfigDict, dict] +OptConfigType = Optional[ConfigType] +# Type hint of one or more config data +MultiConfig = Union[ConfigType, List[ConfigType]] +OptMultiConfig = Optional[MultiConfig] +# Type hint of data samples +SampleList = List[PoseDataSample] +OptSampleList = Optional[SampleList] +InstanceList = List[InstanceData] +PixelDataList = List[PixelData] +Predictions = Union[InstanceList, Tuple[InstanceList, PixelDataList]] +# Type hint of model outputs +ForwardResults = Union[Dict[str, Tensor], List[PoseDataSample], Tuple[Tensor], + Tensor] +# Type hint of features +# - Tuple[Tensor]: multi-level features extracted by the network +# - List[Tuple[Tensor]]: multiple feature pyramids for TTA +# - List[List[Tuple[Tensor]]]: multi-scale feature pyramids +Features = Union[Tuple[Tensor], List[Tuple[Tensor]], List[List[Tuple[Tensor]]]] diff --git a/mmpose/version.py b/mmpose/version.py new file mode 100644 index 0000000000000000000000000000000000000000..39bc36f2bb2fcd1f52e62b4453acb4f4b50e9d8f --- /dev/null +++ b/mmpose/version.py @@ -0,0 +1,31 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '1.3.1' +short_version = __version__ + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
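+
+    Example:
+        >>> parse_version_info('2.0.0rc1')
+        (2, 0, 0, 'rc1')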
+ """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + elif x.find('b') != -1: + patch_version = x.split('b') + version_info.append(int(patch_version[0])) + version_info.append(f'b{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/mmpose/visualization/__init__.py b/mmpose/visualization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a18e8bc5b4fa8d58adee30576013bb780bd9a19 --- /dev/null +++ b/mmpose/visualization/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .fast_visualizer import FastVisualizer +from .local_visualizer import PoseLocalVisualizer +from .local_visualizer_3d import Pose3dLocalVisualizer + +__all__ = ['PoseLocalVisualizer', 'FastVisualizer', 'Pose3dLocalVisualizer'] diff --git a/mmpose/visualization/fast_visualizer.py b/mmpose/visualization/fast_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0cb385270832f12a9d12fac892e920f32c2002 --- /dev/null +++ b/mmpose/visualization/fast_visualizer.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 + + +class FastVisualizer: + """MMPose Fast Visualizer. + + A simple yet fast visualizer for video/webcam inference. + + Args: + metainfo (dict): pose meta information + radius (int, optional)): Keypoint radius for visualization. + Defaults to 6. + line_width (int, optional): Link width for visualization. + Defaults to 3. + kpt_thr (float, optional): Threshold for keypoints' confidence score, + keypoints with score below this value will not be drawn. + Defaults to 0.3. + """ + + def __init__(self, metainfo, radius=6, line_width=3, kpt_thr=0.3): + self.radius = radius + self.line_width = line_width + self.kpt_thr = kpt_thr + + self.keypoint_id2name = metainfo['keypoint_id2name'] + self.keypoint_name2id = metainfo['keypoint_name2id'] + self.keypoint_colors = metainfo['keypoint_colors'] + self.skeleton_links = metainfo['skeleton_links'] + self.skeleton_link_colors = metainfo['skeleton_link_colors'] + + def draw_pose(self, img, instances): + """Draw pose estimations on the given image. + + This method draws keypoints and skeleton links on the input image + using the provided instances. + + Args: + img (numpy.ndarray): The input image on which to + draw the pose estimations. + instances (object): An object containing detected instances' + information, including keypoints and keypoint_scores. + + Returns: + None: The input image will be modified in place. 
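+
+        Example (a minimal sketch; ``metainfo``, ``frame`` and
+        ``pred_instances`` are placeholder names for a dataset meta dict
+        and the per-frame inference results):
+            >>> visualizer = FastVisualizer(metainfo)
+            >>> visualizer.draw_pose(frame, pred_instances)
+            >>> cv2.imshow('pose results', frame)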
+ """ + + if instances is None: + print('no instance detected') + return + + keypoints = instances.keypoints + scores = instances.keypoint_scores + + for kpts, score in zip(keypoints, scores): + for sk_id, sk in enumerate(self.skeleton_links): + if score[sk[0]] < self.kpt_thr or score[sk[1]] < self.kpt_thr: + # skip the link that should not be drawn + continue + + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + + color = self.skeleton_link_colors[sk_id].tolist() + cv2.line(img, pos1, pos2, color, thickness=self.line_width) + + for kid, kpt in enumerate(kpts): + if score[kid] < self.kpt_thr: + # skip the point that should not be drawn + continue + + x_coord, y_coord = int(kpt[0]), int(kpt[1]) + + color = self.keypoint_colors[kid].tolist() + cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, + color, -1) + cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, + (255, 255, 255)) diff --git a/mmpose/visualization/local_visualizer.py b/mmpose/visualization/local_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f147919457b9b75177b511b034b7710f3dccd239 --- /dev/null +++ b/mmpose/visualization/local_visualizer.py @@ -0,0 +1,789 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings +from typing import Dict, List, Optional, Tuple, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.dist import master_only +from mmengine.structures import InstanceData, PixelData + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.registry import VISUALIZERS +from mmpose.structures import PoseDataSample +from .opencv_backend_visualizer import OpencvBackendVisualizer +from .simcc_vis import SimCCVisualizer + +from mmpose.structures.keypoint import fix_bbox_aspect_ratio + +import cv2 + +try: + POSEVIS=True + from posevis import pose_visualization +except ImportError: + POSEVIS=False + + +def _get_adaptive_scales(areas: np.ndarray, + min_area: int = 800, + max_area: int = 30000) -> np.ndarray: + """Get adaptive scales according to areas. + + The scale range is [0.5, 1.0]. When the area is less than + ``min_area``, the scale is 0.5 while the area is larger than + ``max_area``, the scale is 1.0. + + Args: + areas (ndarray): The areas of bboxes or masks with the + shape of (n, ). + min_area (int): Lower bound areas for adaptive scales. + Defaults to 800. + max_area (int): Upper bound areas for adaptive scales. + Defaults to 30000. + + Returns: + ndarray: The adaotive scales with the shape of (n, ). + """ + scales = 0.5 + (areas - min_area) / (max_area - min_area) + scales = np.clip(scales, 0.5, 1.0) + return scales + + +@VISUALIZERS.register_module() +class PoseLocalVisualizer(OpencvBackendVisualizer): + """MMPose Local Visualizer. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to ``None`` + vis_backends (list, optional): Visual backend config list. Defaults to + ``None`` + save_dir (str, optional): Save file dir for all storage backends. + If it is ``None``, the backend storage will not save any data. + Defaults to ``None`` + bbox_color (str, tuple(int), optional): Color of bbox lines. + The tuple of color should be in BGR order. Defaults to ``'green'`` + kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. + The tuple of color should be in BGR order. 
Defaults to ``'red'`` + link_color (str, tuple(tuple(int)), optional): Color of skeleton. + The tuple of color should be in BGR order. Defaults to ``None`` + line_width (int, float): The width of lines. Defaults to 1 + radius (int, float): The radius of keypoints. Defaults to 4 + show_keypoint_weight (bool): Whether to adjust the transparency + of keypoints according to their score. Defaults to ``False`` + alpha (int, float): The transparency of bboxes. Defaults to ``1.0`` + + Examples: + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> from mmpose.structures import PoseDataSample + >>> from mmpose.visualization import PoseLocalVisualizer + + >>> pose_local_visualizer = PoseLocalVisualizer(radius=1) + >>> image = np.random.randint(0, 256, + ... size=(10, 12, 3)).astype('uint8') + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], + ... [8, 8]]]) + >>> gt_pose_data_sample = PoseDataSample() + >>> gt_pose_data_sample.gt_instances = gt_instances + >>> dataset_meta = {'skeleton_links': [[0, 1], [1, 2], [2, 3]]} + >>> pose_local_visualizer.set_dataset_meta(dataset_meta) + >>> pose_local_visualizer.add_datasample('image', image, + ... gt_pose_data_sample) + >>> pose_local_visualizer.add_datasample( + ... 'image', image, gt_pose_data_sample, + ... out_file='out_file.jpg') + >>> pose_local_visualizer.add_datasample( + ... 'image', image, gt_pose_data_sample, + ... show=True) + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], + ... [8, 8]]]) + >>> pred_instances.score = np.array([0.8, 1, 0.9, 1]) + >>> pred_pose_data_sample = PoseDataSample() + >>> pred_pose_data_sample.pred_instances = pred_instances + >>> pose_local_visualizer.add_datasample('image', image, + ... gt_pose_data_sample, + ... pred_pose_data_sample) + """ + + def __init__(self, + name: str = 'visualizer', + image: Optional[np.ndarray] = None, + vis_backends: Optional[Dict] = None, + save_dir: Optional[str] = None, + bbox_color: Optional[Union[str, Tuple[int]]] = 'green', + kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', + link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + text_color: Optional[Union[str, + Tuple[int]]] = (255, 255, 255), + skeleton: Optional[Union[List, Tuple]] = None, + line_width: Union[int, float] = 1, + radius: Union[int, float] = 3, + show_keypoint_weight: bool = False, + backend: str = 'opencv', + alpha: float = 1.0): + + warnings.filterwarnings( + 'ignore', + message='.*please provide the `save_dir` argument.*', + category=UserWarning) + + super().__init__( + name=name, + image=image, + vis_backends=vis_backends, + save_dir=save_dir, + backend=backend) + + self.bbox_color = bbox_color + self.kpt_color = kpt_color + self.link_color = link_color + self.line_width = line_width + self.text_color = text_color + self.skeleton = skeleton + self.radius = radius + self.alpha = alpha + self.show_keypoint_weight = show_keypoint_weight + # Set default value. When calling + # `PoseLocalVisualizer().set_dataset_meta(xxx)`, + # it will override the default value. + self.dataset_meta = {} + + def set_dataset_meta(self, + dataset_meta: Dict, + skeleton_style: str = 'mmpose'): + """Assign dataset_meta to the visualizer. The default visualization + settings will be overridden. + + Args: + dataset_meta (dict): meta information of dataset. 
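+            skeleton_style (str): Skeleton style selection. Defaults to
+                ``'mmpose'``. The ``'openpose'`` style is currently only
+                supported for the ``coco`` and ``coco_wholebody`` datasets.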
+ """ + if skeleton_style == 'openpose': + dataset_name = dataset_meta['dataset_name'] + if dataset_name == 'coco': + dataset_meta = parse_pose_metainfo( + dict(from_file='configs/_base_/datasets/coco_openpose.py')) + elif dataset_name == 'coco_wholebody': + dataset_meta = parse_pose_metainfo( + dict(from_file='configs/_base_/datasets/' + 'coco_wholebody_openpose.py')) + else: + raise NotImplementedError( + f'openpose style has not been ' + f'supported for {dataset_name} dataset') + + if isinstance(dataset_meta, dict): + self.dataset_meta = dataset_meta.copy() + self.bbox_color = dataset_meta.get('bbox_color', self.bbox_color) + self.kpt_color = dataset_meta.get('keypoint_colors', + self.kpt_color) + self.link_color = dataset_meta.get('skeleton_link_colors', + self.link_color) + self.skeleton = dataset_meta.get('skeleton_links', self.skeleton) + # sometimes self.dataset_meta is manually set, which might be None. + # it should be converted to a dict at these times + if self.dataset_meta is None: + self.dataset_meta = {} + + def _draw_instances_bbox(self, image: np.ndarray, + instances: InstanceData) -> np.ndarray: + """Draw bounding boxes and corresponding labels of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + self.set_image(image) + + if 'bboxes' in instances: + bboxes = instances.bboxes + self.draw_bboxes( + bboxes, + edge_colors=self.bbox_color, + alpha=self.alpha, + line_widths=self.line_width) + else: + return self.get_image() + + if 'labels' in instances and self.text_color is not None: + classes = self.dataset_meta.get('classes', None) + labels = instances.labels + + positions = bboxes[:, :2] + areas = (bboxes[:, 3] - bboxes[:, 1]) * ( + bboxes[:, 2] - bboxes[:, 0]) + scales = _get_adaptive_scales(areas) + + for i, (pos, label) in enumerate(zip(positions, labels)): + label_text = classes[ + label] if classes is not None else f'class {label}' + + if isinstance(self.bbox_color, + tuple) and max(self.bbox_color) > 1: + facecolor = [c / 255.0 for c in self.bbox_color] + else: + facecolor = self.bbox_color + + self.draw_texts( + label_text, + pos, + colors=self.text_color, + font_sizes=int(13 * scales[i]), + vertical_alignments='bottom', + bboxes=[{ + 'facecolor': facecolor, + 'alpha': 0.8, + 'pad': 0.7, + 'edgecolor': 'none' + }]) + + return self.get_image() + + def _draw_instances_kpts(self, + image: np.ndarray, + instances: InstanceData, + kpt_thr: float = 0.3, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose'): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + + Returns: + np.ndarray: the drawn image which channel is RGB. 
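+
+        Note:
+            If the instances carry ``transformed_keypoints``, those are
+            drawn instead of the raw ``keypoints``; when
+            ``keypoints_visible`` is missing, all keypoints are treated as
+            visible.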
+ """ + + if skeleton_style == 'openpose': + return self._draw_instances_kpts_openpose(image, instances, + kpt_thr) + + self.set_image(image) + img_h, img_w, _ = image.shape + + if 'keypoints' in instances: + keypoints = instances.get('transformed_keypoints', + instances.keypoints) + + if 'keypoints_visible' in instances: + keypoints_visible = instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + for kpts, visible in zip(keypoints, keypoints_visible): + kpts = np.array(kpts, copy=False) + + if self.kpt_color is None or isinstance(self.kpt_color, str): + kpt_color = [self.kpt_color] * len(kpts) + elif len(self.kpt_color) == len(kpts): + kpt_color = self.kpt_color + else: + raise ValueError( + f'the length of kpt_color ' + f'({len(self.kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + # draw links + if self.skeleton is not None and self.link_color is not None: + if self.link_color is None or isinstance( + self.link_color, str): + link_color = [self.link_color] * len(self.skeleton) + elif len(self.link_color) == len(self.skeleton): + link_color = self.link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(self.link_color)}) does not matches ' + f'that of skeleton ({len(self.skeleton)})') + + for sk_id, sk in enumerate(self.skeleton): + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + + if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 + or pos1[1] >= img_h or pos2[0] <= 0 + or pos2[0] >= img_w or pos2[1] <= 0 + or pos2[1] >= img_h or visible[sk[0]] < kpt_thr + or visible[sk[1]] < kpt_thr + or link_color[sk_id] is None): + # skip the link that should not be drawn + continue + + X = np.array((pos1[0], pos2[0])) + Y = np.array((pos1[1], pos2[1])) + color = link_color[sk_id] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max( + 0, + min(1, + 0.5 * (visible[sk[0]] + visible[sk[1]]))) + + self.draw_lines( + X, Y, color, line_widths=self.line_width) + + # draw each point on image + for kid, kpt in enumerate(kpts): + if visible[kid] < kpt_thr or kpt_color[kid] is None: + # skip the point that should not be drawn + continue + + color = kpt_color[kid] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max(0, min(1, visible[kid])) + self.draw_circles( + kpt, + radius=np.array([self.radius]), + face_colors=color, + edge_colors=color, + alpha=transparency, + line_widths=self.radius) + if show_kpt_idx: + kpt_idx_coords = kpt + [self.radius, -self.radius] + self.draw_texts( + str(kid), + kpt_idx_coords, + colors=color, + font_sizes=self.radius * 3, + vertical_alignments='bottom', + horizontal_alignments='center') + + return self.get_image() + + def _draw_instances_kpts_openpose(self, + image: np.ndarray, + instances: InstanceData, + kpt_thr: float = 0.3): + """Draw keypoints and skeletons (optional) of GT or prediction in + openpose style. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + + Returns: + np.ndarray: the drawn image which channel is RGB. 
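+
+        Note:
+            The COCO keypoints are remapped to the OpenPose layout by
+            inserting an artificial neck joint, computed as the midpoint of
+            the two shoulders; its score is derived from the shoulder
+            scores.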
+ """ + + self.set_image(image) + img_h, img_w, _ = image.shape + + if 'keypoints' in instances: + keypoints = instances.get('transformed_keypoints', + instances.keypoints) + + if 'keypoints_visible' in instances: + keypoints_visible = instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + keypoints_info = np.concatenate( + (keypoints, keypoints_visible[..., None]), axis=-1) + # compute neck joint + neck = np.mean(keypoints_info[:, [5, 6]], axis=1) + # neck score when visualizing pred + neck[:, 2:3] = np.logical_and( + keypoints_info[:, 5, 2:3] > kpt_thr, + keypoints_info[:, 6, 2:3] > kpt_thr).astype(int) + new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1) + + mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3] + openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17] + new_keypoints_info[:, openpose_idx] = \ + new_keypoints_info[:, mmpose_idx] + keypoints_info = new_keypoints_info + + keypoints, keypoints_visible = keypoints_info[ + ..., :2], keypoints_info[..., 2] + + for kpts, visible in zip(keypoints, keypoints_visible): + kpts = np.array(kpts, copy=False) + + if self.kpt_color is None or isinstance(self.kpt_color, str): + kpt_color = [self.kpt_color] * len(kpts) + elif len(self.kpt_color) == len(kpts): + kpt_color = self.kpt_color + else: + raise ValueError( + f'the length of kpt_color ' + f'({len(self.kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + # draw links + if self.skeleton is not None and self.link_color is not None: + if self.link_color is None or isinstance( + self.link_color, str): + link_color = [self.link_color] * len(self.skeleton) + elif len(self.link_color) == len(self.skeleton): + link_color = self.link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(self.link_color)}) does not matches ' + f'that of skeleton ({len(self.skeleton)})') + + for sk_id, sk in enumerate(self.skeleton): + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + + if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 + or pos1[1] >= img_h or pos2[0] <= 0 + or pos2[0] >= img_w or pos2[1] <= 0 + or pos2[1] >= img_h or visible[sk[0]] < kpt_thr + or visible[sk[1]] < kpt_thr + or link_color[sk_id] is None): + # skip the link that should not be drawn + continue + + X = np.array((pos1[0], pos2[0])) + Y = np.array((pos1[1], pos2[1])) + color = link_color[sk_id] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max( + 0, + min(1, + 0.5 * (visible[sk[0]] + visible[sk[1]]))) + + if sk_id <= 16: + # body part + mX = np.mean(X) + mY = np.mean(Y) + length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 + transparency = 0.6 + angle = math.degrees( + math.atan2(Y[0] - Y[1], X[0] - X[1])) + polygons = cv2.ellipse2Poly( + (int(mX), int(mY)), + (int(length / 2), int(self.line_width)), + int(angle), 0, 360, 1) + + self.draw_polygons( + polygons, + edge_colors=color, + face_colors=color, + alpha=transparency) + + else: + # hand part + self.draw_lines(X, Y, color, line_widths=2) + + # draw each point on image + for kid, kpt in enumerate(kpts): + if visible[kid] < kpt_thr or kpt_color[ + kid] is None or kpt_color[kid].sum() == 0: + # skip the point that should not be drawn + continue + + color = kpt_color[kid] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + 
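+                    # modulate the alpha by the keypoint visibility score so
+                    # that low-confidence keypoints are drawn more faintly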
transparency *= max(0, min(1, visible[kid])) + + # draw smaller dots for face & hand keypoints + radius = self.radius // 2 if kid > 17 else self.radius + + self.draw_circles( + kpt, + radius=np.array([radius]), + face_colors=color, + edge_colors=color, + alpha=transparency, + line_widths=radius) + + return self.get_image() + + def _draw_instance_heatmap( + self, + fields: PixelData, + overlaid_image: Optional[np.ndarray] = None, + ): + """Draw heatmaps of GT or prediction. + + Args: + fields (:obj:`PixelData`): Data structure for + pixel-level annotations or predictions. + overlaid_image (np.ndarray): The image to draw. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + if 'heatmaps' not in fields: + return None + heatmaps = fields.heatmaps + if isinstance(heatmaps, np.ndarray): + heatmaps = torch.from_numpy(heatmaps) + if heatmaps.dim() == 3: + heatmaps, _ = heatmaps.max(dim=0) + heatmaps = heatmaps.unsqueeze(0) + + # Pad the image such that the heatmap is visible + # breakpoint() + + out_image = self.draw_featmap(heatmaps, overlaid_image) + return out_image + + def _draw_instance_xy_heatmap( + self, + fields: PixelData, + overlaid_image: Optional[np.ndarray] = None, + n: int = 20, + ): + """Draw heatmaps of GT or prediction. + + Args: + fields (:obj:`PixelData`): Data structure for + pixel-level annotations or predictions. + overlaid_image (np.ndarray): The image to draw. + n (int): Number of keypoint, up to 20. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + if 'heatmaps' not in fields: + return None + heatmaps = fields.heatmaps + _, h, w = heatmaps.shape + if isinstance(heatmaps, np.ndarray): + heatmaps = torch.from_numpy(heatmaps) + out_image = SimCCVisualizer().draw_instance_xy_heatmap( + heatmaps, overlaid_image, n) + out_image = cv2.resize(out_image[:, :, ::-1], (w, h)) + return out_image + + @master_only + def add_datasample(self, + name: str, + image: np.ndarray, + data_sample: PoseDataSample, + draw_gt: bool = True, + draw_pred: bool = True, + draw_heatmap: bool = True, + draw_bbox: bool = False, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose', + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + kpt_thr: float = 0.3, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are + displayed in a stitched image where the left image is the + ground truth and the right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and + the images will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be + saved to ``out_file``. t is usually used when the display + is not available. + + Args: + name (str): The image identifier + image (np.ndarray): The image to draw + data_sample (:obj:`PoseDataSample`, optional): The data sample + to visualize + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + draw_pred (bool): Whether to draw Prediction PoseDataSample. + Defaults to ``True`` + draw_bbox (bool): Whether to draw bounding boxes. Default to + ``False`` + draw_heatmap (bool): Whether to draw heatmaps. Defaults to + ``False`` + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + show (bool): Whether to display the drawn image. Default to + ``False`` + wait_time (float): The interval of show (s). 
Defaults to 0 + out_file (str): Path to output file. Defaults to ``None`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + step (int): Global step value to record. Defaults to 0 + """ + + gt_img_data = None + pred_img_data = None + + if draw_gt: + gt_img_data = image.copy() + gt_img_heatmap = None + + # draw bboxes & keypoints + if 'gt_instances' in data_sample: + gt_img_data = self._draw_instances_kpts( + gt_img_data, data_sample.gt_instances, kpt_thr, + show_kpt_idx, skeleton_style) + if draw_bbox: + gt_img_data = self._draw_instances_bbox( + gt_img_data, data_sample.gt_instances) + + # draw heatmaps + if 'gt_fields' in data_sample and draw_heatmap: + gt_img_heatmap = self._draw_instance_heatmap( + data_sample.gt_fields, image) + + # Draw abox over heatmap + bbox_xyxy = data_sample.gt_instances.bboxes.squeeze() + abox_xyxy = fix_bbox_aspect_ratio(bbox_xyxy, aspect_ratio=3/4, padding=1.25, bbox_format='xyxy') + abox_xyxy = abox_xyxy.flatten().astype(int) + gt_img_heatmap = cv2.rectangle(gt_img_heatmap, (abox_xyxy[0], abox_xyxy[1]), (abox_xyxy[2], abox_xyxy[3]), (0, 255, 0), 2) + + if gt_img_heatmap is not None: + gt_img_data = np.concatenate((gt_img_data, gt_img_heatmap), + axis=0) + + if draw_pred: + pred_img_data = image.copy() + pred_img_heatmap = None + + # draw bboxes & keypoints + if 'pred_instances' in data_sample: + if POSEVIS: + pred_samples = [] + for i in range(data_sample.pred_instances.keypoints.shape[0]): + kpts = data_sample.pred_instances.keypoints[i].reshape(-1, 2) + try: + vis = data_sample.pred_instances.keypoints_probs[i][:, None] + vis[vis < 0.5] = 0 + vis[vis >= 0.5] = 2 + except AttributeError: + vis = data_sample.pred_instances.keypoint_scores[i][:, None] + kpts = np.concatenate([kpts, vis], axis=1) + kpts[kpts[:, -1] < 1e-6, :] = 0 + bbox_xyxy = data_sample.pred_instances.bboxes[i].squeeze() + bbox_xywh = np.array([ + bbox_xyxy[0], + bbox_xyxy[1], + bbox_xyxy[2] - bbox_xyxy[0], + bbox_xyxy[3] - bbox_xyxy[1], + ]) + pred_samples.append({ + 'keypoints': kpts[:17, :], + 'bbox': bbox_xywh, + }) + + pred_img_data = pose_visualization( + pred_img_data, + pred_samples, + format="COCO", + greyness=1.0, + show_markers=True, + show_bones=True, + line_type="solid", + width_multiplier=8.0, + bbox_width_multiplier=3.0, + show_bbox=draw_bbox, + differ_individuals=False, + ) + + # # Draw GT + # # breakpoint() + # gt_kpts = np.array(data_sample.raw_ann_info['keypoints']).reshape(-1, 3) + # gt_kpts[gt_kpts[:, 2] > 0, 2] = 1 + # pred_samples.append({ + # 'keypoints': gt_kpts, + # }) + # pred_img_data = pose_visualization( + # pred_img_data, + # pred_samples, + # format="COCO", + # greyness=1.0, + # show_markers=True, + # show_bones=True, + # line_type="dashed", + # width_multiplier=3.0, + # bbox_width_multiplier=1.0, + # show_bbox=False, + # differ_individuals=False, + # ) + + else: + pred_img_data = self._draw_instances_kpts( + pred_img_data, data_sample.pred_instances, kpt_thr, + show_kpt_idx, skeleton_style) + if draw_bbox: + pred_img_data = self._draw_instances_bbox( + pred_img_data, data_sample.pred_instances) + + # draw heatmaps + if 'pred_fields' in data_sample and draw_heatmap: + if 'keypoint_x_labels' in data_sample.pred_instances: + pred_img_heatmap = self._draw_instance_xy_heatmap( + data_sample.pred_fields, image) + else: + pred_img_heatmap = self._draw_instance_heatmap( + data_sample.pred_fields, image) + + # Draw abox over heatmap + bbox_xyxy = data_sample.gt_instances.bboxes.squeeze() + abox_xyxy = 
fix_bbox_aspect_ratio(bbox_xyxy, aspect_ratio=3/4, padding=1.25, bbox_format='xyxy') + abox_xyxy = abox_xyxy.flatten().astype(int) + pred_img_heatmap = cv2.rectangle(pred_img_heatmap, (abox_xyxy[0], abox_xyxy[1]), (abox_xyxy[2], abox_xyxy[3]), (0, 255, 0), 1) + + if pred_img_heatmap is not None: + pred_img_heatmap = cv2.resize(pred_img_heatmap, (pred_img_data.shape[:2][::-1])) + pred_img_data = np.concatenate( + (pred_img_data, pred_img_heatmap), axis=0) + + # merge visualization results + if gt_img_data is not None and pred_img_data is not None: + if gt_img_heatmap is None and pred_img_heatmap is not None: + gt_img_data = np.concatenate((gt_img_data, image), axis=0) + elif gt_img_heatmap is not None and pred_img_heatmap is None: + pred_img_data = np.concatenate((pred_img_data, image), axis=0) + + drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1) + + elif gt_img_data is not None: + drawn_img = gt_img_data + else: + drawn_img = pred_img_data + + # It is convenient for users to obtain the drawn image. + # For example, the user wants to obtain the drawn image and + # save it as a video during video inference. + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + # save drawn_img to backends + self.add_image(name, drawn_img, step) + + return self.get_image() diff --git a/mmpose/visualization/local_visualizer_3d.py b/mmpose/visualization/local_visualizer_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..09603dba8064fab9c35571ebfb4b094ce2a819eb --- /dev/null +++ b/mmpose/visualization/local_visualizer_3d.py @@ -0,0 +1,637 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, List, Optional, Tuple, Union + +import cv2 +import mmcv +import numpy as np +from matplotlib import pyplot as plt +from mmengine.dist import master_only +from mmengine.structures import InstanceData + +from mmpose.apis import convert_keypoint_definition +from mmpose.registry import VISUALIZERS +from mmpose.structures import PoseDataSample +from . import PoseLocalVisualizer + + +@VISUALIZERS.register_module() +class Pose3dLocalVisualizer(PoseLocalVisualizer): + """MMPose 3d Local Visualizer. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to ``None`` + vis_backends (list, optional): Visual backend config list. Defaults to + ``None`` + save_dir (str, optional): Save file dir for all storage backends. + If it is ``None``, the backend storage will not save any data. + Defaults to ``None`` + bbox_color (str, tuple(int), optional): Color of bbox lines. + The tuple of color should be in BGR order. Defaults to ``'green'`` + kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. + The tuple of color should be in BGR order. Defaults to ``'red'`` + link_color (str, tuple(tuple(int)), optional): Color of skeleton. + The tuple of color should be in BGR order. Defaults to ``None`` + line_width (int, float): The width of lines. Defaults to 1 + radius (int, float): The radius of keypoints. Defaults to 4 + show_keypoint_weight (bool): Whether to adjust the transparency + of keypoints according to their score. Defaults to ``False`` + alpha (int, float): The transparency of bboxes. Defaults to ``0.8`` + det_kpt_color (str, tuple(tuple(int)), optional): Keypoints color + info for detection. 
Defaults to ``None`` + det_dataset_skeleton (list): Skeleton info for detection. Defaults to + ``None`` + det_dataset_link_color (list): Link color for detection. Defaults to + ``None`` + """ + + def __init__( + self, + name: str = 'visualizer', + image: Optional[np.ndarray] = None, + vis_backends: Optional[Dict] = None, + save_dir: Optional[str] = None, + bbox_color: Optional[Union[str, Tuple[int]]] = 'green', + kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', + link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + text_color: Optional[Union[str, Tuple[int]]] = (255, 255, 255), + skeleton: Optional[Union[List, Tuple]] = None, + line_width: Union[int, float] = 1, + radius: Union[int, float] = 3, + show_keypoint_weight: bool = False, + backend: str = 'opencv', + alpha: float = 0.8, + det_kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + det_dataset_skeleton: Optional[Union[str, + Tuple[Tuple[int]]]] = None, + det_dataset_link_color: Optional[np.ndarray] = None): + super().__init__(name, image, vis_backends, save_dir, bbox_color, + kpt_color, link_color, text_color, skeleton, + line_width, radius, show_keypoint_weight, backend, + alpha) + self.det_kpt_color = det_kpt_color + self.det_dataset_skeleton = det_dataset_skeleton + self.det_dataset_link_color = det_dataset_link_color + + def _draw_3d_data_samples(self, + image: np.ndarray, + pose_samples: PoseDataSample, + draw_gt: bool = True, + kpt_thr: float = 0.3, + num_instances=-1, + axis_azimuth: float = 70, + axis_limit: float = 1.7, + axis_dist: float = 10.0, + axis_elev: float = 15.0, + show_kpt_idx: bool = False, + scores_2d: Optional[np.ndarray] = None): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + num_instances (int): Number of instances to be shown in 3D. If + smaller than 0, all the instances in the pose_result will be + shown. Otherwise, pad or truncate the pose_result to a length + of num_instances. + axis_azimuth (float): axis azimuth angle for 3D visualizations. + axis_dist (float): axis distance for 3D visualizations. + axis_elev (float): axis elevation view angle for 3D visualizations. + axis_limit (float): The axis limit to visualize 3d pose. The xyz + range will be set as: + - x: [x_c - axis_limit/2, x_c + axis_limit/2] + - y: [y_c - axis_limit/2, y_c + axis_limit/2] + - z: [0, axis_limit] + Where x_c, y_c is the mean value of x and y coordinates + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + scores_2d (np.ndarray, optional): Keypoint scores of 2d estimation + that will be used to filter 3d instances. + + Returns: + Tuple(np.ndarray): the drawn image which channel is RGB. 
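+
+        Note:
+            The 3d poses are rendered with matplotlib, one subplot per
+            instance; when ``draw_gt`` is enabled, the ground-truth poses
+            are drawn in additional subplots next to the predictions.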
+ """ + vis_width = max(image.shape) + vis_height = vis_width + + if 'pred_instances' in pose_samples: + pred_instances = pose_samples.pred_instances + else: + pred_instances = InstanceData() + if num_instances < 0: + if 'keypoints' in pred_instances: + num_instances = len(pred_instances) + else: + num_instances = 0 + else: + if len(pred_instances) > num_instances: + pred_instances_ = InstanceData() + for k in pred_instances.keys(): + new_val = pred_instances[k][:num_instances] + pred_instances_.set_field(new_val, k) + pred_instances = pred_instances_ + elif num_instances < len(pred_instances): + num_instances = len(pred_instances) + + num_fig = num_instances + if draw_gt: + vis_width *= 2 + num_fig *= 2 + + plt.ioff() + fig = plt.figure( + figsize=(vis_width * num_instances * 0.01, vis_height * 0.01)) + + def _draw_3d_instances_kpts(keypoints, + scores, + scores_2d, + keypoints_visible, + fig_idx, + show_kpt_idx, + title=None): + + for idx, (kpts, score, score_2d) in enumerate( + zip(keypoints, scores, scores_2d)): + + valid = np.logical_and(score >= kpt_thr, score_2d >= kpt_thr, + np.any(~np.isnan(kpts), axis=-1)) + + kpts_valid = kpts[valid] + ax = fig.add_subplot( + 1, num_fig, fig_idx * (idx + 1), projection='3d') + ax.view_init(elev=axis_elev, azim=axis_azimuth) + ax.set_aspect('auto') + ax.set_xticks([]) + ax.set_yticks([]) + ax.set_zticks([]) + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_zticklabels([]) + if title: + ax.set_title(f'{title} ({idx})') + ax.dist = axis_dist + + x_c = np.mean(kpts_valid[:, 0]) if valid.any() else 0 + y_c = np.mean(kpts_valid[:, 1]) if valid.any() else 0 + z_c = np.mean(kpts_valid[:, 2]) if valid.any() else 0 + + ax.set_xlim3d([x_c - axis_limit / 2, x_c + axis_limit / 2]) + ax.set_ylim3d([y_c - axis_limit / 2, y_c + axis_limit / 2]) + ax.set_zlim3d( + [min(0, z_c - axis_limit / 2), z_c + axis_limit / 2]) + + if self.kpt_color is None or isinstance(self.kpt_color, str): + kpt_color = [self.kpt_color] * len(kpts) + elif len(self.kpt_color) == len(kpts): + kpt_color = self.kpt_color + else: + raise ValueError( + f'the length of kpt_color ' + f'({len(self.kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + x_3d, y_3d, z_3d = np.split(kpts_valid[:, :3], [1, 2], axis=1) + + kpt_color = kpt_color[valid] / 255. + + ax.scatter(x_3d, y_3d, z_3d, marker='o', c=kpt_color) + + if show_kpt_idx: + for kpt_idx in range(len(x_3d)): + ax.text(x_3d[kpt_idx][0], y_3d[kpt_idx][0], + z_3d[kpt_idx][0], str(kpt_idx)) + + if self.skeleton is not None and self.link_color is not None: + if self.link_color is None or isinstance( + self.link_color, str): + link_color = [self.link_color] * len(self.skeleton) + elif len(self.link_color) == len(self.skeleton): + link_color = self.link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(self.link_color)}) does not matches ' + f'that of skeleton ({len(self.skeleton)})') + + for sk_id, sk in enumerate(self.skeleton): + sk_indices = [_i for _i in sk] + xs_3d = kpts[sk_indices, 0] + ys_3d = kpts[sk_indices, 1] + zs_3d = kpts[sk_indices, 2] + kpt_score = score[sk_indices] + kpt_score_2d = score_2d[sk_indices] + if kpt_score.min() > kpt_thr and kpt_score_2d.min( + ) > kpt_thr: + # matplotlib uses RGB color in [0, 1] value range + _color = link_color[sk_id] / 255. 
+ ax.plot( + xs_3d, ys_3d, zs_3d, color=_color, zdir='z') + + if 'keypoints' in pred_instances: + keypoints = pred_instances.get('keypoints', + pred_instances.keypoints) + + if 'keypoint_scores' in pred_instances: + scores = pred_instances.keypoint_scores + else: + scores = np.ones(keypoints.shape[:-1]) + + if scores_2d is None: + scores_2d = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in pred_instances: + keypoints_visible = pred_instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + _draw_3d_instances_kpts(keypoints, scores, scores_2d, + keypoints_visible, 1, show_kpt_idx, + 'Prediction') + + if draw_gt and 'gt_instances' in pose_samples: + gt_instances = pose_samples.gt_instances + if 'lifting_target' in gt_instances: + keypoints = gt_instances.get('lifting_target', + gt_instances.lifting_target) + scores = np.ones(keypoints.shape[:-1]) + + if 'lifting_target_visible' in gt_instances: + keypoints_visible = gt_instances.lifting_target_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + elif 'keypoints_gt' in gt_instances: + keypoints = gt_instances.get('keypoints_gt', + gt_instances.keypoints_gt) + scores = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in gt_instances: + keypoints_visible = gt_instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + else: + raise ValueError('to visualize ground truth results, ' + 'data sample must contain ' + '"lifting_target" or "keypoints_gt"') + + if scores_2d is None: + scores_2d = np.ones(keypoints.shape[:-1]) + + _draw_3d_instances_kpts(keypoints, scores, scores_2d, + keypoints_visible, 2, show_kpt_idx, + 'Ground Truth') + + # convert figure to numpy array + fig.tight_layout() + fig.canvas.draw() + + pred_img_data = np.frombuffer( + fig.canvas.tostring_rgb(), dtype=np.uint8) + + if not pred_img_data.any(): + pred_img_data = np.full((vis_height, vis_width, 3), 255) + else: + width, height = fig.get_size_inches() * fig.get_dpi() + pred_img_data = pred_img_data.reshape( + int(height), + int(width) * num_instances, 3) + + plt.close(fig) + + return pred_img_data + + def _draw_instances_kpts(self, + image: np.ndarray, + instances: InstanceData, + kpt_thr: float = 0.3, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose'): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + + Returns: + np.ndarray: the drawn image which channel is RGB. 
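+            np.ndarray: the 2d keypoint scores, which this 3d variant
+                additionally returns so that they can be reused to filter
+                the lifted 3d instances.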
+ """ + + self.set_image(image) + img_h, img_w, _ = image.shape + scores = None + + if 'keypoints' in instances: + keypoints = instances.get('transformed_keypoints', + instances.keypoints) + + if 'keypoint_scores' in instances: + scores = instances.keypoint_scores + else: + scores = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in instances: + keypoints_visible = instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + if skeleton_style == 'openpose': + keypoints_info = np.concatenate( + (keypoints, scores[..., None], keypoints_visible[..., + None]), + axis=-1) + # compute neck joint + neck = np.mean(keypoints_info[:, [5, 6]], axis=1) + # neck score when visualizing pred + neck[:, 2:4] = np.logical_and( + keypoints_info[:, 5, 2:4] > kpt_thr, + keypoints_info[:, 6, 2:4] > kpt_thr).astype(int) + new_keypoints_info = np.insert( + keypoints_info, 17, neck, axis=1) + + mmpose_idx = [ + 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 + ] + openpose_idx = [ + 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 + ] + new_keypoints_info[:, openpose_idx] = \ + new_keypoints_info[:, mmpose_idx] + keypoints_info = new_keypoints_info + + keypoints, scores, keypoints_visible = keypoints_info[ + ..., :2], keypoints_info[..., 2], keypoints_info[..., 3] + + kpt_color = self.kpt_color + if self.det_kpt_color is not None: + kpt_color = self.det_kpt_color + + for kpts, score, visible in zip(keypoints, scores, + keypoints_visible): + kpts = np.array(kpts[..., :2], copy=False) + + if kpt_color is None or isinstance(kpt_color, str): + kpt_color = [kpt_color] * len(kpts) + elif len(kpt_color) == len(kpts): + kpt_color = kpt_color + else: + raise ValueError(f'the length of kpt_color ' + f'({len(kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + # draw each point on image + for kid, kpt in enumerate(kpts): + if score[kid] < kpt_thr or not visible[ + kid] or kpt_color[kid] is None: + # skip the point that should not be drawn + continue + + color = kpt_color[kid] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max(0, min(1, score[kid])) + self.draw_circles( + kpt, + radius=np.array([self.radius]), + face_colors=color, + edge_colors=color, + alpha=transparency, + line_widths=self.radius) + if show_kpt_idx: + self.draw_texts( + str(kid), + kpt, + colors=color, + font_sizes=self.radius * 3, + vertical_alignments='bottom', + horizontal_alignments='center') + + # draw links + skeleton = self.skeleton + if self.det_dataset_skeleton is not None: + skeleton = self.det_dataset_skeleton + link_color = self.link_color + if self.det_dataset_link_color is not None: + link_color = self.det_dataset_link_color + if skeleton is not None and link_color is not None: + if link_color is None or isinstance(link_color, str): + link_color = [link_color] * len(skeleton) + elif len(link_color) == len(skeleton): + link_color = link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(link_color)}) does not matches ' + f'that of skeleton ({len(skeleton)})') + + for sk_id, sk in enumerate(skeleton): + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + if not (visible[sk[0]] and visible[sk[1]]): + continue + + if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 + or pos1[1] >= img_h or pos2[0] <= 0 + or pos2[0] >= img_w or pos2[1] <= 0 + or pos2[1] >= img_h or score[sk[0]] < kpt_thr + or score[sk[1]] < 
kpt_thr + or link_color[sk_id] is None): + # skip the link that should not be drawn + continue + X = np.array((pos1[0], pos2[0])) + Y = np.array((pos1[1], pos2[1])) + color = link_color[sk_id] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max( + 0, min(1, 0.5 * (score[sk[0]] + score[sk[1]]))) + + if skeleton_style == 'openpose': + mX = np.mean(X) + mY = np.mean(Y) + length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 + angle = math.degrees( + math.atan2(Y[0] - Y[1], X[0] - X[1])) + stickwidth = 2 + polygons = cv2.ellipse2Poly( + (int(mX), int(mY)), + (int(length / 2), int(stickwidth)), int(angle), + 0, 360, 1) + + self.draw_polygons( + polygons, + edge_colors=color, + face_colors=color, + alpha=transparency) + + else: + self.draw_lines( + X, Y, color, line_widths=self.line_width) + + return self.get_image(), scores + + @master_only + def add_datasample(self, + name: str, + image: np.ndarray, + data_sample: PoseDataSample, + det_data_sample: Optional[PoseDataSample] = None, + draw_gt: bool = True, + draw_pred: bool = True, + draw_2d: bool = True, + draw_bbox: bool = False, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose', + dataset_2d: str = 'coco', + dataset_3d: str = 'h36m', + convert_keypoint: bool = True, + axis_azimuth: float = 70, + axis_limit: float = 1.7, + axis_dist: float = 10.0, + axis_elev: float = 15.0, + num_instances: int = -1, + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + kpt_thr: float = 0.3, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are + displayed in a stitched image where the left image is the + ground truth and the right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and + the images will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be + saved to ``out_file``. t is usually used when the display + is not available. + + Args: + name (str): The image identifier + image (np.ndarray): The image to draw + data_sample (:obj:`PoseDataSample`): The 3d data sample + to visualize + det_data_sample (:obj:`PoseDataSample`, optional): The 2d detection + data sample to visualize + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + draw_pred (bool): Whether to draw Prediction PoseDataSample. + Defaults to ``True`` + draw_2d (bool): Whether to draw 2d detection results. Defaults to + ``True`` + draw_bbox (bool): Whether to draw bounding boxes. Default to + ``False`` + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + dataset_2d (str): Name of 2d keypoint dataset. Defaults to + ``'CocoDataset'`` + dataset_3d (str): Name of 3d keypoint dataset. Defaults to + ``'Human36mDataset'`` + convert_keypoint (bool): Whether to convert keypoint definition. + Defaults to ``True`` + axis_azimuth (float): axis azimuth angle for 3D visualizations. + axis_dist (float): axis distance for 3D visualizations. + axis_elev (float): axis elevation view angle for 3D visualizations. + axis_limit (float): The axis limit to visualize 3d pose. 
The xyz + range will be set as: + - x: [x_c - axis_limit/2, x_c + axis_limit/2] + - y: [y_c - axis_limit/2, y_c + axis_limit/2] + - z: [0, axis_limit] + Where x_c, y_c is the mean value of x and y coordinates + num_instances (int): Number of instances to be shown in 3D. If + smaller than 0, all the instances in the pose_result will be + shown. Otherwise, pad or truncate the pose_result to a length + of num_instances. Defaults to -1 + show (bool): Whether to display the drawn image. Default to + ``False`` + wait_time (float): The interval of show (s). Defaults to 0 + out_file (str): Path to output file. Defaults to ``None`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + step (int): Global step value to record. Defaults to 0 + """ + + det_img_data = None + scores_2d = None + + if draw_2d: + det_img_data = image.copy() + + # draw bboxes & keypoints + if (det_data_sample is not None + and 'pred_instances' in det_data_sample): + det_img_data, scores_2d = self._draw_instances_kpts( + image=det_img_data, + instances=det_data_sample.pred_instances, + kpt_thr=kpt_thr, + show_kpt_idx=show_kpt_idx, + skeleton_style=skeleton_style) + if draw_bbox: + det_img_data = self._draw_instances_bbox( + det_img_data, det_data_sample.pred_instances) + if scores_2d is not None and convert_keypoint: + if scores_2d.ndim == 2: + scores_2d = scores_2d[..., None] + scores_2d = np.squeeze( + convert_keypoint_definition(scores_2d, dataset_2d, dataset_3d), + axis=-1) + pred_img_data = self._draw_3d_data_samples( + image.copy(), + data_sample, + draw_gt=draw_gt, + num_instances=num_instances, + axis_azimuth=axis_azimuth, + axis_limit=axis_limit, + show_kpt_idx=show_kpt_idx, + axis_dist=axis_dist, + axis_elev=axis_elev, + scores_2d=scores_2d) + + # merge visualization results + if det_img_data is not None: + width = max(pred_img_data.shape[1] - det_img_data.shape[1], 0) + height = max(pred_img_data.shape[0] - det_img_data.shape[0], 0) + det_img_data = cv2.copyMakeBorder( + det_img_data, + height // 2, + (height // 2 + 1) if height % 2 == 1 else height // 2, + width // 2, (width // 2 + 1) if width % 2 == 1 else width // 2, + cv2.BORDER_CONSTANT, + value=(255, 255, 255)) + drawn_img = np.concatenate((det_img_data, pred_img_data), axis=1) + else: + drawn_img = pred_img_data + + # It is convenient for users to obtain the drawn image. + # For example, the user wants to obtain the drawn image and + # save it as a video during video inference. + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + # save drawn_img to backends + self.add_image(name, drawn_img, step) + + return self.get_image() diff --git a/mmpose/visualization/opencv_backend_visualizer.py b/mmpose/visualization/opencv_backend_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..9604d07fead0187fc08e084f07f7714406072b93 --- /dev/null +++ b/mmpose/visualization/opencv_backend_visualizer.py @@ -0,0 +1,465 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.dist import master_only +from mmengine.visualization import Visualizer + + +class OpencvBackendVisualizer(Visualizer): + """Base visualizer with opencv backend support. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. 
The format + should be RGB. Defaults to None. + vis_backends (list, optional): Visual backend config list. + Defaults to None. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + fig_save_cfg (dict): Keyword parameters of figure for saving. + Defaults to empty dict. + fig_show_cfg (dict): Keyword parameters of figure for showing. + Defaults to empty dict. + backend (str): Backend used to draw elements on the image and display + the image. Defaults to 'matplotlib'. + alpha (int, float): The transparency of bboxes. Defaults to ``1.0`` + """ + + def __init__(self, + name='visualizer', + backend: str = 'matplotlib', + *args, + **kwargs): + super().__init__(name, *args, **kwargs) + assert backend in ('opencv', 'matplotlib'), f'the argument ' \ + f'\'backend\' must be either \'opencv\' or \'matplotlib\', ' \ + f'but got \'{backend}\'.' + self.backend = backend + + @master_only + def set_image(self, image: np.ndarray) -> None: + """Set the image to draw. + + Args: + image (np.ndarray): The image to draw. + backend (str): The backend to save the image. + """ + assert image is not None + image = image.astype('uint8') + self._image = image + self.width, self.height = image.shape[1], image.shape[0] + self._default_font_size = max( + np.sqrt(self.height * self.width) // 90, 10) + + if self.backend == 'matplotlib': + # add a small 1e-2 to avoid precision lost due to matplotlib's + # truncation (https://github.com/matplotlib/matplotlib/issues/15363) # noqa + self.fig_save.set_size_inches( # type: ignore + (self.width + 1e-2) / self.dpi, + (self.height + 1e-2) / self.dpi) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + self.ax_save.cla() + self.ax_save.axis(False) + self.ax_save.imshow( + image, + extent=(0, self.width, self.height, 0), + interpolation='none') + + @master_only + def get_image(self) -> np.ndarray: + """Get the drawn image. The format is RGB. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + assert self._image is not None, 'Please set image using `set_image`' + if self.backend == 'matplotlib': + return super().get_image() + else: + return self._image + + @master_only + def draw_circles(self, + center: Union[np.ndarray, torch.Tensor], + radius: Union[np.ndarray, torch.Tensor], + face_colors: Union[str, tuple, List[str], + List[tuple]] = 'none', + alpha: float = 1.0, + **kwargs) -> 'Visualizer': + """Draw single or multiple circles. + + Args: + center (Union[np.ndarray, torch.Tensor]): The x coordinate of + each line' start and end points. + radius (Union[np.ndarray, torch.Tensor]): The y coordinate of + each line' start and end points. + edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of circles. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, + all the lines will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to 'g. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. 
+ line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + face_colors (Union[str, tuple, List[str], List[tuple]]): + The face colors. Defaults to None. + alpha (Union[int, float]): The transparency of circles. + Defaults to 0.8. + """ + if self.backend == 'matplotlib': + super().draw_circles( + center=center, + radius=radius, + face_colors=face_colors, + alpha=alpha, + **kwargs) + elif self.backend == 'opencv': + if isinstance(face_colors, str): + face_colors = mmcv.color_val(face_colors)[::-1] + + if alpha == 1.0: + self._image = cv2.circle(self._image, + (int(center[0]), int(center[1])), + int(radius), face_colors, -1) + else: + img = cv2.circle(self._image.copy(), + (int(center[0]), int(center[1])), int(radius), + face_colors, -1) + self._image = cv2.addWeighted(self._image, 1 - alpha, img, + alpha, 0) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_texts( + self, + texts: Union[str, List[str]], + positions: Union[np.ndarray, torch.Tensor], + font_sizes: Optional[Union[int, List[int]]] = None, + colors: Union[str, tuple, List[str], List[tuple]] = 'g', + vertical_alignments: Union[str, List[str]] = 'top', + horizontal_alignments: Union[str, List[str]] = 'left', + bboxes: Optional[Union[dict, List[dict]]] = None, + **kwargs, + ) -> 'Visualizer': + """Draw single or multiple text boxes. + + Args: + texts (Union[str, List[str]]): Texts to draw. + positions (Union[np.ndarray, torch.Tensor]): The position to draw + the texts, which should have the same length with texts and + each dim contain x and y. + font_sizes (Union[int, List[int]], optional): The font size of + texts. ``font_sizes`` can have the same length with texts or + just single value. If ``font_sizes`` is single value, all the + texts will have the same font size. Defaults to None. + colors (Union[str, tuple, List[str], List[tuple]]): The colors + of texts. ``colors`` can have the same length with texts or + just single value. If ``colors`` is single value, all the + texts will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to 'g. + vertical_alignments (Union[str, List[str]]): The verticalalignment + of texts. verticalalignment controls whether the y positional + argument for the text indicates the bottom, center or top side + of the text bounding box. + ``vertical_alignments`` can have the same length with + texts or just single value. If ``vertical_alignments`` is + single value, all the texts will have the same + verticalalignment. verticalalignment can be 'center' or + 'top', 'bottom' or 'baseline'. Defaults to 'top'. + horizontal_alignments (Union[str, List[str]]): The + horizontalalignment of texts. Horizontalalignment controls + whether the x positional argument for the text indicates the + left, center or right side of the text bounding box. + ``horizontal_alignments`` can have + the same length with texts or just single value. + If ``horizontal_alignments`` is single value, all the texts + will have the same horizontalalignment. Horizontalalignment + can be 'center','right' or 'left'. Defaults to 'left'. + font_families (Union[str, List[str]]): The font family of + texts. ``font_families`` can have the same length with texts or + just single value. 
If ``font_families`` is single value, all + the texts will have the same font family. + font_familiy can be 'serif', 'sans-serif', 'cursive', 'fantasy' + or 'monospace'. Defaults to 'sans-serif'. + bboxes (Union[dict, List[dict]], optional): The bounding box of the + texts. If bboxes is None, there are no bounding box around + texts. ``bboxes`` can have the same length with texts or + just single value. If ``bboxes`` is single value, all + the texts will have the same bbox. Reference to + https://matplotlib.org/stable/api/_as_gen/matplotlib.patches.FancyBboxPatch.html#matplotlib.patches.FancyBboxPatch + for more details. Defaults to None. + font_properties (Union[FontProperties, List[FontProperties]], optional): + The font properties of texts. FontProperties is + a ``font_manager.FontProperties()`` object. + If you want to draw Chinese texts, you need to prepare + a font file that can show Chinese characters properly. + For example: `simhei.ttf`, `simsun.ttc`, `simkai.ttf` and so on. + Then set ``font_properties=matplotlib.font_manager.FontProperties(fname='path/to/font_file')`` + ``font_properties`` can have the same length with texts or + just single value. If ``font_properties`` is single value, + all the texts will have the same font properties. + Defaults to None. + `New in version 0.6.0.` + """ # noqa: E501 + + if self.backend == 'matplotlib': + super().draw_texts( + texts=texts, + positions=positions, + font_sizes=font_sizes, + colors=colors, + vertical_alignments=vertical_alignments, + horizontal_alignments=horizontal_alignments, + bboxes=bboxes, + **kwargs) + + elif self.backend == 'opencv': + font_scale = max(0.1, font_sizes / 30) + thickness = max(1, font_sizes // 15) + + text_size, text_baseline = cv2.getTextSize(texts, + cv2.FONT_HERSHEY_DUPLEX, + font_scale, thickness) + + x = int(positions[0]) + if horizontal_alignments == 'right': + x = max(0, x - text_size[0]) + y = int(positions[1]) + if vertical_alignments == 'top': + y = min(self.height, y + text_size[1]) + + if bboxes is not None: + bbox_color = bboxes[0]['facecolor'] + if isinstance(bbox_color, str): + bbox_color = mmcv.color_val(bbox_color)[::-1] + + y = y - text_baseline // 2 + self._image = cv2.rectangle( + self._image, (x, y - text_size[1] - text_baseline // 2), + (x + text_size[0], y + text_baseline // 2), bbox_color, + cv2.FILLED) + + self._image = cv2.putText(self._image, texts, (x, y), + cv2.FONT_HERSHEY_SIMPLEX, font_scale, + colors, thickness - 1) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_bboxes(self, + bboxes: Union[np.ndarray, torch.Tensor], + edge_colors: Union[str, tuple, List[str], + List[tuple]] = 'g', + line_widths: Union[Union[int, float], + List[Union[int, float]]] = 2, + **kwargs) -> 'Visualizer': + """Draw single or multiple bboxes. + + Args: + bboxes (Union[np.ndarray, torch.Tensor]): The bboxes to draw with + the format of(x1,y1,x2,y2). + edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of bboxes. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, all + the lines will have the same colors. Refer to `matplotlib. + colors` for full list of formats that are accepted. + Defaults to 'g'. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. 
+                Reference to
+                https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle
+                for more details. Defaults to '-'.
+            line_widths (Union[Union[int, float], List[Union[int, float]]]):
+                The linewidth of lines. ``line_widths`` can have
+                the same length with lines or just single value.
+                If ``line_widths`` is single value, all the lines will
+                have the same linewidth. Defaults to 2.
+            face_colors (Union[str, tuple, List[str], List[tuple]]):
+                The face colors. Defaults to None.
+            alpha (Union[int, float]): The transparency of bboxes.
+                Defaults to 0.8.
+        """
+        if self.backend == 'matplotlib':
+            super().draw_bboxes(
+                bboxes=bboxes,
+                edge_colors=edge_colors,
+                line_widths=line_widths,
+                **kwargs)
+
+        elif self.backend == 'opencv':
+            self._image = mmcv.imshow_bboxes(
+                self._image,
+                bboxes,
+                edge_colors,
+                top_k=-1,
+                thickness=line_widths,
+                show=False)
+        else:
+            raise ValueError(f'got unsupported backend {self.backend}')
+
+    @master_only
+    def draw_lines(self,
+                   x_datas: Union[np.ndarray, torch.Tensor],
+                   y_datas: Union[np.ndarray, torch.Tensor],
+                   colors: Union[str, tuple, List[str], List[tuple]] = 'g',
+                   line_widths: Union[Union[int, float],
+                                      List[Union[int, float]]] = 2,
+                   **kwargs) -> 'Visualizer':
+        """Draw single or multiple line segments.
+
+        Args:
+            x_datas (Union[np.ndarray, torch.Tensor]): The x coordinate of
+                each line's start and end points.
+            y_datas (Union[np.ndarray, torch.Tensor]): The y coordinate of
+                each line's start and end points.
+            colors (Union[str, tuple, List[str], List[tuple]]): The colors of
+                lines. ``colors`` can have the same length with lines or just
+                single value. If ``colors`` is single value, all the lines
+                will have the same colors. Reference to
+                https://matplotlib.org/stable/gallery/color/named_colors.html
+                for more details. Defaults to 'g'.
+            line_styles (Union[str, List[str]]): The linestyle
+                of lines. ``line_styles`` can have the same length with
+                lines or just single value. If ``line_styles`` is single
+                value, all the lines will have the same linestyle.
+                Reference to
+                https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle
+                for more details. Defaults to '-'.
+            line_widths (Union[Union[int, float], List[Union[int, float]]]):
+                The linewidth of lines. ``line_widths`` can have
+                the same length with lines or just single value.
+                If ``line_widths`` is single value, all the lines will
+                have the same linewidth. Defaults to 2.
+        """
+        if self.backend == 'matplotlib':
+            super().draw_lines(
+                x_datas=x_datas,
+                y_datas=y_datas,
+                colors=colors,
+                line_widths=line_widths,
+                **kwargs)
+
+        elif self.backend == 'opencv':
+            if isinstance(colors, str):
+                colors = mmcv.color_val(colors)[::-1]
+            self._image = cv2.line(
+                self._image, (x_datas[0], y_datas[0]),
+                (x_datas[1], y_datas[1]),
+                colors,
+                thickness=line_widths)
+        else:
+            raise ValueError(f'got unsupported backend {self.backend}')
+
+    @master_only
+    def draw_polygons(self,
+                      polygons: Union[Union[np.ndarray, torch.Tensor],
+                                      List[Union[np.ndarray, torch.Tensor]]],
+                      edge_colors: Union[str, tuple, List[str],
+                                         List[tuple]] = 'g',
+                      alpha: float = 1.0,
+                      **kwargs) -> 'Visualizer':
+        """Draw single or multiple polygons.
+
+        Args:
+            polygons (Union[Union[np.ndarray, torch.Tensor],\
+                List[Union[np.ndarray, torch.Tensor]]]): The polygons to draw
+                with the format of (x1,y1,x2,y2,...,xn,yn).
+ edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of polygons. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, + all the lines will have the same colors. Refer to + `matplotlib.colors` for full list of formats that are accepted. + Defaults to 'g. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + face_colors (Union[str, tuple, List[str], List[tuple]]): + The face colors. Defaults to None. + alpha (Union[int, float]): The transparency of polygons. + Defaults to 0.8. + """ + if self.backend == 'matplotlib': + super().draw_polygons( + polygons=polygons, + edge_colors=edge_colors, + alpha=alpha, + **kwargs) + + elif self.backend == 'opencv': + if alpha == 1.0: + self._image = cv2.fillConvexPoly(self._image, polygons, + edge_colors) + else: + img = cv2.fillConvexPoly(self._image.copy(), polygons, + edge_colors) + self._image = cv2.addWeighted(self._image, 1 - alpha, img, + alpha, 0) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def show(self, + drawn_img: Optional[np.ndarray] = None, + win_name: str = 'image', + wait_time: float = 0., + continue_key=' ') -> None: + """Show the drawn image. + + Args: + drawn_img (np.ndarray, optional): The image to show. If drawn_img + is None, it will show the image got by Visualizer. Defaults + to None. + win_name (str): The image title. Defaults to 'image'. + wait_time (float): Delay in seconds. 0 is the special + value that means "forever". Defaults to 0. + continue_key (str): The key for users to continue. Defaults to + the space key. + """ + if self.backend == 'matplotlib': + super().show( + drawn_img=drawn_img, + win_name=win_name, + wait_time=wait_time, + continue_key=continue_key) + + elif self.backend == 'opencv': + # Keep images are shown in the same window, and the title of window + # will be updated with `win_name`. + if not hasattr(self, win_name): + self._cv_win_name = win_name + cv2.namedWindow(winname=f'{id(self)}') + cv2.setWindowTitle(f'{id(self)}', win_name) + else: + cv2.setWindowTitle(f'{id(self)}', win_name) + shown_img = self.get_image() if drawn_img is None else drawn_img + cv2.imshow(str(id(self)), mmcv.bgr2rgb(shown_img)) + cv2.waitKey(int(np.ceil(wait_time * 1000))) + else: + raise ValueError(f'got unsupported backend {self.backend}') diff --git a/mmpose/visualization/simcc_vis.py b/mmpose/visualization/simcc_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5b602fb5c4ffe2a46ddb2cf09a2cd4501b1664 --- /dev/null +++ b/mmpose/visualization/simcc_vis.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
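+# Usage sketch (editor's note, shapes are assumptions for illustration):
+# SimCCVisualizer below expects a (K, H, W) keypoint heatmap tensor and an
+# RGB frame with the same H x W. A minimal call could look like:
+#
+#   import numpy as np
+#   import torch
+#
+#   heatmap = torch.rand(17, 256, 192)          # K=17 keypoints, H=256, W=192
+#   frame = np.zeros((256, 192, 3), np.uint8)   # RGB image matching H x W
+#   canvas = SimCCVisualizer().draw_instance_xy_heatmap(heatmap, frame, n=17)
+#
+# `canvas` is a white board holding the fused 2D heatmap plus the per-keypoint
+# 1D x/y heatmaps pasted around it (see the methods below).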
+from typing import Optional, Union + +import cv2 as cv +import numpy as np +import torch +from torchvision.transforms import ToPILImage + + +class SimCCVisualizer: + + def draw_instance_xy_heatmap(self, + heatmap: torch.Tensor, + overlaid_image: Optional[np.ndarray], + n: int = 20, + mix: bool = True, + weight: float = 0.5): + """Draw heatmaps of GT or prediction. + + Args: + heatmap (torch.Tensor): Tensor of heatmap. + overlaid_image (np.ndarray): The image to draw. + n (int): Number of keypoint, up to 20. + mix (bool):Whether to merge heatmap and original image. + weight (float): Weight of original image during fusion. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + heatmap2d = heatmap.data.max(0, keepdim=True)[0] + xy_heatmap, K = self.split_simcc_xy(heatmap) + K = K if K <= n else n + blank_size = tuple(heatmap.size()[1:]) + maps = {'x': [], 'y': []} + for i in xy_heatmap: + x, y = self.draw_1d_heatmaps(i['x']), self.draw_1d_heatmaps(i['y']) + maps['x'].append(x) + maps['y'].append(y) + white = self.creat_blank(blank_size, K) + map2d = self.draw_2d_heatmaps(heatmap2d) + if mix: + map2d = cv.addWeighted(overlaid_image, 1 - weight, map2d, weight, + 0) + self.image_cover(white, map2d, int(blank_size[1] * 0.1), + int(blank_size[0] * 0.1)) + white = self.add_1d_heatmaps(maps, white, blank_size, K) + return white + + def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]): + """Extract one-dimensional heatmap from two-dimensional heatmap and + calculate the number of keypoint.""" + size = heatmap.size() + k = size[0] if size[0] <= 20 else 20 + maps = [] + for _ in range(k): + xy_dict = {} + single_heatmap = heatmap[_] + xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap) + maps.append(xy_dict) + return maps, k + + def merge_maps(self, map_2d): + """Synthesis of one-dimensional heatmap.""" + x = map_2d.data.max(0, keepdim=True)[0] + y = map_2d.data.max(1, keepdim=True)[0] + return x, y + + def draw_1d_heatmaps(self, heatmap_1d): + """Draw one-dimensional heatmap.""" + size = heatmap_1d.size() + length = max(size) + np_heatmap = ToPILImage()(heatmap_1d).convert('RGB') + cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) + if size[0] < size[1]: + cv_img = cv.resize(cv_img, (length, 15)) + else: + cv_img = cv.resize(cv_img, (15, length)) + single_map = cv.applyColorMap(cv_img, cv.COLORMAP_JET) + return single_map + + def creat_blank(self, + size: Union[list, tuple], + K: int = 20, + interval: int = 10): + """Create the background.""" + blank_height = int( + max(size[0] * 2, size[0] * 1.1 + (K + 1) * (15 + interval))) + blank_width = int( + max(size[1] * 2, size[1] * 1.1 + (K + 1) * (15 + interval))) + blank = np.zeros((blank_height, blank_width, 3), np.uint8) + blank.fill(255) + return blank + + def draw_2d_heatmaps(self, heatmap_2d): + """Draw a two-dimensional heatmap fused with the original image.""" + np_heatmap = ToPILImage()(heatmap_2d).convert('RGB') + cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) + map_2d = cv.applyColorMap(cv_img, cv.COLORMAP_JET) + return map_2d + + def image_cover(self, background: np.ndarray, foreground: np.ndarray, + x: int, y: int): + """Paste the foreground on the background.""" + fore_size = foreground.shape + background[y:y + fore_size[0], x:x + fore_size[1]] = foreground + return background + + def add_1d_heatmaps(self, + maps: dict, + background: np.ndarray, + map2d_size: Union[tuple, list], + K: int, + interval: int = 10): + """Paste one-dimensional heatmaps onto the background in 
turn.""" + y_startpoint, x_startpoint = [int(1.1*map2d_size[1]), + int(0.1*map2d_size[0])],\ + [int(0.1*map2d_size[1]), + int(1.1*map2d_size[0])] + x_startpoint[1] += interval * 2 + y_startpoint[0] += interval * 2 + add = interval + 10 + for i in range(K): + self.image_cover(background, maps['x'][i], x_startpoint[0], + x_startpoint[1]) + cv.putText(background, str(i), + (x_startpoint[0] - 30, x_startpoint[1] + 10), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) + self.image_cover(background, maps['y'][i], y_startpoint[0], + y_startpoint[1]) + cv.putText(background, str(i), + (y_startpoint[0], y_startpoint[1] - 5), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) + x_startpoint[1] += add + y_startpoint[0] += add + return background[:x_startpoint[1] + y_startpoint[1] + + 1, :y_startpoint[0] + x_startpoint[0] + 1] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..af8564ec7bfa2bf30f4ddcc876f9c22dcd4099d5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,44 @@ +torch==2.1.2 +torchvision==0.16.2 +numpy==1.25.1 +opencv-python==4.9.0.80 + +mmcv==2.1.0 +mmdet==3.3.0 +mmpretrain==1.2.0 + +av>=13.0.0 +black==24.2.0 +chumpy +dataclasses-json>=0.6.7 +eva-decord>=0.6.1 +Flask-Cors>=5.0.0 +Flask>=3.0.3 +fvcore>=0.1.5.post20221221 +gunicorn>=23.0.0 +hydra-core>=1.3.2 +imagesize>=1.4.1 +iopath>=0.1.10 +json_tricks +jupyter>=1.0.0 +loguru +matplotlib>=3.9.1 +munkres +pandas>=2.2.2 +pillow>=9.4.0 +pycocotools>=2.0.8 +scikit-image>=0.24.0 +scipy +sparsemax +strawberry-graphql>=0.243.0 +submitit>=1.5.1 +tensorboard>=2.17.0 +tensordict>=0.6.0 +tqdm>=4.66.1 +ufmt==2.0.0b2 +usort==1.0.2 +xtcocotools>=1.12 + +# gradio app +gradio-webrtc +twilio \ No newline at end of file diff --git a/sam2/__init__.py b/sam2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0712dd03cb280ab94ba04f8a32aa8ddc8aa3db4a --- /dev/null +++ b/sam2/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from hydra import initialize_config_module +from hydra.core.global_hydra import GlobalHydra + +if not GlobalHydra.instance().is_initialized(): + initialize_config_module("sam2", version_base="1.2") diff --git a/sam2/automatic_mask_generator.py b/sam2/automatic_mask_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..065e469e27c2d3af40d51d072031e828692c799b --- /dev/null +++ b/sam2/automatic_mask_generator.py @@ -0,0 +1,454 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py +from typing import Any, Dict, List, Optional, Tuple + +import numpy as np +import torch +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from sam2.modeling.sam2_base import SAM2Base +from sam2.sam2_image_predictor import SAM2ImagePredictor +from sam2.utils.amg import ( + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + MaskData, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +class SAM2AutomaticMaskGenerator: + def __init__( + self, + model: SAM2Base, + points_per_side: Optional[int] = 32, + points_per_batch: int = 64, + pred_iou_thresh: float = 0.8, + stability_score_thresh: float = 0.95, + stability_score_offset: float = 1.0, + mask_threshold: float = 0.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 0, + output_mode: str = "binary_mask", + use_m2m: bool = False, + multimask_output: bool = True, + **kwargs, + ) -> None: + """ + Using a SAM 2 model, generates masks for the entire image. + Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM 2 with a HieraL backbone. + + Arguments: + model (Sam): The SAM 2 model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + mask_threshold (float): Threshold for binarizing the mask logits + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crop_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crop_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. 
+ min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. + For large resolutions, 'binary_mask' may consume large amounts of + memory. + use_m2m (bool): Whether to add a one step refinement using previous mask predictions. + multimask_output (bool): Whether to output multimask at each point of the grid. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + try: + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + except ImportError as e: + print("Please install pycocotools") + raise e + + self.predictor = SAM2ImagePredictor( + model, + max_hole_area=min_mask_region_area, + max_sprinkle_area=min_mask_region_area, + ) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.mask_threshold = mask_threshold + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + self.use_m2m = use_m2m + self.multimask_output = multimask_output + + @classmethod + def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator": + """ + Load a pretrained model from the Hugging Face hub. + + Arguments: + model_id (str): The Hugging Face repository ID. + **kwargs: Additional arguments to pass to the model constructor. + + Returns: + (SAM2AutomaticMaskGenerator): The loaded model. + """ + from sam2.build_sam import build_sam2_hf + + sam_model = build_sam2_hf(model_id, **kwargs) + return cls(sam_model, **kwargs) + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. 
+ crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. + """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [ + coco_encode_rle(rle) for rle in mask_data["rles"] + ] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch( + points, cropped_im_size, crop_box, orig_size, normalize=True + ) + data.cat(batch_data) + del batch_data + self.predictor.reset_predictor() + + # Remove duplicates within this crop. 
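+        # (Editor's note) `batched_nms` below is called with a single dummy
+        # category, so suppression is purely IoU-based against
+        # `box_nms_thresh`, keeping the candidate with the highest predicted
+        # IoU among overlapping masks.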
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + normalize=False, + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + points = torch.as_tensor( + points, dtype=torch.float32, device=self.predictor.device + ) + in_points = self.predictor._transforms.transform_coords( + points, normalize=normalize, orig_hw=im_size + ) + in_labels = torch.ones( + in_points.shape[0], dtype=torch.int, device=in_points.device + ) + masks, iou_preds, low_res_masks = self.predictor._predict( + in_points[:, None, :], + in_labels[:, None], + multimask_output=self.multimask_output, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=points.repeat_interleave(masks.shape[1], dim=0), + low_res_masks=low_res_masks.flatten(0, 1), + ) + del masks + + if not self.use_m2m: + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate and filter by stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + else: + # One step refinement using previous mask predictions + in_points = self.predictor._transforms.transform_coords( + data["points"], normalize=normalize, orig_hw=im_size + ) + labels = torch.ones( + in_points.shape[0], dtype=torch.int, device=in_points.device + ) + masks, ious = self.refine_with_m2m( + in_points, labels, data["low_res_masks"], self.points_per_batch + ) + data["masks"] = masks.squeeze(1) + data["iou_preds"] = ious.squeeze(1) + + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + data["stability_score"] = calculate_stability_score( + data["masks"], self.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge( + data["boxes"], crop_box, [0, 0, orig_w, orig_h] + ) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros_like(boxes[:, 0]), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data + + def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch): + new_masks = [] + new_iou_preds = [] + + for cur_points, cur_point_labels, low_res_mask in batch_iterator( + points_per_batch, points, point_labels, low_res_masks + ): + best_masks, best_iou_preds, _ = self.predictor._predict( + cur_points[:, None, :], + cur_point_labels[:, None], + mask_input=low_res_mask[:, None, :], + multimask_output=False, + return_logits=True, + ) + new_masks.append(best_masks) + new_iou_preds.append(best_iou_preds) + masks = torch.cat(new_masks, dim=0) + return masks, torch.cat(new_iou_preds, dim=0) diff --git a/sam2/benchmark.py b/sam2/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..6519534c8619e04b9a632859a5128ad2cee34c13 --- /dev/null +++ b/sam2/benchmark.py @@ -0,0 +1,92 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import os +import time + +import numpy as np +import torch +from tqdm import tqdm + +from sam2.build_sam import build_sam2_video_predictor + +# Only cuda supported +assert torch.cuda.is_available() +device = torch.device("cuda") + +torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__() +if torch.cuda.get_device_properties(0).major >= 8: + # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + +# Config and checkpoint +sam2_checkpoint = "checkpoints/sam2.1_hiera_base_plus.pt" +model_cfg = "configs/sam2.1/sam2.1_hiera_b+.yaml" + +# Build video predictor with vos_optimized=True setting +predictor = build_sam2_video_predictor( + model_cfg, sam2_checkpoint, device=device, vos_optimized=True +) + + +# Initialize with video +video_dir = "notebooks/videos/bedroom" +# scan all the JPEG frame names in this directory +frame_names = [ + p + for p in os.listdir(video_dir) + if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"] +] +frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) +inference_state = predictor.init_state(video_path=video_dir) + + +# Number of runs, warmup etc +warm_up, runs = 5, 25 +verbose = True +num_frames = len(frame_names) +total, count = 0, 0 +torch.cuda.empty_cache() + +# We will select an object with a click. +# See video_predictor_example.ipynb for more detailed explanation +ann_frame_idx, ann_obj_id = 0, 1 +# Add a positive click at (x, y) = (210, 350) +# For labels, `1` means positive click +points = np.array([[210, 350]], dtype=np.float32) +labels = np.array([1], np.int32) + +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# Warmup and then average FPS over several runs +with torch.autocast("cuda", torch.bfloat16): + with torch.inference_mode(): + for i in tqdm(range(runs), disable=not verbose, desc="Benchmarking"): + start = time.time() + # Start tracking + for ( + out_frame_idx, + out_obj_ids, + out_mask_logits, + ) in predictor.propagate_in_video(inference_state): + pass + + end = time.time() + total += end - start + count += 1 + if i == warm_up - 1: + print("Warmup FPS: ", count * num_frames / total) + total = 0 + count = 0 + +print("FPS: ", count * num_frames / total) diff --git a/sam2/build_sam.py b/sam2/build_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c79b6462848a185770f60e343f8a23ab9489ea --- /dev/null +++ b/sam2/build_sam.py @@ -0,0 +1,179 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import os + +import torch +from hydra import compose +from hydra.utils import instantiate +from omegaconf import OmegaConf + +import sam2 + +# Check if the user is running Python from the parent directory of the sam2 repo +# (i.e. the directory where this repo is cloned into) -- this is not supported since +# it could shadow the sam2 package and cause issues. +if os.path.isdir(os.path.join(sam2.__path__[0], "sam2")): + # If the user has "sam2/sam2" in their path, they are likey importing the repo itself + # as "sam2" rather than importing the "sam2" python package (i.e. "sam2/sam2" directory). 
+ # This typically happens because the user is running Python from the parent directory + # that contains the sam2 repo they cloned. + raise RuntimeError( + "You're likely running Python from the parent directory of the sam2 repository " + "(i.e. the directory where https://github.com/facebookresearch/sam2 is cloned into). " + "This is not supported since the `sam2` Python package could be shadowed by the " + "repository name (the repository is also named `sam2` and contains the Python package " + "in `sam2/sam2`). Please run Python from another directory (e.g. from the repo dir " + "rather than its parent dir, or from your home directory) after installing SAM 2." + ) + + +HF_MODEL_ID_TO_FILENAMES = { + "facebook/sam2-hiera-tiny": ( + "configs/sam2/sam2_hiera_t.yaml", + "sam2_hiera_tiny.pt", + ), + "facebook/sam2-hiera-small": ( + "configs/sam2/sam2_hiera_s.yaml", + "sam2_hiera_small.pt", + ), + "facebook/sam2-hiera-base-plus": ( + "configs/sam2/sam2_hiera_b+.yaml", + "sam2_hiera_base_plus.pt", + ), + "facebook/sam2-hiera-large": ( + "configs/sam2/sam2_hiera_l.yaml", + "sam2_hiera_large.pt", + ), + "facebook/sam2.1-hiera-tiny": ( + "configs/sam2.1/sam2.1_hiera_t.yaml", + "sam2.1_hiera_tiny.pt", + ), + "facebook/sam2.1-hiera-small": ( + "configs/sam2.1/sam2.1_hiera_s.yaml", + "sam2.1_hiera_small.pt", + ), + "facebook/sam2.1-hiera-base-plus": ( + "configs/sam2.1/sam2.1_hiera_b+.yaml", + "sam2.1_hiera_base_plus.pt", + ), + "facebook/sam2.1-hiera-large": ( + "configs/sam2.1/sam2.1_hiera_l.yaml", + "sam2.1_hiera_large.pt", + ), +} + + +def build_sam2( + config_file, + ckpt_path=None, + device="cuda", + mode="eval", + hydra_overrides_extra=[], + apply_postprocessing=True, + **kwargs, +): + + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + ] + # Read config and init model + try: + cfg = compose(config_name=config_file) + except Exception as e: + logging.error(f"Error loading config: {e}") + cfg = compose(config_name=config_file, overrides=hydra_overrides_extra) + + OmegaConf.resolve(cfg) + model = instantiate(cfg.model, _recursive_=True) + _load_checkpoint(model, ckpt_path) + model = model.to(device) + if mode == "eval": + model.eval() + return model + + +def build_sam2_video_predictor( + config_file, + ckpt_path=None, + device="cuda", + mode="eval", + hydra_overrides_extra=[], + apply_postprocessing=True, + vos_optimized=False, + **kwargs, +): + hydra_overrides = [ + "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor", + ] + if vos_optimized: + hydra_overrides = [ + "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictorVOS", + "++model.compile_image_encoder=True", # Let sam2_base handle this + ] + + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the 
encoded masks are exactly as what users see from clicking + "++model.binarize_mask_from_pts_for_mem_enc=true", + # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution) + "++model.fill_hole_area=8", + ] + hydra_overrides.extend(hydra_overrides_extra) + + # Read config and init model + cfg = compose(config_name=config_file, overrides=hydra_overrides) + OmegaConf.resolve(cfg) + model = instantiate(cfg.model, _recursive_=True) + _load_checkpoint(model, ckpt_path) + model = model.to(device) + if mode == "eval": + model.eval() + return model + + +def _hf_download(model_id): + from huggingface_hub import hf_hub_download + + config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id] + ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name) + return config_name, ckpt_path + + +def build_sam2_hf(model_id, **kwargs): + config_name, ckpt_path = _hf_download(model_id) + return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs) + + +def build_sam2_video_predictor_hf(model_id, **kwargs): + config_name, ckpt_path = _hf_download(model_id) + return build_sam2_video_predictor( + config_file=config_name, ckpt_path=ckpt_path, **kwargs + ) + + +def _load_checkpoint(model, ckpt_path): + if ckpt_path is not None: + sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"] + missing_keys, unexpected_keys = model.load_state_dict(sd) + if missing_keys: + logging.error(missing_keys) + raise RuntimeError() + if unexpected_keys: + logging.error(unexpected_keys) + raise RuntimeError() + logging.info("Loaded checkpoint sucessfully") diff --git a/sam2/colorblind.py b/sam2/colorblind.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8d298d5c1de8b48b1d4a47a704a5be61704206 --- /dev/null +++ b/sam2/colorblind.py @@ -0,0 +1,379 @@ +""" +Adapted from "The Color Blind Simulation function" by Matthew Wickline +and the Human - Computer Interaction Resource Network (http://hcirn.com/), 2000 - 2001. 
+""" +import numpy as np + +rBlind = { + "protan": {"cpu": 0.735, "cpv": 0.265, "am": 1.273463, "ayi": -0.073894}, + "deutan": {"cpu": 1.14, "cpv": -0.14, "am": 0.968437, "ayi": 0.003331}, + "tritan": {"cpu": 0.171, "cpv": -0.003, "am": 0.062921, "ayi": 0.292119}, +} + + +def rgb2xyz(rgb): + r = rgb[0] + g = rgb[1] + b = rgb[2] + + x = 0.430574 * r + 0.341550 * g + 0.178325 * b + y = 0.222015 * r + 0.706655 * g + 0.071330 * b + z = 0.020183 * r + 0.129553 * g + 0.939180 * b + + return x, y, z + + +def xyz2rgb(xyz): + x = xyz[0] + y = xyz[1] + z = xyz[2] + + r = 3.063218 * x - 1.393325 * y - 0.475802 * z + g = -0.969243 * x + 1.875966 * y + 0.041555 * z + b = 0.067871 * x - 0.228834 * y + 1.069251 * z + + return r, g, b + + +def anomylize(a, b): + v = 1.75 + d = v * 1 + 1 + + return ( + (v * b[0] + a[0] * 1) / d, + (v * b[1] + a[1] * 1) / d, + (v * b[2] + a[2] * 1) / d, + ) + + +def monochrome(rgb): + z = rgb[0] * 0.299 + rgb[1] * 0.587 + rgb[2] * 0.114 + return z, z, z + + +def blindMK(rgb, t): + gamma = 2.2 + wx = 0.312713 + wy = 0.329016 + wz = 0.358271 + + r = rgb[0] + g = rgb[1] + b = rgb[2] + + c_rgb = (r**gamma, g**gamma, b**gamma) + c_xyz = rgb2xyz(c_rgb) + + sum_xyz = sum(c_xyz) + + c_u = 0 + c_v = 0 + + if sum_xyz != 0: + c_u = c_xyz[0] / sum_xyz + c_v = c_xyz[1] / sum_xyz + + nx = wx * c_xyz[1] / wy + nz = wz * c_xyz[1] / wy + + d_y = 0 + + if c_u < rBlind[t]["cpu"]: + clm = (rBlind[t]["cpv"] - c_v) / (rBlind[t]["cpu"] - c_u) + else: + clm = (c_v - rBlind[t]["cpv"]) / (c_u - rBlind[t]["cpu"]) + + clyi = c_v - c_u * clm + d_u = (rBlind[t]["ayi"] - clyi) / (clm - rBlind[t]["am"]) + d_v = (clm * d_u) + clyi + + s_x = d_u * c_xyz[1] / d_v + s_y = c_xyz[1] + s_z = (1 - (d_u + d_v)) * c_xyz[1] / d_v + + s_rgb = xyz2rgb((s_x, s_y, s_z)) + + d_x = nx - s_x + d_z = nz - s_z + + d_rgb = xyz2rgb((d_x, d_y, d_z)) + + if d_rgb[0]: + const = 0 if s_rgb[0] < 0 else 1 + adjr = (const - s_rgb[0]) / d_rgb[0] + else: + adjr = 0 + + if d_rgb[1]: + const = 0 if s_rgb[1] < 0 else 1 + adjg = (const - s_rgb[1]) / d_rgb[1] + else: + adjg = 0 + + if d_rgb[2]: + const = 0 if s_rgb[2] < 0 else 1 + adjb = (const - s_rgb[2]) / d_rgb[2] + else: + adjb = 0 + + adjust = max( + [ + 0 if adjr > 1 or adjr < 0 else adjr, + 0 if adjg > 1 or adjg < 0 else adjg, + 0 if adjb > 1 or adjb < 0 else adjb, + ] + ) + + s_r = s_rgb[0] + (adjust * d_rgb[0]) + s_g = s_rgb[1] + (adjust * d_rgb[1]) + s_b = s_rgb[2] + (adjust * d_rgb[2]) + + def z(v): + if v <= 0: + const = 0.0 + elif v >= 1: + const = 1.0 + else: + const = v ** (1 / gamma) + + return const + + return z(s_r), z(s_g), z(s_b) + + +fBlind = { + "Normal": lambda v: v, + "Protanopia": lambda v: blindMK(v, "protan"), + "Protanomaly": lambda v: anomylize(v, blindMK(v, "protan")), + "Deuteranopia": lambda v: blindMK(v, "deutan"), + "Deuteranomaly": lambda v: anomylize(v, blindMK(v, "deutan")), + "Tritanopia": lambda v: blindMK(v, "tritan"), + "Tritanomaly": lambda v: anomylize(v, blindMK(v, "tritan")), + "Achromatopsia": lambda v: monochrome(v), + "Achromatomaly": lambda v: anomylize(v, monochrome(v)), +} + + +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ + +def simulate_image(img_path, colorblind_type): + """ + + :param img_path: + + :param colorblind_type: Type of colourblindness to simulate, can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :return: + """ + import matplotlib.image as mpimg + import matplotlib.pyplot as plt + + filter_function = fBlind[colorblind_type] + + img = mpimg.imread(img_path) + n_rows = img.shape[0] + n_columns = img.shape[1] + + filtered_img = np.zeros((n_rows, n_columns, 3)) + + for r in range(n_rows): + for c in range(n_columns): + filtered_img[r, c] = filter_function(img[r, c, 0:3]) + + fig, axes = plt.subplots(1, 2, figsize=(12, 6)) + + axes[0].imshow(img) + axes[1].imshow(filtered_img) + + axes[0].axis("off") + axes[1].axis("off") + + axes[0].set_title("Normal Vision") + axes[1].set_title("With " + colorblind_type) + + plt.show() + + +def colorblind_filter(color, colorblind_type="Deuteranomaly"): + """ + Transforms an (r,g,b) colour into a simulation of how a person with colourblindnes + would see that colour. + + :param color: rgb colour tuple to convert + + :param colorblind_type: Type of colourblindness to simulate, can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :return: + """ + filter_function = fBlind[colorblind_type] + + return filter_function(color) + + +def simulate_colors(colors, colorblind_type="Deuteranomaly", one_row=None, show=True): + """ + Simulate the appearance of colors with and without colourblindness. + + :param colors: A list of (r,g,b) colour tuples, with r, g andb floats between 0 + and 1. + + :param colorblind_type: Type of colourblindness to simulate, can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :param one_row: If True display colours on one row, if False as a grid. If + one_row=None a grid is used when there are more than 8 colours. + + :param show: if True, calls ``plt.show()``. 
+ + :return: + """ + import matplotlib.pyplot as plt + + from distinctipy import distinctipy + + filtered_colors = [colorblind_filter(color, colorblind_type) for color in colors] + + fig, axes = plt.subplots(1, 2, figsize=(8, 4)) + + distinctipy.color_swatch( + colors, ax=axes[0], one_row=one_row, title="Viewed with Normal Sight" + ) + + distinctipy.color_swatch( + filtered_colors, + ax=axes[1], + one_row=one_row, + title="Viewed with " + colorblind_type + " Colour Blindness", + ) + + if show: + plt.show() + + +def simulate_clusters( + dataset="s2", + colorblind_type="Deuteranomaly", + colorblind_distinct=False, + show=True, +): + """ + Simulates the appearance of an example clustering dataset with and without + colourblindness. + + :param dataset: The dataset to display, the options are: + + * s1, s2, s3, s4: 15 clusters with increasing overlaps from s1 to s4 + * a1: 20 clusters + * a2: 35 clusters + * a3: 50 clusters + * b1: 100 clusters + + :param colorblind_type: Type of colourblindness to simulate, can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :param colorblind_distinct: If True generate colours to be as distinct as possible + for colorblind_type. Else generate colours that are as distinct as possible for + normal vision. + + :param show: if True, calls ``plt.show()``. 
+ + :return: + """ + import matplotlib.pyplot as plt + import pandas as pd + + from distinctipy import distinctipy + + if dataset not in ("s1", "s2", "s3", "s4", "a1", "a2", "a3", "b1"): + raise ValueError("dataset must be s1, s2, s3, s4, a1, a2, a3 or b1") + + URL = ( + "https://raw.githubusercontent.com/alan-turing-institute/distinctipy/" + "main/distinctipy/datasets/" + ) + df = pd.read_csv(URL + dataset + ".csv") + + if colorblind_distinct: + orig_colors = distinctipy.get_colors( + df["cluster"].nunique(), colorblind_type=colorblind_type + ) + else: + orig_colors = distinctipy.get_colors(df["cluster"].nunique()) + + orig_cmap = distinctipy.get_colormap(orig_colors) + + filtered_colors = [ + colorblind_filter(color, colorblind_type) for color in orig_colors + ] + filtered_cmap = distinctipy.get_colormap(filtered_colors) + + fig, axes = plt.subplots(1, 2, figsize=(10, 5)) + fig.tight_layout(rect=[0, 0.03, 1, 0.95]) + fig.suptitle(str(df["cluster"].nunique()) + " clusters", fontsize=20) + + axes[0].scatter(df["x"], df["y"], c=df["cluster"], cmap=orig_cmap, s=6) + axes[0].get_xaxis().set_visible(False) + axes[0].get_yaxis().set_visible(False) + axes[0].set_title("With Normal Vision") + + axes[1].scatter(df["x"], df["y"], c=df["cluster"], cmap=filtered_cmap, s=6) + axes[1].get_xaxis().set_visible(False) + axes[1].get_yaxis().set_visible(False) + axes[1].set_title("With " + colorblind_type + " Colourblindness") + + if show: + plt.show() + + +def _main(): + from distinctipy import distinctipy + + colors = distinctipy.get_colors(36) + simulate_colors(colors, "Deuteranomaly") + + +if __name__ == "__main__": + _main() \ No newline at end of file diff --git a/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml b/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c1bde042c39212ed2782e769f3036a03a967799 --- /dev/null +++ b/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml @@ -0,0 +1,116 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: 
sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 512 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2.1/sam2.1_hiera_l.yaml b/sam2/configs/sam2.1/sam2.1_hiera_l.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23073ea7a95901be656b3c6d1a66ce8736ab7ad3 --- /dev/null +++ b/sam2/configs/sam2.1/sam2.1_hiera_l.yaml @@ -0,0 +1,120 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 144 + num_heads: 2 + stages: [2, 6, 36, 4] + global_att_blocks: [23, 33, 43] + window_pos_embed_bkg_spatial_size: [7, 7] + window_spec: [8, 4, 16, 8] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [1152, 576, 288, 144] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 
+ kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2.1/sam2.1_hiera_s.yaml b/sam2/configs/sam2.1/sam2.1_hiera_s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd8d40465b18b3de39b0a565aca712306306c4ed --- /dev/null +++ b/sam2/configs/sam2.1/sam2.1_hiera_s.yaml @@ -0,0 +1,119 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 11, 2] + global_att_blocks: [7, 10, 13] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 
10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2.1/sam2.1_hiera_t.yaml b/sam2/configs/sam2.1/sam2.1_hiera_t.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e762aec932f26436d13798f3feb3ec82c360a943 --- /dev/null +++ b/sam2/configs/sam2.1/sam2.1_hiera_t.yaml @@ -0,0 +1,121 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 7, 2] + global_att_blocks: [5, 7, 9] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + 
pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + # SAM decoder + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # HieraT does not currently support compilation, should always be set to False + compile_image_encoder: False diff --git a/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_1024_prompt.yaml b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_1024_prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef690bfe5b6eb7185c567a01b581a3daeed06d8d --- /dev/null +++ b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_1024_prompt.yaml @@ -0,0 +1,346 @@ +# @package _global_ + +scratch: + resolution: 1024 + train_batch_size: 2 + num_train_workers: 10 + num_frames: 1 + max_num_objects: 5 + base_lr: 5.0e-6 + vision_lr: 3.0e-06 + phases_per_epoch: 1 + num_epochs: 40 + +dataset: + # PATHS to Dataset + img_folder: /mnt/personal/purkrmir/data/COCO/original/train2017/ # PATH to MOSE JPEGImages folder + gt_folder: /mnt/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + # img_folder: /datagrid/personal/purkrmir/data/COCO/original/val2017/ # PATH to MOSE JPEGImages folder + # gt_folder: /datagrid/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + file_list_txt: null # Optional PATH to filelist containing a subset of videos to be used for training + multiplier: 2 + +# Video transforms +vos: + train_transforms: + - _target_: training.dataset.transforms.ComposeAPI + transforms: + - _target_: training.dataset.transforms.RandomPoseJitter + location_jitter: 0.6 + 
confidence_jitter: 0.5 + - _target_: training.dataset.transforms.RandomHorizontalFlip + consistent_transform: True + - _target_: training.dataset.transforms.RandomAffine + degrees: 25 + shear: 20 + image_interpolation: bilinear + consistent_transform: True + - _target_: training.dataset.transforms.RandomResizeAPI + sizes: ${scratch.resolution} + square: true + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: True + brightness: 0.1 + contrast: 0.03 + saturation: 0.03 + hue: null + - _target_: training.dataset.transforms.RandomGrayscale + p: 0.05 + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: False + brightness: 0.1 + contrast: 0.05 + saturation: 0.05 + hue: null + - _target_: training.dataset.transforms.ToTensorAPI + - _target_: training.dataset.transforms.NormalizeAPI + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + +trainer: + _target_: training.trainer.Trainer + mode: train_only + max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}} + accelerator: cuda + seed_value: 123 + unfreeze_prompt: True + unfreeze_decoder: False + + model: + _target_: training.model.sam2.SAM2Train + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + drop_path_rate: 0.1 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: ${scratch.resolution} + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + 
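The recurring pair sigmoid_scale_for_mem_enc: 20.0 / sigmoid_bias_for_mem_enc: -10.0 controls how predicted mask logits are converted into the mask signal fed to the memory encoder. A minimal numeric sketch, assuming the scale and bias are applied to the sigmoid output (which is how the config comment reads; the authoritative code path lives in sam2.modeling.sam2_base):

import torch

def mask_for_memory(mask_logits: torch.Tensor,
                    scale: float = 20.0, bias: float = -10.0) -> torch.Tensor:
    # Squash raw mask logits into (0, 1), then rescale to roughly (-10, +10),
    # giving the memory encoder a sharply separated foreground/background input.
    return torch.sigmoid(mask_logits) * scale + bias

x = torch.tensor([-8.0, 0.0, 8.0])   # confident background, uncertain, confident foreground
print(mask_for_memory(x))            # ~[-10.0, 0.0, 10.0]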
directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # compile_image_encoder: False + + ####### Training specific params ####### + # box/point input and corrections + prob_to_use_pt_input_for_train: 0.5 + prob_to_use_pt_input_for_eval: 0.0 + prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points + prob_to_use_box_input_for_eval: 0.0 + prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors + num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame) + num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame + rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2 + add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame) + # maximum 2 initial conditioning frames + num_init_cond_frames_for_train: 2 + rand_init_cond_frames_for_train: True # random 1~2 + num_correction_pt_per_frame: 7 + use_act_ckpt_iterative_pt_sampling: false + + + + num_init_cond_frames_for_eval: 1 # only mask on the first frame + forward_backbone_per_frame_for_eval: True + + + data: + train: + _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset + phases_per_epoch: ${scratch.phases_per_epoch} + batch_sizes: + - ${scratch.train_batch_size} + + datasets: + - _target_: training.dataset.utils.RepeatFactorWrapper + dataset: + _target_: training.dataset.utils.ConcatDataset + datasets: + - _target_: training.dataset.vos_dataset.VOSDataset + transforms: ${vos.train_transforms} + training: true + video_dataset: + _target_: training.dataset.vos_raw_dataset.COCORawDataset + img_folder: ${dataset.img_folder} + gt_folder: ${dataset.gt_folder} + file_list_txt: ${dataset.file_list_txt} + sampler: + _target_: training.dataset.vos_sampler.RandomUniformSampler + num_frames: ${scratch.num_frames} + max_num_objects: ${scratch.max_num_objects} + multiplier: ${dataset.multiplier} + shuffle: True + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: + _target_: training.utils.data_utils.collate_fn + _partial_: true + dict_key: all + + optim: + amp: + enabled: False + amp_dtype: float16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: training.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: training.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: 0.9 + apply_to: 'image_encoder.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: 
+ - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.base_lr} + end_value: ${divide:${scratch.base_lr},10} + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.vision_lr} + end_value: ${divide:${scratch.vision_lr},10} + param_names: + - 'image_encoder.*' + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.1 + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + loss: + all: + _target_: training.loss_fns.MultiStepMultiMasksAndIous + weight_dict: + loss_mask: 20 + loss_dice: 1 + loss_iou: 1 + loss_class: 1 + supervise_all_iou: true + iou_use_l1_loss: true + pred_obj_scores: true + focal_gamma_obj_score: 0.0 + focal_alpha_obj_score: -1.0 + + distributed: + backend: nccl + find_unused_parameters: True + + logging: + tensorboard_writer: + _target_: training.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + log_dir: ${launcher.experiment_log_dir}/logs + log_freq: 10 + + # initialize from a SAM 2 checkpoint + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + model_weight_initializer: + _partial_: True + _target_: training.utils.checkpoint_utils.load_state_dict_into_model + strict: True + ignore_unexpected_keys: null + ignore_missing_keys: null + + state_dict: + _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels + checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint + ckpt_state_dict_keys: ['model'] + +launcher: + num_nodes: 1 + gpus_per_node: 8 + experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name} + +# SLURM args if running on a cluster +submitit: + partition: null + account: null + qos: null + cpus_per_task: 10 + use_cluster: false + timeout_hour: 24 + name: null + port_range: [10000, 65000] + diff --git a/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune.yaml b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e0c2d6a10703185254d4e8fe41e3d44fc8f495a --- /dev/null +++ b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune.yaml @@ -0,0 +1,346 @@ +# @package _global_ + +scratch: + resolution: 512 + train_batch_size: 4 + num_train_workers: 10 + num_frames: 1 + max_num_objects: 5 + base_lr: 5.0e-6 + vision_lr: 3.0e-06 + phases_per_epoch: 1 + num_epochs: 40 + +dataset: + # PATHS to Dataset + img_folder: /mnt/personal/purkrmir/data/COCO/original/train2017/ # PATH to MOSE JPEGImages folder + gt_folder: /mnt/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + # img_folder: /datagrid/personal/purkrmir/data/COCO/original/val2017/ # PATH to MOSE JPEGImages folder + # gt_folder: /datagrid/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + file_list_txt: null # Optional PATH to filelist containing a subset of videos to be used for training + multiplier: 2 + +# Video transforms +vos: + train_transforms: + - _target_: training.dataset.transforms.ComposeAPI + transforms: + - _target_: training.dataset.transforms.RandomPoseJitter + location_jitter: 0.6 + confidence_jitter: 0.5 + - _target_: training.dataset.transforms.RandomHorizontalFlip + 
consistent_transform: True + - _target_: training.dataset.transforms.RandomAffine + degrees: 25 + shear: 20 + image_interpolation: bilinear + consistent_transform: True + - _target_: training.dataset.transforms.RandomResizeAPI + sizes: ${scratch.resolution} + square: true + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: True + brightness: 0.1 + contrast: 0.03 + saturation: 0.03 + hue: null + - _target_: training.dataset.transforms.RandomGrayscale + p: 0.05 + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: False + brightness: 0.1 + contrast: 0.05 + saturation: 0.05 + hue: null + - _target_: training.dataset.transforms.ToTensorAPI + - _target_: training.dataset.transforms.NormalizeAPI + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + +trainer: + _target_: training.trainer.Trainer + mode: train_only + max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}} + accelerator: cuda + seed_value: 123 + unfreeze_prompt: True + unfreeze_decoder: False + + model: + _target_: training.model.sam2.SAM2Train + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + drop_path_rate: 0.1 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: ${scratch.resolution} + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in 
the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # compile_image_encoder: False + + ####### Training specific params ####### + # box/point input and corrections + prob_to_use_pt_input_for_train: 0.5 + prob_to_use_pt_input_for_eval: 0.0 + prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points + prob_to_use_box_input_for_eval: 0.0 + prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors + num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame) + num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame + rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2 + add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame) + # maximum 2 initial conditioning frames + num_init_cond_frames_for_train: 2 + rand_init_cond_frames_for_train: True # random 1~2 + num_correction_pt_per_frame: 7 + use_act_ckpt_iterative_pt_sampling: false + + + + num_init_cond_frames_for_eval: 1 # only mask on the first frame + forward_backbone_per_frame_for_eval: True + + + data: + train: + _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset + phases_per_epoch: ${scratch.phases_per_epoch} + batch_sizes: + - ${scratch.train_batch_size} + + datasets: + - _target_: training.dataset.utils.RepeatFactorWrapper + dataset: + _target_: training.dataset.utils.ConcatDataset + datasets: + - _target_: training.dataset.vos_dataset.VOSDataset + transforms: ${vos.train_transforms} + training: true + video_dataset: + _target_: training.dataset.vos_raw_dataset.COCORawDataset + img_folder: ${dataset.img_folder} + gt_folder: ${dataset.gt_folder} + file_list_txt: ${dataset.file_list_txt} + sampler: + _target_: training.dataset.vos_sampler.RandomUniformSampler + num_frames: ${scratch.num_frames} + max_num_objects: ${scratch.max_num_objects} + multiplier: ${dataset.multiplier} + shuffle: True + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: + _target_: training.utils.data_utils.collate_fn + _partial_: true + dict_key: all + + optim: + amp: + enabled: False + amp_dtype: float16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: training.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: training.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: 0.9 + apply_to: 'image_encoder.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: 
${scratch.base_lr} + end_value: ${divide:${scratch.base_lr},10} + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.vision_lr} + end_value: ${divide:${scratch.vision_lr},10} + param_names: + - 'image_encoder.*' + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.1 + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + loss: + all: + _target_: training.loss_fns.MultiStepMultiMasksAndIous + weight_dict: + loss_mask: 20 + loss_dice: 1 + loss_iou: 1 + loss_class: 1 + supervise_all_iou: true + iou_use_l1_loss: true + pred_obj_scores: true + focal_gamma_obj_score: 0.0 + focal_alpha_obj_score: -1.0 + + distributed: + backend: nccl + find_unused_parameters: True + + logging: + tensorboard_writer: + _target_: training.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + log_dir: ${launcher.experiment_log_dir}/logs + log_freq: 10 + + # initialize from a SAM 2 checkpoint + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + model_weight_initializer: + _partial_: True + _target_: training.utils.checkpoint_utils.load_state_dict_into_model + strict: True + ignore_unexpected_keys: null + ignore_missing_keys: null + + state_dict: + _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels + checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint + ckpt_state_dict_keys: ['model'] + +launcher: + num_nodes: 1 + gpus_per_node: 8 + experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name} + +# SLURM args if running on a cluster +submitit: + partition: null + account: null + qos: null + cpus_per_task: 10 + use_cluster: false + timeout_hour: 24 + name: null + port_range: [10000, 65000] + diff --git a/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune_prompt+decoder.yaml b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune_prompt+decoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..48c020084162252c3af061592325dfbcd60de2b6 --- /dev/null +++ b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_COCO_finetune_prompt+decoder.yaml @@ -0,0 +1,346 @@ +# @package _global_ + +scratch: + resolution: 512 + train_batch_size: 4 + num_train_workers: 10 + num_frames: 1 + max_num_objects: 5 + base_lr: 5.0e-6 + vision_lr: 3.0e-06 + phases_per_epoch: 1 + num_epochs: 40 + +dataset: + # PATHS to Dataset + img_folder: /mnt/personal/purkrmir/data/COCO/original/train2017/ # PATH to MOSE JPEGImages folder + gt_folder: /mnt/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + # img_folder: /datagrid/personal/purkrmir/data/COCO/original/train2017/ # PATH to MOSE JPEGImages folder + # gt_folder: /datagrid/personal/purkrmir/data/COCO/original/annotations/ # PATH to MOSE Annotations folder + file_list_txt: null # Optional PATH to filelist containing a subset of videos to be used for training + multiplier: 2 + +# Video transforms +vos: + train_transforms: + - _target_: training.dataset.transforms.ComposeAPI + transforms: + - _target_: training.dataset.transforms.RandomPoseJitter + location_jitter: 0.6 + confidence_jitter: 0.5 + - _target_: training.dataset.transforms.RandomHorizontalFlip + consistent_transform: True + - _target_: 
training.dataset.transforms.RandomAffine + degrees: 25 + shear: 20 + image_interpolation: bilinear + consistent_transform: True + - _target_: training.dataset.transforms.RandomResizeAPI + sizes: ${scratch.resolution} + square: true + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: True + brightness: 0.1 + contrast: 0.03 + saturation: 0.03 + hue: null + - _target_: training.dataset.transforms.RandomGrayscale + p: 0.05 + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: False + brightness: 0.1 + contrast: 0.05 + saturation: 0.05 + hue: null + - _target_: training.dataset.transforms.ToTensorAPI + - _target_: training.dataset.transforms.NormalizeAPI + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + +trainer: + _target_: training.trainer.Trainer + mode: train_only + max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}} + accelerator: cuda + seed_value: 123 + unfreeze_prompt: True + unfreeze_decoder: True + + model: + _target_: training.model.sam2.SAM2Train + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + drop_path_rate: 0.1 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: ${scratch.resolution} + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + 
use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # compile_image_encoder: False + + ####### Training specific params ####### + # box/point input and corrections + prob_to_use_pt_input_for_train: 0.5 + prob_to_use_pt_input_for_eval: 0.0 + prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points + prob_to_use_box_input_for_eval: 0.0 + prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors + num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame) + num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame + rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2 + add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame) + # maximum 2 initial conditioning frames + num_init_cond_frames_for_train: 2 + rand_init_cond_frames_for_train: True # random 1~2 + num_correction_pt_per_frame: 7 + use_act_ckpt_iterative_pt_sampling: false + + + + num_init_cond_frames_for_eval: 1 # only mask on the first frame + forward_backbone_per_frame_for_eval: True + + + data: + train: + _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset + phases_per_epoch: ${scratch.phases_per_epoch} + batch_sizes: + - ${scratch.train_batch_size} + + datasets: + - _target_: training.dataset.utils.RepeatFactorWrapper + dataset: + _target_: training.dataset.utils.ConcatDataset + datasets: + - _target_: training.dataset.vos_dataset.VOSDataset + transforms: ${vos.train_transforms} + training: true + video_dataset: + _target_: training.dataset.vos_raw_dataset.COCORawDataset + img_folder: ${dataset.img_folder} + gt_folder: ${dataset.gt_folder} + file_list_txt: ${dataset.file_list_txt} + sampler: + _target_: training.dataset.vos_sampler.RandomUniformSampler + num_frames: ${scratch.num_frames} + max_num_objects: ${scratch.max_num_objects} + multiplier: ${dataset.multiplier} + shuffle: True + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: + _target_: training.utils.data_utils.collate_fn + _partial_: true + dict_key: all + + optim: + amp: + enabled: False + amp_dtype: float16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: training.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: training.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: 0.9 + apply_to: 'image_encoder.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.base_lr} + 
end_value: ${divide:${scratch.base_lr},10} + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.vision_lr} + end_value: ${divide:${scratch.vision_lr},10} + param_names: + - 'image_encoder.*' + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.1 + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + loss: + all: + _target_: training.loss_fns.MultiStepMultiMasksAndIous + weight_dict: + loss_mask: 20 + loss_dice: 1 + loss_iou: 1 + loss_class: 1 + supervise_all_iou: true + iou_use_l1_loss: true + pred_obj_scores: true + focal_gamma_obj_score: 0.0 + focal_alpha_obj_score: -1.0 + + distributed: + backend: nccl + find_unused_parameters: True + + logging: + tensorboard_writer: + _target_: training.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + log_dir: ${launcher.experiment_log_dir}/logs + log_freq: 10 + + # initialize from a SAM 2 checkpoint + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + model_weight_initializer: + _partial_: True + _target_: training.utils.checkpoint_utils.load_state_dict_into_model + strict: True + ignore_unexpected_keys: null + ignore_missing_keys: null + + state_dict: + _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels + checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint + ckpt_state_dict_keys: ['model'] + +launcher: + num_nodes: 1 + gpus_per_node: 8 + experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name} + +# SLURM args if running on a cluster +submitit: + partition: null + account: null + qos: null + cpus_per_task: 10 + use_cluster: false + timeout_hour: 24 + name: null + port_range: [10000, 65000] + diff --git a/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e4c5a947f8a42bdcbf1048b1a16de127b3bd0e1 --- /dev/null +++ b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml @@ -0,0 +1,339 @@ +# @package _global_ + +scratch: + resolution: 512 + train_batch_size: 1 + num_train_workers: 10 + num_frames: 8 + max_num_objects: 3 + base_lr: 5.0e-6 + vision_lr: 3.0e-06 + phases_per_epoch: 1 + num_epochs: 40 + +dataset: + # PATHS to Dataset + img_folder: /datagrid/personal/purkrmir/data/MOSE/train/JPEGImages/ # PATH to MOSE JPEGImages folder + gt_folder: /datagrid/personal/purkrmir/data/MOSE/train/Annotations/ # PATH to MOSE Annotations folder + file_list_txt: training/assets/MOSE_sample_train_list.txt # Optional PATH to filelist containing a subset of videos to be used for training + multiplier: 2 + +# Video transforms +vos: + train_transforms: + - _target_: training.dataset.transforms.ComposeAPI + transforms: + - _target_: training.dataset.transforms.RandomHorizontalFlip + consistent_transform: True + - _target_: training.dataset.transforms.RandomAffine + degrees: 25 + shear: 20 + image_interpolation: bilinear + consistent_transform: True + - _target_: training.dataset.transforms.RandomResizeAPI + sizes: ${scratch.resolution} + square: true + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: True + 
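Expressions such as ${times:${scratch.num_epochs},${scratch.phases_per_epoch}} and ${divide:${scratch.base_lr},10} in these training configs are custom OmegaConf resolvers, not built-ins, so the training entry point has to register them before the config is resolved. A self-contained illustration of the mechanism (resolver names taken from the configs; the actual registration site in this repo is assumed, not shown):

from omegaconf import OmegaConf

# Arithmetic resolvers with the names used by the training configs above.
OmegaConf.register_new_resolver("times", lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver("divide", lambda a, b: a / b, replace=True)

cfg = OmegaConf.create({
    "scratch": {"base_lr": 5.0e-6, "num_epochs": 40, "phases_per_epoch": 1},
    "max_epochs": "${times:${scratch.num_epochs},${scratch.phases_per_epoch}}",
    "end_lr": "${divide:${scratch.base_lr},10}",
})
print(cfg.max_epochs, cfg.end_lr)   # 40 and ~5e-07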
brightness: 0.1 + contrast: 0.03 + saturation: 0.03 + hue: null + - _target_: training.dataset.transforms.RandomGrayscale + p: 0.05 + consistent_transform: True + - _target_: training.dataset.transforms.ColorJitter + consistent_transform: False + brightness: 0.1 + contrast: 0.05 + saturation: 0.05 + hue: null + - _target_: training.dataset.transforms.ToTensorAPI + - _target_: training.dataset.transforms.NormalizeAPI + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + +trainer: + _target_: training.trainer.Trainer + mode: train_only + max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}} + accelerator: cuda + seed_value: 123 + + model: + _target_: training.model.sam2.SAM2Train + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + drop_path_rate: 0.1 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [64, 64] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: ${scratch.resolution} + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + 
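Each of these finetune configs repeats the same prompt-sampling knobs: prob_to_use_pt_input_for_train: 0.5 and prob_to_use_box_input_for_train: 0.5, with the inline comment noting that 0.5 * 0.5 = 0.25 of training samples end up box-prompted. A hedged illustration of that two-stage draw (function name and structure are mine, not the trainer's code):

import random

def sample_prompt_kind(p_pt_input: float = 0.5, p_box_given_pt: float = 0.5) -> str:
    # With probability p_pt_input the object is prompted by points/box instead of a
    # GT mask; of those, p_box_given_pt become a box (0.25 overall), the rest clicks.
    if random.random() >= p_pt_input:
        return "mask"
    return "box" if random.random() < p_box_given_pt else "points"

counts = {"mask": 0, "points": 0, "box": 0}
for _ in range(100_000):
    counts[sample_prompt_kind()] += 1
print(counts)   # roughly 50k mask / 25k points / 25k box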
use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # compile_image_encoder: False + + ####### Training specific params ####### + # box/point input and corrections + prob_to_use_pt_input_for_train: 0.5 + prob_to_use_pt_input_for_eval: 0.0 + prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points + prob_to_use_box_input_for_eval: 0.0 + prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors + num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame) + num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame + rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2 + add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame) + # maximum 2 initial conditioning frames + num_init_cond_frames_for_train: 2 + rand_init_cond_frames_for_train: True # random 1~2 + num_correction_pt_per_frame: 7 + use_act_ckpt_iterative_pt_sampling: false + + + + num_init_cond_frames_for_eval: 1 # only mask on the first frame + forward_backbone_per_frame_for_eval: True + + + data: + train: + _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset + phases_per_epoch: ${scratch.phases_per_epoch} + batch_sizes: + - ${scratch.train_batch_size} + + datasets: + - _target_: training.dataset.utils.RepeatFactorWrapper + dataset: + _target_: training.dataset.utils.ConcatDataset + datasets: + - _target_: training.dataset.vos_dataset.VOSDataset + transforms: ${vos.train_transforms} + training: true + video_dataset: + _target_: training.dataset.vos_raw_dataset.PNGRawDataset + img_folder: ${dataset.img_folder} + gt_folder: ${dataset.gt_folder} + file_list_txt: ${dataset.file_list_txt} + sampler: + _target_: training.dataset.vos_sampler.RandomUniformSampler + num_frames: ${scratch.num_frames} + max_num_objects: ${scratch.max_num_objects} + multiplier: ${dataset.multiplier} + shuffle: True + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: + _target_: training.utils.data_utils.collate_fn + _partial_: true + dict_key: all + + optim: + amp: + enabled: False + amp_dtype: float16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: training.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: training.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: 0.9 + apply_to: 'image_encoder.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.base_lr} + end_value: ${divide:${scratch.base_lr},10} + - scheduler: + _target_: fvcore.common.param_scheduler.CosineParamScheduler + start_value: ${scratch.vision_lr} + end_value: ${divide:${scratch.vision_lr},10} + param_names: + - 'image_encoder.*' + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.1 + - scheduler: + _target_: 
fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + loss: + all: + _target_: training.loss_fns.MultiStepMultiMasksAndIous + weight_dict: + loss_mask: 20 + loss_dice: 1 + loss_iou: 1 + loss_class: 1 + supervise_all_iou: true + iou_use_l1_loss: true + pred_obj_scores: true + focal_gamma_obj_score: 0.0 + focal_alpha_obj_score: -1.0 + + distributed: + backend: nccl + find_unused_parameters: True + + logging: + tensorboard_writer: + _target_: training.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + log_dir: ${launcher.experiment_log_dir}/logs + log_freq: 10 + + # initialize from a SAM 2 checkpoint + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + model_weight_initializer: + _partial_: True + _target_: training.utils.checkpoint_utils.load_state_dict_into_model + strict: True + ignore_unexpected_keys: null + ignore_missing_keys: null + + state_dict: + _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels + checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint + ckpt_state_dict_keys: ['model'] + +launcher: + num_nodes: 1 + gpus_per_node: 8 + experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name} + +# SLURM args if running on a cluster +submitit: + partition: null + account: null + qos: null + cpus_per_task: 10 + use_cluster: false + timeout_hour: 24 + name: null + port_range: [10000, 65000] + diff --git a/sam2/configs/sam2/sam2_hiera_b+.yaml b/sam2/configs/sam2/sam2_hiera_b+.yaml new file mode 100644 index 0000000000000000000000000000000000000000..58f3eb81554018e873f8515ecb98e36d16ac29e4 --- /dev/null +++ b/sam2/configs/sam2/sam2_hiera_b+.yaml @@ -0,0 +1,113 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: 
sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2/sam2_hiera_l.yaml b/sam2/configs/sam2/sam2_hiera_l.yaml new file mode 100644 index 0000000000000000000000000000000000000000..918667f50c3e1ad2dcf77c0c14cb4dd114cfd080 --- /dev/null +++ b/sam2/configs/sam2/sam2_hiera_l.yaml @@ -0,0 +1,117 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 144 + num_heads: 2 + stages: [2, 6, 36, 4] + global_att_blocks: [23, 33, 43] + window_pos_embed_bkg_spatial_size: [7, 7] + window_spec: [8, 4, 16, 8] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [1152, 576, 288, 144] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + 
out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2/sam2_hiera_s.yaml b/sam2/configs/sam2/sam2_hiera_s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26e5d4d39f7b2892396106005c37c7ffe6c83bc2 --- /dev/null +++ b/sam2/configs/sam2/sam2_hiera_s.yaml @@ -0,0 +1,116 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 11, 2] + global_att_blocks: [7, 10, 13] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: 
sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/sam2/configs/sam2/sam2_hiera_t.yaml b/sam2/configs/sam2/sam2_hiera_t.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a62c903aaa5f80828077c6e06a59626926570ed6 --- /dev/null +++ b/sam2/configs/sam2/sam2_hiera_t.yaml @@ -0,0 +1,118 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 7, 2] + global_att_blocks: [5, 7, 9] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + 
_target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + # SAM decoder + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # HieraT does not currently support compilation, should always be set to False + compile_image_encoder: False diff --git a/sam2/configs/samurai/__init__.py b/sam2/configs/samurai/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/sam2/configs/samurai/sam2.1_hiera_b+.yaml b/sam2/configs/samurai/sam2.1_hiera_b+.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3650edc236d02c03a5492f55e7d8cb5a946b758b --- /dev/null +++ b/sam2/configs/samurai/sam2.1_hiera_b+.yaml @@ -0,0 +1,125 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: 
sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False + # SAMURAI + samurai_mode: true + stable_frames_threshold: 15 + stable_ious_threshold: 0.3 + min_obj_score_logits: -1 + kf_score_weight: 0.25 + memory_bank_iou_threshold: 0.5 + memory_bank_obj_score_threshold: 0.0 + memory_bank_kf_score_threshold: 0.0 diff --git a/sam2/configs/samurai/sam2.1_hiera_l.yaml b/sam2/configs/samurai/sam2.1_hiera_l.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8458dbbc0261da04619ec902cffb03e3fb44499c --- /dev/null +++ b/sam2/configs/samurai/sam2.1_hiera_l.yaml @@ -0,0 +1,129 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 144 + num_heads: 2 + stages: [2, 6, 36, 4] + global_att_blocks: [23, 33, 43] + window_pos_embed_bkg_spatial_size: [7, 7] + window_spec: [8, 4, 16, 8] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [1152, 576, 288, 144] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: 
sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False + # SAMURAI + samurai_mode: true + stable_frames_threshold: 15 + stable_ious_threshold: 0.3 + min_obj_score_logits: -1 + kf_score_weight: 0.15 + memory_bank_iou_threshold: 0.5 + memory_bank_obj_score_threshold: 0.0 + memory_bank_kf_score_threshold: 0.0 \ No newline at end of file diff --git a/sam2/configs/samurai/sam2.1_hiera_s.yaml b/sam2/configs/samurai/sam2.1_hiera_s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d703cf7651229858f28204f3f2f9a541a3a88040 --- /dev/null +++ b/sam2/configs/samurai/sam2.1_hiera_s.yaml @@ -0,0 +1,128 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 11, 2] + global_att_blocks: [7, 10, 13] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: 
sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False + # SAMURAI + samurai_mode: true + stable_frames_threshold: 15 + stable_ious_threshold: 0.3 + min_obj_score_logits: -1 + kf_score_weight: 0.25 + memory_bank_iou_threshold: 0.5 + memory_bank_obj_score_threshold: 0.0 + memory_bank_kf_score_threshold: 0.0 \ No newline at end of file diff --git a/sam2/configs/samurai/sam2.1_hiera_t.yaml b/sam2/configs/samurai/sam2.1_hiera_t.yaml new file mode 100644 index 0000000000000000000000000000000000000000..43c1435134510b6fcc4251601b1840339fb8c92d --- /dev/null +++ b/sam2/configs/samurai/sam2.1_hiera_t.yaml @@ -0,0 +1,130 @@ +# @package _global_ + +# Model +model: + _target_: 
sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 7, 2] + global_att_blocks: [5, 7, 9] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + # SAM decoder + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + no_obj_embed_spatial: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: true + proj_tpos_enc_in_obj_ptrs: true + use_signed_tpos_enc_to_obj_ptrs: true + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # HieraT does not currently support compilation, should always be set to False + compile_image_encoder: False + # SAMURAI + samurai_mode: true + stable_frames_threshold: 15 + stable_ious_threshold: 0.3 + min_obj_score_logits: -1 
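+  # kf_score_weight roughly balances the Kalman-filter motion score against the
+  # mask affinity score when picking among candidate masks, i.e.
+  # score ~= kf_score_weight * kf_iou + (1 - kf_score_weight) * mask_iou,
+  # and the memory_bank_*_threshold values below gate which frames enter the
+  # motion-aware memory bank (see the SAMURAI code for the exact formulas).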
+ kf_score_weight: 0.25 + memory_bank_iou_threshold: 0.5 + memory_bank_obj_score_threshold: 0.0 + memory_bank_kf_score_threshold: 0.0 \ No newline at end of file diff --git a/sam2/csrc/connected_components.cu b/sam2/csrc/connected_components.cu new file mode 100644 index 0000000000000000000000000000000000000000..ced21eb32eaaadb818d441c1322b99d1bf068f45 --- /dev/null +++ b/sam2/csrc/connected_components.cu @@ -0,0 +1,289 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. + +// This source code is licensed under the license found in the +// LICENSE file in the root directory of this source tree. + +// adapted from https://github.com/zsef123/Connected_components_PyTorch +// with license found in the LICENSE_cctorch file in the root directory. +#include +#include +#include +#include +#include +#include + +// 2d +#define BLOCK_ROWS 16 +#define BLOCK_COLS 16 + +namespace cc2d { + +template +__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) { + return (bitmap >> pos) & 1; +} + +__device__ int32_t find(const int32_t* s_buf, int32_t n) { + while (s_buf[n] != n) + n = s_buf[n]; + return n; +} + +__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) { + const int32_t id = n; + while (s_buf[n] != n) { + n = s_buf[n]; + s_buf[id] = n; + } + return n; +} + +__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) { + bool done; + do { + a = find(s_buf, a); + b = find(s_buf, b); + + if (a < b) { + int32_t old = atomicMin(s_buf + b, a); + done = (old == b); + b = old; + } else if (b < a) { + int32_t old = atomicMin(s_buf + a, b); + done = (old == a); + a = old; + } else + done = true; + + } while (!done); +} + +__global__ void +init_labeling(int32_t* label, const uint32_t W, const uint32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2; + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2; + const uint32_t idx = row * W + col; + + if (row < H && col < W) + label[idx] = idx; +} + +__global__ void +merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2; + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2; + const uint32_t idx = row * W + col; + + if (row >= H || col >= W) + return; + + uint32_t P = 0; + + if (img[idx]) + P |= 0x777; + if (row + 1 < H && img[idx + W]) + P |= 0x777 << 4; + if (col + 1 < W && img[idx + 1]) + P |= 0x777 << 1; + + if (col == 0) + P &= 0xEEEE; + if (col + 1 >= W) + P &= 0x3333; + else if (col + 2 >= W) + P &= 0x7777; + + if (row == 0) + P &= 0xFFF0; + if (row + 1 >= H) + P &= 0xFF; + + if (P > 0) { + // If need check about top-left pixel(if flag the first bit) and hit the + // top-left pixel + if (hasBit(P, 0) && img[idx - W - 1]) { + union_(label, idx, idx - 2 * W - 2); // top left block + } + + if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1])) + union_(label, idx, idx - 2 * W); // top bottom block + + if (hasBit(P, 3) && img[idx + 2 - W]) + union_(label, idx, idx - 2 * W + 2); // top right block + + if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1])) + union_(label, idx, idx - 2); // just left block + } +} + +__global__ void compression(int32_t* label, const int32_t W, const int32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2; + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2; + const uint32_t idx = row * W + col; + + if (row < H && col < W) + find_n_compress(label, idx); +} + 
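+// final_labeling writes the output labels for each 2x2 block: every foreground
+// pixel receives its component's root label + 1, while background pixels get 0.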
+__global__ void final_labeling( + const uint8_t* img, + int32_t* label, + const int32_t W, + const int32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2; + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2; + const uint32_t idx = row * W + col; + + if (row >= H || col >= W) + return; + + int32_t y = label[idx] + 1; + + if (img[idx]) + label[idx] = y; + else + label[idx] = 0; + + if (col + 1 < W) { + if (img[idx + 1]) + label[idx + 1] = y; + else + label[idx + 1] = 0; + + if (row + 1 < H) { + if (img[idx + W + 1]) + label[idx + W + 1] = y; + else + label[idx + W + 1] = 0; + } + } + + if (row + 1 < H) { + if (img[idx + W]) + label[idx + W] = y; + else + label[idx + W] = 0; + } +} + +__global__ void init_counting( + const int32_t* label, + int32_t* count_init, + const int32_t W, + const int32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y); + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x); + const uint32_t idx = row * W + col; + + if (row >= H || col >= W) + return; + + int32_t y = label[idx]; + if (y > 0) { + int32_t count_idx = y - 1; + atomicAdd(count_init + count_idx, 1); + } +} + +__global__ void final_counting( + const int32_t* label, + const int32_t* count_init, + int32_t* count_final, + const int32_t W, + const int32_t H) { + const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y); + const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x); + const uint32_t idx = row * W + col; + + if (row >= H || col >= W) + return; + + int32_t y = label[idx]; + if (y > 0) { + int32_t count_idx = y - 1; + count_final[idx] = count_init[count_idx]; + } else { + count_final[idx] = 0; + } +} + +} // namespace cc2d + +std::vector get_connected_componnets( + const torch::Tensor& inputs) { + AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor"); + AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape"); + AT_ASSERTM( + inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type"); + + const uint32_t N = inputs.size(0); + const uint32_t C = inputs.size(1); + const uint32_t H = inputs.size(2); + const uint32_t W = inputs.size(3); + + AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape"); + AT_ASSERTM((H % 2) == 0, "height must be an even number"); + AT_ASSERTM((W % 2) == 0, "width must be an even number"); + + // label must be uint32_t + auto label_options = + torch::TensorOptions().dtype(torch::kInt32).device(inputs.device()); + torch::Tensor labels = torch::zeros({N, C, H, W}, label_options); + torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options); + torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options); + + dim3 grid = dim3( + ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS, + ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS); + dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS); + dim3 grid_count = + dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS); + dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + for (int n = 0; n < N; n++) { + uint32_t offset = n * H * W; + + cc2d::init_labeling<<>>( + labels.data_ptr() + offset, W, H); + cc2d::merge<<>>( + inputs.data_ptr() + offset, + labels.data_ptr() + offset, + W, + H); + cc2d::compression<<>>( + labels.data_ptr() + offset, W, H); + cc2d::final_labeling<<>>( + inputs.data_ptr() + offset, + labels.data_ptr() + offset, + W, + H); + + // get the counting of each pixel + cc2d::init_counting<<>>( + labels.data_ptr() + offset, + counts_init.data_ptr() + 
offset, + W, + H); + cc2d::final_counting<<>>( + labels.data_ptr() + offset, + counts_init.data_ptr() + offset, + counts_final.data_ptr() + offset, + W, + H); + } + + // returned values are [labels, counts] + std::vector outputs; + outputs.push_back(labels); + outputs.push_back(counts_final); + return outputs; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def( + "get_connected_componnets", + &get_connected_componnets, + "get_connected_componnets"); +} diff --git a/sam2/distinctipy.py b/sam2/distinctipy.py new file mode 100644 index 0000000000000000000000000000000000000000..2044b335e2bac2a24a4eab908b64b37be829bfa8 --- /dev/null +++ b/sam2/distinctipy.py @@ -0,0 +1,503 @@ +import math +import random + +import numpy as np + +from . import colorblind + +# pre-define interesting colours/points at corners, edges, faces and interior of +# r,g,b cube +WHITE = (1.0, 1.0, 1.0) +BLACK = (0.0, 0.0, 0.0) +RED = (1.0, 0.0, 0.0) +GREEN = (0.0, 1.0, 0.0) +BLUE = (0.0, 0.0, 1.0) +CYAN = (0.0, 1.0, 1.0) +YELLOW = (1.0, 1.0, 0.0) +MAGENTA = (1.0, 0.0, 1.0) + +CORNERS = [WHITE, BLACK, RED, GREEN, BLUE, CYAN, YELLOW, MAGENTA] + +MID_FACE = [ + (0.0, 0.5, 0.0), + (0.0, 0.0, 0.5), + (0.0, 1.0, 0.5), + (0.0, 0.5, 1.0), + (0.0, 0.5, 0.5), + (0.5, 0.0, 0.0), + (0.5, 0.5, 0.0), + (0.5, 1.0, 0.0), + (0.5, 0.0, 0.5), + (0.5, 0.0, 1.0), + (0.5, 1.0, 0.5), + (0.5, 1.0, 1.0), + (0.5, 0.5, 1.0), + (1.0, 0.5, 0.0), + (1.0, 0.0, 0.5), + (1.0, 0.5, 0.5), + (1.0, 1.0, 0.5), + (1.0, 0.5, 1.0), +] + +INTERIOR = [ + (0.5, 0.5, 0.5), + (0.75, 0.5, 0.5), + (0.25, 0.5, 0.5), + (0.5, 0.75, 0.5), + (0.5, 0.25, 0.5), + (0.5, 0.5, 0.75), + (0.5, 0.5, 0.25), +] + +POINTS_OF_INTEREST = CORNERS + MID_FACE + INTERIOR + + +_SEED_MAX = int(2**32 - 1) + + +def _ensure_rng(rng): + """ + Returns a random.Random state based on the input + """ + if rng is None: + rng = random._inst + elif isinstance(rng, int): + rng = random.Random(int(rng) % _SEED_MAX) + elif isinstance(rng, float): + rng = float(rng) + # Coerce the float into an integer + a, b = rng.as_integer_ratio() + if b == 1: + seed = a_color + else: + s = max(a.bit_length(), b.bit_length()) + seed = (b << s) | a + rng = random.Random(seed % _SEED_MAX) + elif isinstance(rng, random.Random): + rng = rng + else: + raise TypeError(type(rng)) + return rng + + +def get_random_color(pastel_factor=0.0, rng=None): + """ + Generate a random rgb colour. + + :param pastel_factor: float between 0 and 1. If pastel_factor>0 paler colours will + be generated. + + :param rng: A random integer seed or random.Random state. + If unspecified the global random is used. + + :return: color: a (r,g,b) tuple. r, g and b are values between 0 and 1. + """ + rng = _ensure_rng(rng) + + color = [(rng.random() + pastel_factor) / (1.0 + pastel_factor) for _ in range(3)] + + return tuple(color) + + +def color_distance(c1, c2): + """ + Metric to define the visual distinction between two (r,g,b) colours. + Inspired by: https://www.compuphase.com/cmetric.htm + + :param c1: (r,g,b) colour tuples. r,g and b are values between 0 and 1. + + :param c2: (r,g,b) colour tuples. r,g and b are values between 0 and 1. + + :return: distance: float representing visual distinction between c1 and c2. + Larger values = more distinct. 
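+        (Identical colours score 0.0; maximally different colours, e.g. black
+        vs. white, score 9.0.)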
+ """ + + r1, g1, b1 = c1 + r2, g2, b2 = c2 + + mean_r = (r1 + r2) / 2 + delta_r = (r1 - r2) ** 2 + delta_g = (g1 - g2) ** 2 + delta_b = (b1 - b2) ** 2 + + distance = (2 + mean_r) * delta_r + 4 * delta_g + (3 - mean_r) * delta_b + + return distance + + +def distinct_color( + exclude_colors, pastel_factor=0.0, n_attempts=1000, colorblind_type=None, rng=None +): + """ + Generate a colour as distinct as possible from the colours defined in exclude_colors + Inspired by: https://gist.github.com/adewes/5884820 + + :param exclude_colors: a list of (r,g,b) tuples. r,g,b are values between 0 and 1. + + :param pastel_factor: float between 0 and 1. If pastel_factor>0 paler colours will + be generated. + + :param n_attempts: number of random colours to generate to find most distinct colour + + :param colorblind_type: Type of colourblindness to simulate, can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :param rng: A random integer seed or random.Random state. + If unspecified the global random is used. + + :return: (r,g,b) color tuple of the generated colour with the largest minimum + color_distance to the colours in exclude_colors. + """ + rng = _ensure_rng(rng) + + if not exclude_colors: + return get_random_color(pastel_factor=pastel_factor, rng=rng) + + if colorblind_type: + exclude_colors = [ + colorblind.colorblind_filter(color, colorblind_type) + for color in exclude_colors + ] + + max_distance = None + best_color = None + + # try pre-defined corners, edges, interior points first + if pastel_factor == 0: + for color in POINTS_OF_INTEREST: + if color not in exclude_colors: + if colorblind_type: + compare_color = colorblind.colorblind_filter(color, colorblind_type) + else: + compare_color = color + + distance_to_nearest = min( + [color_distance(compare_color, c) for c in exclude_colors] + ) + + if (not max_distance) or (distance_to_nearest > max_distance): + max_distance = distance_to_nearest + best_color = color + + # try n_attempts randomly generated colours + for _ in range(n_attempts): + color = get_random_color(pastel_factor=pastel_factor, rng=rng) + + if not exclude_colors: + return color + + else: + if colorblind_type: + compare_color = colorblind.colorblind_filter(color, colorblind_type) + else: + compare_color = color + + distance_to_nearest = min( + [color_distance(compare_color, c) for c in exclude_colors] + ) + + if (not max_distance) or (distance_to_nearest > max_distance): + max_distance = distance_to_nearest + best_color = color + + return tuple(best_color) + + +def get_text_color(background_color, threshold=0.6): + """ + Choose whether black or white text will work better on top of background_color. + Inspired by: https://stackoverflow.com/a/3943023 + + :param background_color: The colour the text will be displayed on + + :param threshold: float between 0 and 1. With threshold close to 1 white text will + be chosen more often. + + :return: (0,0,0) if black text should be used or (1,1,1) if white text should be + used. 
+ """ + + r, g, b = background_color[0], background_color[1], background_color[2] + + if (r * 0.299 + g * 0.587 + b * 0.114) > threshold: + return BLACK + else: + return WHITE + + +def get_colors( + n_colors, + exclude_colors=None, + return_excluded=False, + pastel_factor=0.0, + n_attempts=1000, + colorblind_type=None, + rng=None, +): + """ + Generate a list of n visually distinct colours. + + :param n_colors: How many colours to generate + + :param exclude_colors: A list of (r,g,b) colours that new colours should be distinct + from. If exclude_colours=None then exclude_colours will be set to avoid white + and black (exclude_colours=[(0,0,0), (1,1,1)]). (r,g,b) values should be floats + between 0 and 1. + + :param return_excluded: If return_excluded=True then exclude_colors will be included + in the returned color list. Otherwise only the newly generated colors are + returned (default). + + :param pastel_factor: float between 0 and 1. If pastel_factor>0 paler colours will + be generated. + + :param n_attempts: number of random colours to generated to find most distinct + colour. + + :param colorblind_type: Generate colours that are distinct with given type of + colourblindness. Can be: + + * 'Normal': Normal vision + * 'Protanopia': Red-green colorblindness (1% males) + * 'Protanomaly': Red-green colorblindness (1% males, 0.01% females) + * 'Deuteranopia': Red-green colorblindness (1% males) + * 'Deuteranomaly': Red-green colorblindness (most common type: 6% males, + 0.4% females) + * 'Tritanopia': Blue-yellow colourblindness (<1% males and females) + * 'Tritanomaly' Blue-yellow colourblindness (0.01% males and females) + * 'Achromatopsia': Total colourblindness + * 'Achromatomaly': Total colourblindness + + :param rng: A random integer seed or random.Random state. + If unspecified the global random is used. + + :return: colors - A list of (r,g,b) colors that are visually distinct to each other + and to the colours in exclude_colors. (r,g,b) values are floats between 0 and 1. + """ + rng = _ensure_rng(rng) + + if exclude_colors is None: + exclude_colors = [WHITE, BLACK] + + colors = exclude_colors.copy() + + for i in range(n_colors): + colors.append( + distinct_color( + colors, + pastel_factor=pastel_factor, + n_attempts=n_attempts, + colorblind_type=colorblind_type, + rng=rng, + ) + ) + + if return_excluded: + return colors + else: + return colors[len(exclude_colors) :] + + +def invert_colors(colors): + """ + Generates inverted colours for each colour in the given colour list, using a simple + inversion of each colour to the opposite corner on the r,g,b cube. + + :return: inverted_colors - A list of inverted (r,g,b) (r,g,b) values are floats + between 0 and 1. + """ + inverted_colors = [] + + for color in colors: + r = 0.0 if color[0] > 0.5 else 1.0 + g = 0.0 if color[1] > 0.5 else 1.0 + b = 0.0 if color[2] > 0.5 else 1.0 + + inverted_colors.append((r, g, b)) + + return inverted_colors + + +def color_swatch( + colors, + edgecolors=None, + show_text=False, + text_threshold=0.6, + ax=None, + title=None, + one_row=None, + fontsize=None, +): + """ + Display the colours defined in a list of colors. + + :param colors: List of (r,g,b) colour tuples to display. (r,g,b) should be floats + between 0 and 1. + + :param edgecolors: If None displayed colours have no outline. Otherwise a list of + (r,g,b) colours to use as outlines for each colour. + + :param show_text: If True writes the background colour's hex on top of it in black + or white, as appropriate. 
+ + :param text_threshold: float between 0 and 1. With threshold close to 1 white text + will be chosen more often. + + :param ax: Matplotlib axis to plot to. If ax is None plt.show() is run in function + call. + + :param title: Add a title to the colour swatch. + + :param one_row: If True display colours on one row, if False as a grid. If + one_row=None a grid is used when there are more than 8 colours. + + :param fontsize: Fontsize of text on colour swatch. If None fontsize will attempt to + be set to an appropriate size based on the number of colours. + + :return: + """ + import matplotlib.colors + import matplotlib.patches as patches + import matplotlib.pyplot as plt + + if one_row is None: + if len(colors) > 8: + one_row = False + else: + one_row = True + + if one_row: + n_grid = len(colors) + else: + n_grid = math.ceil(np.sqrt(len(colors))) + + if fontsize is None: + fontsize = 60 / n_grid + + width = 1 + height = 1 + + x = 0 + y = 0 + + max_x = 0 + max_y = 0 + + if ax is None: + show = True + fig = plt.figure(figsize=(8, 8)) + ax = fig.add_subplot(111, aspect="equal") + else: + show = False + + for idx, color in enumerate(colors): + if edgecolors is None: + ax.add_patch(patches.Rectangle((x, y), width, height, color=color)) + else: + ax.add_patch( + patches.Rectangle( + (x, y), + width, + height, + facecolor=color, + edgecolor=edgecolors[idx], + linewidth=5, + ) + ) + + if show_text: + ax.text( + x + (width / 2), + y + (height / 2), + matplotlib.colors.rgb2hex(color), + fontsize=fontsize, + ha="center", + va="center", + color=get_text_color(color, threshold=text_threshold), + ) + + if (idx + 1) % n_grid == 0: + if edgecolors is None: + y += height + x = 0 + else: + y += height + (height / 10) + x = 0 + else: + if edgecolors is None: + x += width + else: + x += width + (width / 10) + + if x > max_x: + max_x = x + + if y > max_y: + max_y = y + + ax.set_ylim([-height / 10, max_y + 1.1 * height]) + ax.set_xlim([-width / 10, max_x + 1.1 * width]) + ax.invert_yaxis() + ax.axis("off") + + if title is not None: + ax.set_title(title) + + if show: + plt.show() + + +def get_hex(color): + """ + Returns hex of given color + + :param color: (r,g,b) color tuple. r,g,b are floats between 0 and 1. + + :return: hex str of color + """ + import matplotlib.colors + + return matplotlib.colors.rgb2hex(color) + + +def get_rgb256(color): + """ + Converts 0.0-1.0 rgb colour into 0-255 integer rgb colour. + + :param color: (r,g,b) tuple with r,g,b floats between 0.0 and 1.0 + + :return: (r,g,b) ints between 0 and 255 + """ + return ( + int(round(color[0] * 255)), + int(round(color[1] * 255)), + int(round(color[2] * 255)), + ) + + +def get_colormap(list_of_colors, name="distinctipy"): + """ + Converts a list of colors into a matplotlib colormap. + + :param list_of_colors: a list of (r,g,b) color tuples. (r,g,b) values should be + floats between 0 and 1. + + :param name: name of the generated colormap + + :return: cmap: a matplotlib colormap. + """ + import matplotlib.colors + + cmap = matplotlib.colors.ListedColormap(list_of_colors, name=name) + + return cmap \ No newline at end of file diff --git a/sam2/modeling/__init__.py b/sam2/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/sam2/modeling/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/sam2/modeling/backbones/__init__.py b/sam2/modeling/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/sam2/modeling/backbones/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/sam2/modeling/backbones/hieradet.py b/sam2/modeling/backbones/hieradet.py new file mode 100644 index 0000000000000000000000000000000000000000..19ac77b61d8e1345a301686d39ef2ab6e4b035fb --- /dev/null +++ b/sam2/modeling/backbones/hieradet.py @@ -0,0 +1,317 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from functools import partial +from typing import List, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from iopath.common.file_io import g_pathmgr + +from sam2.modeling.backbones.utils import ( + PatchEmbed, + window_partition, + window_unpartition, +) + +from sam2.modeling.sam2_utils import DropPath, MLP + + +def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor: + if pool is None: + return x + # (B, H, W, C) -> (B, C, H, W) + x = x.permute(0, 3, 1, 2) + x = pool(x) + # (B, C, H', W') -> (B, H', W', C) + x = x.permute(0, 2, 3, 1) + if norm: + x = norm(x) + + return x + + +class MultiScaleAttention(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + q_pool: nn.Module = None, + ): + super().__init__() + + self.dim = dim + self.dim_out = dim_out + self.num_heads = num_heads + self.q_pool = q_pool + self.qkv = nn.Linear(dim, dim_out * 3) + self.proj = nn.Linear(dim_out, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (B, H * W, 3, nHead, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) + # q, k, v with shape (B, H * W, nheads, C) + q, k, v = torch.unbind(qkv, 2) + + # Q pooling (for downsample at stage changes) + if self.q_pool: + q = do_pool(q.reshape(B, H, W, -1), self.q_pool) + H, W = q.shape[1:3] # downsampled shape + q = q.reshape(B, H * W, self.num_heads, -1) + + # Torch's SDPA expects [B, nheads, H*W, C] so we transpose + x = F.scaled_dot_product_attention( + q.transpose(1, 2), + k.transpose(1, 2), + v.transpose(1, 2), + ) + # Transpose back + x = x.transpose(1, 2) + x = x.reshape(B, H, W, -1) + + x = self.proj(x) + + return x + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + mlp_ratio: float = 4.0, + drop_path: float = 0.0, + norm_layer: Union[nn.Module, str] = "LayerNorm", + q_stride: Tuple[int, int] = None, + act_layer: nn.Module = nn.GELU, + window_size: int = 0, + ): + super().__init__() + + if isinstance(norm_layer, str): + norm_layer = partial(getattr(nn, norm_layer), eps=1e-6) + + self.dim = dim + self.dim_out = dim_out + self.norm1 = norm_layer(dim) + + self.window_size = window_size + + self.pool, self.q_stride = None, q_stride + if self.q_stride: + self.pool = nn.MaxPool2d( + kernel_size=q_stride, stride=q_stride, ceil_mode=False + ) + + self.attn = MultiScaleAttention( + 
dim, + dim_out, + num_heads=num_heads, + q_pool=self.pool, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = MLP( + dim_out, + int(dim_out * mlp_ratio), + dim_out, + num_layers=2, + activation=act_layer, + ) + + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x # B, H, W, C + x = self.norm1(x) + + # Skip connection + if self.dim != self.dim_out: + shortcut = do_pool(self.proj(x), self.pool) + + # Window partition + window_size = self.window_size + if window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, window_size) + + # Window Attention + Q Pooling (if stage change) + x = self.attn(x) + if self.q_stride: + # Shapes have changed due to Q pooling + window_size = self.window_size // self.q_stride[0] + H, W = shortcut.shape[1:3] + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + pad_hw = (H + pad_h, W + pad_w) + + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, window_size, pad_hw, (H, W)) + + x = shortcut + self.drop_path(x) + # MLP + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Hiera(nn.Module): + """ + Reference: https://arxiv.org/abs/2306.00989 + """ + + def __init__( + self, + embed_dim: int = 96, # initial embed dim + num_heads: int = 1, # initial number of heads + drop_path_rate: float = 0.0, # stochastic depth + q_pool: int = 3, # number of q_pool stages + q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages + stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage + dim_mul: float = 2.0, # dim_mul factor at stage shift + head_mul: float = 2.0, # head_mul factor at stage shift + window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14), + # window size per stage, when not using global att. + window_spec: Tuple[int, ...] = ( + 8, + 4, + 14, + 7, + ), + # global attn in these blocks + global_att_blocks: Tuple[int, ...] = ( + 12, + 16, + 20, + ), + weights_path=None, + return_interm_layers=True, # return feats from every stage + ): + super().__init__() + + assert len(stages) == len(window_spec) + self.window_spec = window_spec + + depth = sum(stages) + self.q_stride = q_stride + self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] + assert 0 <= q_pool <= len(self.stage_ends[:-1]) + self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] + self.return_interm_layers = return_interm_layers + + self.patch_embed = PatchEmbed( + embed_dim=embed_dim, + ) + # Which blocks have global att? 
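+        # (blocks listed in global_att_blocks get window_size 0 in the loop
+        # below, i.e. global attention over the whole feature map instead of
+        # windowed attention)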
+ self.global_att_blocks = global_att_blocks + + # Windowed positional embedding (https://arxiv.org/abs/2311.05613) + self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size + self.pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size) + ) + self.pos_embed_window = nn.Parameter( + torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]) + ) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + + cur_stage = 1 + self.blocks = nn.ModuleList() + + for i in range(depth): + dim_out = embed_dim + # lags by a block, so first block of + # next stage uses an initial window size + # of previous stage and final window size of current stage + window_size = self.window_spec[cur_stage - 1] + + if self.global_att_blocks is not None: + window_size = 0 if i in self.global_att_blocks else window_size + + if i - 1 in self.stage_ends: + dim_out = int(embed_dim * dim_mul) + num_heads = int(num_heads * head_mul) + cur_stage += 1 + + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + drop_path=dpr[i], + q_stride=self.q_stride if i in self.q_pool_blocks else None, + window_size=window_size, + ) + + embed_dim = dim_out + self.blocks.append(block) + + self.channel_list = ( + [self.blocks[i].dim_out for i in self.stage_ends[::-1]] + if return_interm_layers + else [self.blocks[-1].dim_out] + ) + + if weights_path is not None: + with g_pathmgr.open(weights_path, "rb") as f: + chkpt = torch.load(f, map_location="cpu") + logging.info("loading Hiera", self.load_state_dict(chkpt, strict=False)) + + def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor: + h, w = hw + window_embed = self.pos_embed_window + pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") + pos_embed = pos_embed + window_embed.tile( + [x // y for x, y in zip(pos_embed.shape, window_embed.shape)] + ) + pos_embed = pos_embed.permute(0, 2, 3, 1) + return pos_embed + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.patch_embed(x) + # x: (B, H, W, C) + + # Add pos embed + x = x + self._get_pos_embed(x.shape[1:3]) + + outputs = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if (i == self.stage_ends[-1]) or ( + i in self.stage_ends and self.return_interm_layers + ): + feats = x.permute(0, 3, 1, 2) + outputs.append(feats) + + return outputs + + def get_layer_id(self, layer_name): + # https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33 + num_layers = self.get_num_layers() + + if layer_name.find("rel_pos") != -1: + return num_layers + 1 + elif layer_name.find("pos_embed") != -1: + return 0 + elif layer_name.find("patch_embed") != -1: + return 0 + elif layer_name.find("blocks") != -1: + return int(layer_name.split("blocks")[1].split(".")[1]) + 1 + else: + return num_layers + 1 + + def get_num_layers(self) -> int: + return len(self.blocks) diff --git a/sam2/modeling/backbones/image_encoder.py b/sam2/modeling/backbones/image_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..37e9266bc98596e97ca303118c910ed24f6cee2c --- /dev/null +++ b/sam2/modeling/backbones/image_encoder.py @@ -0,0 +1,134 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
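+
+# Minimal usage sketch (the trunk/neck names below are placeholders; the real
+# instances come from the Hydra configs above):
+#
+#   encoder = ImageEncoder(trunk=hiera_trunk, neck=fpn_neck, scalp=1)
+#   out = encoder(torch.randn(1, 3, 1024, 1024))
+#   feats = out["backbone_fpn"]     # multi-scale feature maps (lowest-resolution level dropped)
+#   pos = out["vision_pos_enc"]     # matching positional encodings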
+ +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ImageEncoder(nn.Module): + def __init__( + self, + trunk: nn.Module, + neck: nn.Module, + scalp: int = 0, + ): + super().__init__() + self.trunk = trunk + self.neck = neck + self.scalp = scalp + assert ( + self.trunk.channel_list == self.neck.backbone_channel_list + ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}" + + def forward(self, sample: torch.Tensor): + # Forward through backbone + features, pos = self.neck(self.trunk(sample)) + if self.scalp > 0: + # Discard the lowest resolution features + features, pos = features[: -self.scalp], pos[: -self.scalp] + + src = features[-1] + output = { + "vision_features": src, + "vision_pos_enc": pos, + "backbone_fpn": features, + } + return output + + +class FpnNeck(nn.Module): + """ + A modified variant of Feature Pyramid Network (FPN) neck + (we remove output conv and also do bicubic interpolation similar to ViT + pos embed interpolation) + """ + + def __init__( + self, + position_encoding: nn.Module, + d_model: int, + backbone_channel_list: List[int], + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + fpn_interp_model: str = "bilinear", + fuse_type: str = "sum", + fpn_top_down_levels: Optional[List[int]] = None, + ): + """Initialize the neck + :param trunk: the backbone + :param position_encoding: the positional encoding to use + :param d_model: the dimension of the model + :param neck_norm: the normalization to use + """ + super().__init__() + self.position_encoding = position_encoding + self.convs = nn.ModuleList() + self.backbone_channel_list = backbone_channel_list + self.d_model = d_model + for dim in backbone_channel_list: + current = nn.Sequential() + current.add_module( + "conv", + nn.Conv2d( + in_channels=dim, + out_channels=d_model, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ), + ) + + self.convs.append(current) + self.fpn_interp_model = fpn_interp_model + assert fuse_type in ["sum", "avg"] + self.fuse_type = fuse_type + + # levels to have top-down features in its outputs + # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3 + # have top-down propagation, while outputs of level 0 and level 1 have only + # lateral features from the same backbone level. 
+ if fpn_top_down_levels is None: + # default is to have top-down features on all levels + fpn_top_down_levels = range(len(self.convs)) + self.fpn_top_down_levels = list(fpn_top_down_levels) + + def forward(self, xs: List[torch.Tensor]): + + out = [None] * len(self.convs) + pos = [None] * len(self.convs) + assert len(xs) == len(self.convs) + # fpn forward pass + # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py + prev_features = None + # forward in top-down order (from low to high resolution) + n = len(self.convs) - 1 + for i in range(n, -1, -1): + x = xs[i] + lateral_features = self.convs[n - i](x) + if i in self.fpn_top_down_levels and prev_features is not None: + top_down_features = F.interpolate( + prev_features.to(dtype=torch.float32), + scale_factor=2.0, + mode=self.fpn_interp_model, + align_corners=( + None if self.fpn_interp_model == "nearest" else False + ), + antialias=False, + ) + prev_features = lateral_features + top_down_features + if self.fuse_type == "avg": + prev_features /= 2 + else: + prev_features = lateral_features + x_out = prev_features + out[i] = x_out + pos[i] = self.position_encoding(x_out).to(x_out.dtype) + + return out, pos diff --git a/sam2/modeling/backbones/utils.py b/sam2/modeling/backbones/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..930b1b7622e7b0e7270120dcafccc242ef0f4f28 --- /dev/null +++ b/sam2/modeling/backbones/utils.py @@ -0,0 +1,93 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +"""Some utilities for backbones, in particular for windowing""" + +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def window_partition(x, window_size): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition(windows, window_size, pad_hw, hw): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.reshape( + B, Hp // window_size, Wp // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :] + return x + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, ...] 
= (7, 7), + stride: Tuple[int, ...] = (4, 4), + padding: Tuple[int, ...] = (3, 3), + in_chans: int = 3, + embed_dim: int = 768, + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/sam2/modeling/memory_attention.py b/sam2/modeling/memory_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..0b07f9d87e3d8194ca5e11fc20f01604d591a59d --- /dev/null +++ b/sam2/modeling/memory_attention.py @@ -0,0 +1,169 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional + +import torch +from torch import nn, Tensor + +from sam2.modeling.sam.transformer import RoPEAttention + +from sam2.modeling.sam2_utils import get_activation_fn, get_clones + + +class MemoryAttentionLayer(nn.Module): + + def __init__( + self, + activation: str, + cross_attention: nn.Module, + d_model: int, + dim_feedforward: int, + dropout: float, + pos_enc_at_attn: bool, + pos_enc_at_cross_attn_keys: bool, + pos_enc_at_cross_attn_queries: bool, + self_attention: nn.Module, + ): + super().__init__() + self.d_model = d_model + self.dim_feedforward = dim_feedforward + self.dropout_value = dropout + self.self_attn = self_attention + self.cross_attn_image = cross_attention + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation_str = activation + self.activation = get_activation_fn(activation) + + # Where to add pos enc + self.pos_enc_at_attn = pos_enc_at_attn + self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries + self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys + + def _forward_sa(self, tgt, query_pos): + # Self-Attention + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn(q, k, v=tgt2) + tgt = tgt + self.dropout1(tgt2) + return tgt + + def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): + kwds = {} + if num_k_exclude_rope > 0: + assert isinstance(self.cross_attn_image, RoPEAttention) + kwds = {"num_k_exclude_rope": num_k_exclude_rope} + + # Cross-Attention + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + k=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + v=memory, + **kwds, + ) + tgt = tgt + self.dropout2(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + num_k_exclude_rope: int = 0, + ) -> torch.Tensor: + + # Self-Attn, Cross-Attn + tgt = self._forward_sa(tgt, 
query_pos) + tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + # MLP + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + +class MemoryAttention(nn.Module): + def __init__( + self, + d_model: int, + pos_enc_at_input: bool, + layer: nn.Module, + num_layers: int, + batch_first: bool = True, # Do layers expect batch first input? + ): + super().__init__() + self.d_model = d_model + self.layers = get_clones(layer, num_layers) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + self.pos_enc_at_input = pos_enc_at_input + self.batch_first = batch_first + + def forward( + self, + curr: torch.Tensor, # self-attention inputs + memory: torch.Tensor, # cross-attention inputs + curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs + num_obj_ptr_tokens: int = 0, # number of object pointer *tokens* + ): + if isinstance(curr, list): + assert isinstance(curr_pos, list) + assert len(curr) == len(curr_pos) == 1 + curr, curr_pos = ( + curr[0], + curr_pos[0], + ) + + assert ( + curr.shape[1] == memory.shape[1] + ), "Batch size must be the same for curr and memory" + + output = curr + if self.pos_enc_at_input and curr_pos is not None: + output = output + 0.1 * curr_pos + + if self.batch_first: + # Convert to batch first + output = output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + memory = memory.transpose(0, 1) + memory_pos = memory_pos.transpose(0, 1) + + for layer in self.layers: + kwds = {} + if isinstance(layer.cross_attn_image, RoPEAttention): + kwds = {"num_k_exclude_rope": num_obj_ptr_tokens} + + output = layer( + tgt=output, + memory=memory, + pos=memory_pos, + query_pos=curr_pos, + **kwds, + ) + normed_output = self.norm(output) + + if self.batch_first: + # Convert back to seq first + normed_output = normed_output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + + return normed_output diff --git a/sam2/modeling/memory_encoder.py b/sam2/modeling/memory_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f60202dfaba87232c3870fb2101b5322a119d985 --- /dev/null +++ b/sam2/modeling/memory_encoder.py @@ -0,0 +1,181 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d + + +class MaskDownSampler(nn.Module): + """ + Progressively downsample a mask by total_stride, each time by stride. + Note that LayerNorm is applied per *token*, like in ViT. + + With each downsample (by a factor stride**2), channel capacity increases by the same factor. + In the end, we linearly project to embed_dim channels. 
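+
+    For example, with the defaults (stride=4, total_stride=16) there are two
+    downsampling stages that expand the channels 1 -> 16 -> 256, followed by a
+    1x1 convolution projecting to embed_dim (256 by default).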
+ """ + + def __init__( + self, + embed_dim=256, + kernel_size=4, + stride=4, + padding=0, + total_stride=16, + activation=nn.GELU, + ): + super().__init__() + num_layers = int(math.log2(total_stride) // math.log2(stride)) + assert stride**num_layers == total_stride + self.encoder = nn.Sequential() + mask_in_chans, mask_out_chans = 1, 1 + for _ in range(num_layers): + mask_out_chans = mask_in_chans * (stride**2) + self.encoder.append( + nn.Conv2d( + mask_in_chans, + mask_out_chans, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + ) + self.encoder.append(LayerNorm2d(mask_out_chans)) + self.encoder.append(activation()) + mask_in_chans = mask_out_chans + + self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1)) + + def forward(self, x): + return self.encoder(x) + + +# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt) +class CXBlock(nn.Module): + r"""ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__( + self, + dim, + kernel_size=7, + padding=3, + drop_path=0.0, + layer_scale_init_value=1e-6, + use_dwconv=True, + ): + super().__init__() + self.dwconv = nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=padding, + groups=dim if use_dwconv else 1, + ) # depthwise conv + self.norm = LayerNorm2d(dim, eps=1e-6) + self.pwconv1 = nn.Linear( + dim, 4 * dim + ) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + self.gamma = ( + nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + if layer_scale_init_value > 0 + else None + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = self.norm(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +class Fuser(nn.Module): + def __init__(self, layer, num_layers, dim=None, input_projection=False): + super().__init__() + self.proj = nn.Identity() + self.layers = get_clones(layer, num_layers) + + if input_projection: + assert dim is not None + self.proj = nn.Conv2d(dim, dim, kernel_size=1) + + def forward(self, x): + # normally x: (N, C, H, W) + x = self.proj(x) + for layer in self.layers: + x = layer(x) + return x + + +class MemoryEncoder(nn.Module): + def __init__( + self, + out_dim, + mask_downsampler, + fuser, + position_encoding, + in_dim=256, # in_dim of pix_feats + ): + super().__init__() + + self.mask_downsampler = mask_downsampler + + self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) + self.fuser = fuser + self.position_encoding = position_encoding + self.out_proj = nn.Identity() + if out_dim != in_dim: + self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) + + def forward( + self, + pix_feat: torch.Tensor, + masks: torch.Tensor, + skip_mask_sigmoid: bool = False, + ) -> Tuple[torch.Tensor, 
torch.Tensor]: + ## Process masks + # sigmoid, so that less domain shift from gt masks which are bool + if not skip_mask_sigmoid: + masks = F.sigmoid(masks) + masks = self.mask_downsampler(masks) + + ## Fuse pix_feats and downsampled masks + # in case the visual features are on CPU, cast them to CUDA + pix_feat = pix_feat.to(masks.device) + + x = self.pix_feat_proj(pix_feat) + x = x + masks + x = self.fuser(x) + x = self.out_proj(x) + + pos = self.position_encoding(x).to(x.dtype) + + return {"vision_features": x, "vision_pos_enc": [pos]} diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..2241d4cf1a4495b4c67dc35cbed1c606357b9b7a --- /dev/null +++ b/sam2/modeling/position_encoding.py @@ -0,0 +1,239 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Any, Optional, Tuple + +import numpy as np + +import torch +from torch import nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention Is All You Need paper, generalized to work on images. + """ + + def __init__( + self, + num_pos_feats, + temperature: int = 10000, + normalize: bool = True, + scale: Optional[float] = None, + # Following settings only relevant + # for warmping up cache for compilation + warmup_cache: bool = True, + image_size: int = 1024, + strides: Tuple[int] = (4, 8, 16, 32), + ): + super().__init__() + assert num_pos_feats % 2 == 0, "Expecting even model width" + self.num_pos_feats = num_pos_feats // 2 + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + self.cache = {} + if warmup_cache and torch.cuda.is_available(): + # Warmup cache for cuda, to help with compilation + device = torch.device("cuda") + for stride in strides: + cache_key = (image_size // stride, image_size // stride) + self._pe(1, device, *cache_key) + + def _encode_xy(self, x, y): + # The positions are expected to be normalized + assert len(x) == len(y) and x.ndim == y.ndim == 1 + x_embed = x * self.scale + y_embed = y * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, None] / dim_t + pos_y = y_embed[:, None] / dim_t + pos_x = torch.stack( + (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2 + ).flatten(1) + pos_y = torch.stack( + (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2 + ).flatten(1) + return pos_x, pos_y + + @torch.no_grad() + def encode_boxes(self, x, y, w, h): + pos_x, pos_y = self._encode_xy(x, y) + pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) + return pos + + encode = encode_boxes # Backwards compatibility + + @torch.no_grad() + def encode_points(self, x, y, labels): + (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape + assert bx == by and nx == ny and bx == bl and nx == nl + pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten()) + pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1) + pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) + return pos + + @torch.no_grad() + def 
_pe(self, B, device, *cache_key): + H, W = cache_key + if cache_key in self.cache: + return self.cache[cache_key].to(device)[None].repeat(B, 1, 1, 1) + + y_embed = ( + torch.arange(1, H + 1, dtype=torch.float32, device=device) + .view(1, -1, 1) + .repeat(B, 1, W) + ) + x_embed = ( + torch.arange(1, W + 1, dtype=torch.float32, device=device) + .view(1, 1, -1) + .repeat(B, H, 1) + ) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + self.cache[cache_key] = pos[0] + return pos + + @torch.no_grad() + def forward(self, x: torch.Tensor): + B = x.shape[0] + cache_key = (x.shape[-2], x.shape[-1]) + return self._pe(B, x.device, *cache_key) + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C + + +# Rotary Positional Encoding, adapted from: +# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py +# 2. https://github.com/naver-ai/rope-vit +# 3. 
https://github.com/lucidrains/rotary-embedding-torch + + +def init_t_xy(end_x: int, end_y: int): + t = torch.arange(end_x * end_y, dtype=torch.float32) + t_x = (t % end_x).float() + t_y = torch.div(t, end_x, rounding_mode="floor").float() + return t_x, t_y + + +def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0): + freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + + t_x, t_y = init_t_xy(end_x, end_y) + freqs_x = torch.outer(t_x, freqs_x) + freqs_y = torch.outer(t_y, freqs_y) + freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) + freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) + return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) + shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_enc( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + repeat_freqs_k: bool = False, +): + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = ( + torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + if xk.shape[-2] != 0 + else None + ) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + if xk_ is None: + # no keys to rotate, due to dropout + return xq_out.type_as(xq).to(xq.device), xk + # repeat freqs along seq_len dim to match k seq_len + if repeat_freqs_k: + r = xk_.shape[-2] // xq_.shape[-2] + if freqs_cis.is_cuda: + freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1) + else: + # torch.repeat on complex numbers may not be supported on non-CUDA devices + # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten + freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) diff --git a/sam2/modeling/sam/__init__.py b/sam2/modeling/sam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/sam2/modeling/sam/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..9bebc0366b2703ffcb80a44bfd19cce8339b4fed --- /dev/null +++ b/sam2/modeling/sam/mask_decoder.py @@ -0,0 +1,295 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
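+
+# Summary (added comment): MaskDecoder below follows the SAM design. Learned output
+# tokens (an IoU token, mask tokens and an optional object-score token) attend to the
+# image embedding through a TwoWayTransformer; per-token hypernetwork MLPs turn the
+# mask tokens into mask logits at 4x the backbone feature resolution, and an MLP head
+# predicts the quality (IoU) of each mask.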
+ +from typing import List, Optional, Tuple, Type + +import torch +from torch import nn + +from sam2.modeling.sam2_utils import LayerNorm2d, MLP + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + use_high_res_features: bool = False, + iou_prediction_use_sigmoid=False, + dynamic_multimask_via_stability=False, + dynamic_multimask_stability_delta=0.05, + dynamic_multimask_stability_thresh=0.98, + pred_obj_scores: bool = False, + pred_obj_scores_mlp: bool = False, + use_multimask_token_for_obj_ptr: bool = False, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. + + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.pred_obj_scores = pred_obj_scores + if self.pred_obj_scores: + self.obj_score_token = nn.Embedding(1, transformer_dim) + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.use_high_res_features = use_high_res_features + if use_high_res_features: + self.conv_s0 = nn.Conv2d( + transformer_dim, transformer_dim // 8, kernel_size=1, stride=1 + ) + self.conv_s1 = nn.Conv2d( + transformer_dim, transformer_dim // 4, kernel_size=1, stride=1 + ) + + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, + iou_head_hidden_dim, + self.num_mask_tokens, + iou_head_depth, + sigmoid_output=iou_prediction_use_sigmoid, + ) + if self.pred_obj_scores: + self.pred_obj_score_head = nn.Linear(transformer_dim, 1) + if pred_obj_scores_mlp: + self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3) + + # When outputting a single mask, optionally we can dynamically fall back to the best + # multimask output token if the single mask output token gives low stability scores. 
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability + self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta + self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + torch.Tensor: batched SAM token for mask output + """ + masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + repeat_image=repeat_image, + high_res_features=high_res_features, + ) + + # Select the correct mask or masks for output + if multimask_output: + masks = masks[:, 1:, :, :] + iou_pred = iou_pred[:, 1:] + elif self.dynamic_multimask_via_stability and not self.training: + masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) + else: + masks = masks[:, 0:1, :, :] + iou_pred = iou_pred[:, 0:1] + + if multimask_output and self.use_multimask_token_for_obj_ptr: + sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape + else: + # Take the mask output token. Here we *always* use the token for single mask output. + # At test time, even if we track after 1-click (and using multimask_output=True), + # we still take the single mask token here. The rationale is that we always track + # after multiple clicks during training, so the past tokens seen during training + # are always the single mask token (and we'll let it be the object-memory token). + sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape + + # Prepare output + return masks, iou_pred, sam_tokens_out, object_score_logits + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + s = 0 + if self.pred_obj_scores: + output_tokens = torch.cat( + [ + self.obj_score_token.weight, + self.iou_token.weight, + self.mask_tokens.weight, + ], + dim=0, + ) + s = 1 + else: + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight], dim=0 + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + if repeat_image: + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + else: + assert image_embeddings.shape[0] == tokens.shape[0] + src = image_embeddings + src = src + dense_prompt_embeddings + assert ( + image_pe.size(0) == 1 + ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, s, :] + mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + if not self.use_high_res_features: + upscaled_embedding = self.output_upscaling(src) + else: + dc1, ln1, act1, dc2, act2 = self.output_upscaling + feat_s0, feat_s1 = high_res_features + upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) + upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) + + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + if self.pred_obj_scores: + assert s == 1 + object_score_logits = self.pred_obj_score_head(hs[:, 0, :]) + else: + # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1 + object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1) + + return masks, iou_pred, mask_tokens_out, object_score_logits + + def _get_stability_scores(self, mask_logits): + """ + Compute stability scores of the mask logits based on the IoU between upper and + lower thresholds. + """ + mask_logits = mask_logits.flatten(-2) + stability_delta = self.dynamic_multimask_stability_delta + area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() + area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() + stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) + return stability_scores + + def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): + """ + When outputting a single mask, if the stability score from the current single-mask + output (based on output token 0) falls below a threshold, we instead select from + multi-mask outputs (based on output token 1~3) the mask with the highest predicted + IoU score. This is intended to ensure a valid mask for both clicking and tracking. 
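+
+        Stability is measured by `_get_stability_scores` as the ratio between the mask
+        areas thresholded at +stability_delta and at -stability_delta (an IoU, since the
+        former mask is contained in the latter); outputs scoring below
+        `dynamic_multimask_stability_thresh` are replaced by the best multimask candidate.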
+ """ + # The best mask from multimask output tokens (1~3) + multimask_logits = all_mask_logits[:, 1:, :, :] + multimask_iou_scores = all_iou_scores[:, 1:] + best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) + batch_inds = torch.arange( + multimask_iou_scores.size(0), device=all_iou_scores.device + ) + best_multimask_logits = multimask_logits[batch_inds, best_scores_inds] + best_multimask_logits = best_multimask_logits.unsqueeze(1) + best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds] + best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1) + + # The mask from singlemask output token 0 and its stability score + singlemask_logits = all_mask_logits[:, 0:1, :, :] + singlemask_iou_scores = all_iou_scores[:, 0:1] + stability_scores = self._get_stability_scores(singlemask_logits) + is_stable = stability_scores >= self.dynamic_multimask_stability_thresh + + # Dynamically fall back to best multimask output upon low stability scores. + mask_logits_out = torch.where( + is_stable[..., None, None].expand_as(singlemask_logits), + singlemask_logits, + best_multimask_logits, + ) + iou_scores_out = torch.where( + is_stable.expand_as(singlemask_iou_scores), + singlemask_iou_scores, + best_multimask_iou_scores, + ) + return mask_logits_out, iou_scores_out diff --git a/sam2/modeling/sam/pose_encoder.py b/sam2/modeling/sam/pose_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..6b48f57ebf6de91bae2e94c0307df8663724b1fc --- /dev/null +++ b/sam2/modeling/sam/pose_encoder.py @@ -0,0 +1,191 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple, Type + +import torch +from torch import nn + +from sam2.modeling.position_encoding import PositionEmbeddingRandom + +from sam2.modeling.sam2_utils import LayerNorm2d + + +class PoseEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. 
+ """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 17 # 17 COCO keypoints + point_embeddings = [ + nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings) + ] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = ( + 4 * image_embedding_size[0], + 4 * image_embedding_size[1], + ) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords( + points, self.input_image_size + ) + + kpt_embeddings = torch.cat([self.point_embeddings[i].weight for i in range(self.num_point_embeddings)], dim=0) + negative_embedding = torch.zeros_like(point_embedding) + self.not_a_point_embed.weight + positive_embedding = point_embedding + kpt_embeddings + weighted_embedding = ( + positive_embedding * labels.unsqueeze(-1).float() + + negative_embedding * (1 - labels.unsqueeze(-1).float()) + ) + + point_embedding = torch.where( + (labels == 0).unsqueeze(-1), + negative_embedding, + weighted_embedding, + ) + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords( + coords, self.input_image_size + ) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + # skeletons: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..abdd9cb72da9eb86f46a03f4a6fc0f90c0cdd451 --- /dev/null +++ b/sam2/modeling/sam/prompt_encoder.py @@ -0,0 +1,203 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple, Type + +import torch +from torch import nn + +from sam2.modeling.position_encoding import PositionEmbeddingRandom + +from sam2.modeling.sam2_utils import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. 
+ """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [ + nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings) + ] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = ( + 4 * image_embedding_size[0], + 4 * image_embedding_size[1], + ) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords( + points, self.input_image_size + ) + + point_embedding = torch.where( + (labels == -1).unsqueeze(-1), + torch.zeros_like(point_embedding) + self.not_a_point_embed.weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 0).unsqueeze(-1), + point_embedding + self.point_embeddings[0].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 1).unsqueeze(-1), + point_embedding + self.point_embeddings[1].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 2).unsqueeze(-1), + point_embedding + self.point_embeddings[2].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 3).unsqueeze(-1), + point_embedding + self.point_embeddings[3].weight, + point_embedding, + ) + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords( + coords, self.input_image_size + ) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + # skeletons: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..f9fe9a3fbc5cce4f1abe8ee0ae3a8602bbe2ff1b --- /dev/null +++ b/sam2/modeling/sam/transformer.py @@ -0,0 +1,311 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from functools import partial +from typing import Tuple, Type + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis +from sam2.modeling.sam2_utils import MLP + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. 
Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
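+        Each attention and MLP output is added residually and followed by LayerNorm;
+        when skip_first_layer_pe is set (the first block), the positional embedding is
+        not added in the initial self-attention and its output replaces the queries.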
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLP( + embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation + ) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + dropout: float = 0.0, + kv_in_dim: int = None, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert ( + self.internal_dim % num_heads == 0 + ), "num_heads must divide embedding_dim." 
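+        # Projections map to internal_dim = embedding_dim // downsample_rate, so each
+        # head works on internal_dim // num_heads channels (e.g. 256 // 2 // 8 = 16).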
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + self.dropout_p = dropout + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out + + +class RoPEAttention(Attention): + """Attention with rotary position encoding.""" + + def __init__( + self, + *args, + rope_theta=10000.0, + # whether to repeat q rope to match k length + # this is needed for cross-attention to memories + rope_k_repeat=False, + feat_sizes=(64, 64), # [w, h] for stride 16 feats at 1024 resolution + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.compute_cis = partial( + compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta + ) + freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) + self.freqs_cis = ( + freqs_cis.to("cuda") if torch.cuda.is_available() else freqs_cis + ) + self.rope_k_repeat = rope_k_repeat + + def forward( + self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 + ) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Apply rotary position encoding + w = h = math.sqrt(q.shape[-2]) + self.freqs_cis = self.freqs_cis.to(q.device) + if self.freqs_cis.shape[0] != q.shape[-2]: + self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) + if q.shape[-2] != k.shape[-2]: + assert self.rope_k_repeat + + num_k_rope = k.size(-2) - num_k_exclude_rope + q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, + k[:, :, :num_k_rope], + freqs_cis=self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py new file mode 100644 index 0000000000000000000000000000000000000000..d3afd12e1e7f77e271acec11038f4d71d6c85a6c --- /dev/null +++ b/sam2/modeling/sam2_base.py @@ -0,0 +1,1061 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
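+
+# Summary (added comment): SAM2Base ties together the image encoder, memory attention,
+# memory encoder and the SAM-style prompt encoder / mask decoder. The optional SAMURAI
+# mode initializes a KalmanFilter and exposes hyperparameters (kf_score_weight, the
+# stable_frames/stable_ious thresholds and the memory_bank_* thresholds) that appear to
+# control motion-aware mask selection and which frames are admitted to the memory bank.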
+ +from loguru import logger + +import torch +import torch.distributed +import torch.nn.functional as F + +from torch.nn.init import trunc_normal_ + +from sam2.modeling.sam.mask_decoder import MaskDecoder +from sam2.modeling.sam.prompt_encoder import PromptEncoder +from sam2.modeling.sam.transformer import TwoWayTransformer +from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames + +from sam2.utils.kalman_filter import KalmanFilter + +# a large negative value as a placeholder score for missing objects +NO_OBJ_SCORE = -1024.0 + + +class SAM2Base(torch.nn.Module): + def __init__( + self, + image_encoder, + memory_attention, + memory_encoder, + num_maskmem=7, # default 1 input frame + 6 previous frames + image_size=512, + backbone_stride=16, # stride of the image backbone output + sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob + sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob + # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks + binarize_mask_from_pts_for_mem_enc=False, + use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder + # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit, + # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model + # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM. + max_cond_frames_in_attn=-1, + # on the first frame, whether to directly add the no-memory embedding to the image feature + # (instead of using the transformer encoder) + directly_add_no_mem_embed=False, + # whether to use high-resolution feature maps in the SAM mask decoder + use_high_res_features_in_sam=False, + # whether to output multiple (3) masks for the first click on initial conditioning frames + multimask_output_in_sam=False, + # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`; + # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points) + multimask_min_pt_num=1, + multimask_max_pt_num=1, + # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`) + multimask_output_for_tracking=False, + # Whether to use multimask tokens for obj ptr; Only relevant when both + # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True + use_multimask_token_for_obj_ptr: bool = False, + # whether to use sigmoid to restrict ious prediction to [0-1] + iou_prediction_use_sigmoid=False, + # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5). + # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of + # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame. 
+ memory_temporal_stride_for_eval=1, + # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks) + non_overlap_masks_for_mem_enc=False, + # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder=False, + # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`) + max_obj_ptrs_in_encoder=16, + # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`) + add_tpos_enc_to_obj_ptrs=True, + # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference + # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + proj_tpos_enc_in_obj_ptrs=False, + # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers + # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + use_signed_tpos_enc_to_obj_ptrs=False, + # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation + # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking) + only_obj_ptrs_in_the_past_for_eval=False, + # Whether to predict if there is an object in the frame + pred_obj_scores: bool = False, + # Whether to use an MLP to predict object scores + pred_obj_scores_mlp: bool = False, + # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True; + # Whether to have a fixed no obj pointer when there is no object present + # or to use it as an additive embedding with obj_ptr produced by decoder + fixed_no_obj_ptr: bool = False, + # Soft no object, i.e. mix in no_obj_ptr softly, + # hope to make recovery easier if there is a mistake and mitigate accumulation of errors + soft_no_obj_ptr: bool = False, + use_mlp_for_obj_ptr_proj: bool = False, + # add no obj embedding to spatial frames + no_obj_embed_spatial: bool = False, + # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class. 
+ sam_mask_decoder_extra_args=None, + compile_image_encoder: bool = False, + # Whether to use SAMURAI or original SAM 2 + samurai_mode: bool = False, + # Hyperparameters for SAMURAI + stable_frames_threshold: int = 15, + stable_ious_threshold: float = 0.3, + min_obj_score_logits: float = -1, + kf_score_weight: float = 0.15, + memory_bank_iou_threshold: float = 0.5, + memory_bank_obj_score_threshold: float = 0.0, + memory_bank_kf_score_threshold: float = 0.0, + ): + super().__init__() + + # Part 1: the image backbone + self.image_encoder = image_encoder + # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting + self.use_high_res_features_in_sam = use_high_res_features_in_sam + self.num_feature_levels = 3 if use_high_res_features_in_sam else 1 + self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + if use_obj_ptrs_in_encoder: + # A conv layer to downsample the mask prompt to stride 4 (the same stride as + # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, + # so that it can be fed into the SAM mask decoder to generate a pointer. + self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) + self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs + if proj_tpos_enc_in_obj_ptrs: + assert add_tpos_enc_to_obj_ptrs # these options need to be used together + self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs + self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs + self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval + + # Part 2: memory attention to condition current frame's visual features + # with memories (and obj ptrs) from past frames + self.memory_attention = memory_attention + self.hidden_dim = image_encoder.neck.d_model + + # Part 3: memory encoder for the previous frame's outputs + self.memory_encoder = memory_encoder + self.mem_dim = self.hidden_dim + if hasattr(self.memory_encoder, "out_proj") and hasattr( + self.memory_encoder.out_proj, "weight" + ): + # if there is compression of memories along channel dim + self.mem_dim = self.memory_encoder.out_proj.weight.shape[0] + self.num_maskmem = num_maskmem # Number of memories accessible + # Temporal encoding of the memories + self.maskmem_tpos_enc = torch.nn.Parameter( + torch.zeros(num_maskmem, 1, 1, self.mem_dim) + ) + trunc_normal_(self.maskmem_tpos_enc, std=0.02) + # a single token to indicate no memory embedding from previous frames + self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + trunc_normal_(self.no_mem_embed, std=0.02) + trunc_normal_(self.no_mem_pos_enc, std=0.02) + self.directly_add_no_mem_embed = directly_add_no_mem_embed + # Apply sigmoid to the output raw mask logits (to turn them from + # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder + self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc + self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc + self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc + self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc + self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval + # On frames with mask input, whether to directly output the input mask without + # using a SAM prompt encoder + mask decoder + self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam + self.multimask_output_in_sam = 
multimask_output_in_sam + self.multimask_min_pt_num = multimask_min_pt_num + self.multimask_max_pt_num = multimask_max_pt_num + self.multimask_output_for_tracking = multimask_output_for_tracking + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid + + # Part 4: SAM-style prompt encoder (for both mask and point inputs) + # and SAM-style mask decoder for the final mask output + self.image_size = image_size + self.backbone_stride = backbone_stride + self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args + self.pred_obj_scores = pred_obj_scores + self.pred_obj_scores_mlp = pred_obj_scores_mlp + self.fixed_no_obj_ptr = fixed_no_obj_ptr + self.soft_no_obj_ptr = soft_no_obj_ptr + if self.fixed_no_obj_ptr: + assert self.pred_obj_scores + assert self.use_obj_ptrs_in_encoder + if self.pred_obj_scores and self.use_obj_ptrs_in_encoder: + self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) + trunc_normal_(self.no_obj_ptr, std=0.02) + self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj + self.no_obj_embed_spatial = None + if no_obj_embed_spatial: + self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim)) + trunc_normal_(self.no_obj_embed_spatial, std=0.02) + + self._build_sam_heads() + self.max_cond_frames_in_attn = max_cond_frames_in_attn + + # Whether to use SAMURAI or original SAM 2 + self.samurai_mode = samurai_mode + + # Init Kalman Filter + self.kf = KalmanFilter() + self.kf_mean = None + self.kf_covariance = None + self.stable_frames = 0 + + # Debug purpose + self.history = {} # debug + self.frame_cnt = 0 # debug + + # Hyperparameters for SAMURAI + self.stable_frames_threshold = stable_frames_threshold + self.stable_ious_threshold = stable_ious_threshold + self.min_obj_score_logits = min_obj_score_logits + self.kf_score_weight = kf_score_weight + self.memory_bank_iou_threshold = memory_bank_iou_threshold + self.memory_bank_obj_score_threshold = memory_bank_obj_score_threshold + self.memory_bank_kf_score_threshold = memory_bank_kf_score_threshold + + print(f"\033[93mSAMURAI mode: {self.samurai_mode}\033[0m") + + # Model compilation + if compile_image_encoder: + # Compile the forward function (not the full module) to allow loading checkpoints. + print( + "Image encoder compilation is enabled. First forward pass will be slow." + ) + self.image_encoder.forward = torch.compile( + self.image_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + @property + def device(self): + return next(self.parameters()).device + + def forward(self, *args, **kwargs): + raise NotImplementedError( + "Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning" + "See notebooks/video_predictor_example.ipynb for an inference example." 
+ ) + + def _build_sam_heads(self): + """Build SAM-style prompt encoder and mask decoder.""" + self.sam_prompt_embed_dim = self.hidden_dim + self.sam_image_embedding_size = self.image_size // self.backbone_stride + + # build PromptEncoder and MaskDecoder from SAM + # (their hyperparameters like `mask_in_chans=16` are from SAM code) + self.sam_prompt_encoder = PromptEncoder( + embed_dim=self.sam_prompt_embed_dim, + image_embedding_size=( + self.sam_image_embedding_size, + self.sam_image_embedding_size, + ), + input_image_size=(self.image_size, self.image_size), + mask_in_chans=16, + ) + self.sam_mask_decoder = MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=self.sam_prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=self.sam_prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + use_high_res_features=self.use_high_res_features_in_sam, + iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid, + pred_obj_scores=self.pred_obj_scores, + pred_obj_scores_mlp=self.pred_obj_scores_mlp, + use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr, + **(self.sam_mask_decoder_extra_args or {}), + ) + if self.use_obj_ptrs_in_encoder: + # a linear projection on SAM output tokens to turn them into object pointers + self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) + if self.use_mlp_for_obj_ptr_proj: + self.obj_ptr_proj = MLP( + self.hidden_dim, self.hidden_dim, self.hidden_dim, 3 + ) + else: + self.obj_ptr_proj = torch.nn.Identity() + if self.proj_tpos_enc_in_obj_ptrs: + # a linear projection on temporal positional encoding in object pointers to + # avoid potential interference with spatial positional encoding + self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) + else: + self.obj_ptr_tpos_proj = torch.nn.Identity() + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ): + """ + Forward SAM prompt encoders and mask heads. + + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. + - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. + - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3 + if `multimask_output=True` and M = 1 if `multimask_output=False`), + upsampled from the low-resolution masks, with shape size as the image + (stride is 1 pixel). 
+        - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+          if `multimask_output=False`), the estimated IoU of each output mask.
+        - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `low_res_multimasks`.
+        - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `high_res_multimasks`.
+        - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
+          based on the output token from the SAM mask decoder.
+        """
+        B = backbone_features.size(0)
+        device = backbone_features.device
+        assert backbone_features.size(1) == self.sam_prompt_embed_dim
+        assert backbone_features.size(2) == self.sam_image_embedding_size
+        assert backbone_features.size(3) == self.sam_image_embedding_size
+
+        # a) Handle point prompts
+        if point_inputs is not None:
+            sam_point_coords = point_inputs["point_coords"]
+            sam_point_labels = point_inputs["point_labels"]
+            assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+        else:
+            # If no points are provided, pad with an empty point (with label -1)
+            sam_point_coords = torch.zeros(B, 1, 2, device=device)
+            sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+        # b) Handle mask prompts
+        if mask_inputs is not None:
+            # If mask_inputs is provided, downsize it into low-res mask input if needed
+            # and feed it as a dense mask prompt into the SAM mask encoder
+            assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+            if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+                sam_mask_prompt = F.interpolate(
+                    mask_inputs.float(),
+                    size=self.sam_prompt_encoder.mask_input_size,
+                    align_corners=False,
+                    mode="bilinear",
+                    antialias=True,  # use antialias for downsampling
+                )
+            else:
+                sam_mask_prompt = mask_inputs
+        else:
+            # Otherwise, simply feed None (and SAM's prompt encoder will add
+            # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=self.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > self.min_obj_score_logits + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + kf_ious = None + if multimask_output and self.samurai_mode: + if self.kf_mean is None and self.kf_covariance is None or self.stable_frames == 0: + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + non_zero_indices = torch.argwhere(high_res_masks[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_bbox = [0, 0, 0, 0] + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_bbox = [x_min.item(), y_min.item(), x_max.item(), y_max.item()] + self.kf_mean, self.kf_covariance = self.kf.initiate(self.kf.xyxy_to_xyah(high_res_bbox)) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + self.frame_cnt += 1 + self.stable_frames += 1 + elif self.stable_frames < self.stable_frames_threshold: + self.kf_mean, self.kf_covariance = self.kf.predict(self.kf_mean, self.kf_covariance) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + non_zero_indices = torch.argwhere(high_res_masks[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_bbox = [0, 0, 0, 0] + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_bbox = [x_min.item(), y_min.item(), x_max.item(), y_max.item()] + if ious[0][best_iou_inds] > self.stable_ious_threshold: + self.kf_mean, self.kf_covariance = self.kf.update(self.kf_mean, self.kf_covariance, self.kf.xyxy_to_xyah(high_res_bbox)) + self.stable_frames += 1 + else: + self.stable_frames = 0 + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + self.frame_cnt += 1 + else: + self.kf_mean, self.kf_covariance = self.kf.predict(self.kf_mean, self.kf_covariance) + high_res_multibboxes = [] + batch_inds = torch.arange(B, device=device) + for i in 
range(ious.shape[1]): + non_zero_indices = torch.argwhere(high_res_multimasks[batch_inds, i].unsqueeze(1)[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_multibboxes.append([0, 0, 0, 0]) + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_multibboxes.append([x_min.item(), y_min.item(), x_max.item(), y_max.item()]) + # compute the IoU between the predicted bbox and the high_res_multibboxes + kf_ious = torch.tensor(self.kf.compute_iou(self.kf_mean[:4], high_res_multibboxes), device=device) + # weighted iou + weighted_ious = self.kf_score_weight * kf_ious + (1 - self.kf_score_weight) * ious + best_iou_inds = torch.argmax(weighted_ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + + if False: + # make all these on cpu + self.history[self.frame_cnt] = { + "kf_predicted_bbox": self.kf.xyah_to_xyxy(self.kf_mean[:4]), + # "multi_masks": high_res_multimasks.cpu(), + "ious": ious.cpu(), + "multi_bboxes": high_res_multibboxes, + "kf_ious": kf_ious, + "weighted_ious": weighted_ious.cpu(), + "final_selection": best_iou_inds.cpu(), + } + self.frame_cnt += 1 + + if ious[0][best_iou_inds] < self.stable_ious_threshold: + self.stable_frames = 0 + else: + self.kf_mean, self.kf_covariance = self.kf.update(self.kf_mean, self.kf_covariance, self.kf.xyxy_to_xyah(high_res_multibboxes[best_iou_inds])) + elif multimask_output and not self.samurai_mode: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + best_iou_inds = 0 + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ious[0][best_iou_inds], + kf_ious[best_iou_inds] if kf_ious is not None else None, + ) + + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + """ + Directly turn binary `mask_inputs` into a output mask logits without using SAM. + (same input and output shapes as in _forward_sam_heads above). + """ + # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). 
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 + mask_inputs_float = mask_inputs.float() + high_res_masks = mask_inputs_float * out_scale + out_bias + low_res_masks = F.interpolate( + high_res_masks, + size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + # a dummy IoU prediction of all 1's under mask input + ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() + if not self.use_obj_ptrs_in_encoder: + # all zeros as a dummy object pointer (of shape [B, C]) + obj_ptr = torch.zeros( + mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device + ) + else: + # produce an object pointer using the SAM decoder from the mask input + _, _, _, _, _, obj_ptr, _, _, _ = self._forward_sam_heads( + backbone_features=backbone_features, + mask_inputs=self.mask_downsample(mask_inputs_float), + high_res_features=high_res_features, + ) + # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; + # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying + # on the object_scores from the SAM decoder. + is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + lambda_is_obj_appearing = is_obj_appearing.float() + object_score_logits = out_scale * lambda_is_obj_appearing + out_bias + if self.pred_obj_scores: + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_masks, + high_res_masks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def forward_image(self, img_batch: torch.Tensor): + """Get the image feature on the input batch.""" + backbone_out = self.image_encoder(img_batch) + if self.use_high_res_features_in_sam: + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + return backbone_out + + def _prepare_backbone_features(self, backbone_out): + """Prepare and flatten visual features.""" + backbone_out = backbone_out.copy() + assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"]) + assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels + + feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + + feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] + # flatten NxCxHxW to HWxNxC + vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] + vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] + + return backbone_out, vision_feats, vision_pos_embeds, feat_sizes + + def _prepare_memory_conditioned_features( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + ): + """Fuse the current frame's visual feature map with previous memory.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # 
top-level (lowest-resolution) feature size + device = current_vision_feats[-1].device + # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. + # In this case, we skip the fusion with any memory. + if self.num_maskmem == 0: # Disable memory and skip fusion + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + return pix_feat + + num_obj_ptr_tokens = 0 + tpos_sign_mul = -1 if track_in_reverse else 1 + # Step 1: condition the visual features of the current frame on previous memories + if not is_init_cond_frame: + # Retrieve the memories encoded with the maskmem backbone + to_cat_memory, to_cat_memory_pos_embed = [], [] + # Add conditioning frames's output first (all cond frames have t_pos=0 for + # when getting temporal positional embedding below) + assert len(output_dict["cond_frame_outputs"]) > 0 + # Select a maximum number of temporally closest cond frames for cross attention + cond_outputs = output_dict["cond_frame_outputs"] + selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( + frame_idx, cond_outputs, self.max_cond_frames_in_attn + ) + t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] + # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory + # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1 + # We also allow taking the memory frame non-consecutively (with stride>1), in which case + # we take (self.num_maskmem - 2) frames among every stride-th frames plus the last frame. + stride = 1 if self.training else self.memory_temporal_stride_for_eval + + if self.samurai_mode: + valid_indices = [] + if frame_idx > 1: # Ensure we have previous frames to evaluate + for i in range(frame_idx - 1, 1, -1): # Iterate backwards through previous frames + iou_score = output_dict["non_cond_frame_outputs"][i]["best_iou_score"] # Get mask affinity score + obj_score = output_dict["non_cond_frame_outputs"][i]["object_score_logits"] # Get object score + kf_score = output_dict["non_cond_frame_outputs"][i]["kf_score"] if "kf_score" in output_dict["non_cond_frame_outputs"][i] else None # Get motion score if available + # Check if the scores meet the criteria for being a valid index + if iou_score.item() > self.memory_bank_iou_threshold and \ + obj_score.item() > self.memory_bank_obj_score_threshold and \ + (kf_score is None or kf_score.item() > self.memory_bank_kf_score_threshold): + valid_indices.insert(0, i) + # Check the number of valid indices + if len(valid_indices) >= self.max_obj_ptrs_in_encoder - 1: + break + if frame_idx - 1 not in valid_indices: + valid_indices.append(frame_idx - 1) + for t_pos in range(1, self.num_maskmem): # Iterate over the number of mask memories + idx = t_pos - self.num_maskmem # Calculate the index for valid indices + if idx < -len(valid_indices): # Skip if index is out of bounds + continue + out = output_dict["non_cond_frame_outputs"].get(valid_indices[idx], None) # Get output for the valid index + if out is None: # If not found, check unselected outputs + out = unselected_cond_outputs.get(valid_indices[idx], None) + t_pos_and_prevs.append((t_pos, out)) # Append the temporal position and output to the list + else: + for t_pos in range(1, self.num_maskmem): + t_rel = self.num_maskmem - t_pos # how many frames before current frame + if t_rel == 1: + # for t_rel == 1, we take the last frame (regardless of r) + if not track_in_reverse: + # the frame immediately before this frame (i.e. 
frame_idx - 1) + prev_frame_idx = frame_idx - t_rel + else: + # the frame immediately after this frame (i.e. frame_idx + 1) + prev_frame_idx = frame_idx + t_rel + else: + # for t_rel >= 2, we take the memory frame from every r-th frames + if not track_in_reverse: + # first find the nearest frame among every r-th frames before this frame + # for r=1, this would be (frame_idx - 2) + prev_frame_idx = ((frame_idx - 2) // stride) * stride + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride + else: + # first find the nearest frame among every r-th frames after this frame + # for r=1, this would be (frame_idx + 2) + prev_frame_idx = -(-(frame_idx + 2) // stride) * stride + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride + out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) + if out is None: + # If an unselected conditioning frame is among the last (self.num_maskmem - 1) + # frames, we still attend to it as if it's a non-conditioning frame. + out = unselected_cond_outputs.get(prev_frame_idx, None) + t_pos_and_prevs.append((t_pos, out)) + + for t_pos, prev in t_pos_and_prevs: + if prev is None: + continue # skip padding frames + # "maskmem_features" might have been offloaded to CPU in demo use cases, + # so we load it back to GPU (it's a no-op if it's already on GPU). + feats = prev["maskmem_features"].to(device, non_blocking=True) + to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) + # Spatial positional encoding (it might have been offloaded to CPU in eval) + maskmem_enc = prev["maskmem_pos_enc"][-1].to(device) + maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) + # Temporal positional encoding + maskmem_enc = ( + maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1] + ) + to_cat_memory_pos_embed.append(maskmem_enc) + + # Construct the list of past object pointers + if self.use_obj_ptrs_in_encoder: + max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) + # First add those object pointers from selected conditioning frames + # (optionally, only include object pointers in the past during evaluation) + if not self.training and self.only_obj_ptrs_in_the_past_for_eval: + ptr_cond_outputs = { + t: out + for t, out in selected_cond_outputs.items() + if (t >= frame_idx if track_in_reverse else t <= frame_idx) + } + else: + ptr_cond_outputs = selected_cond_outputs + pos_and_ptrs = [ + # Temporal pos encoding contains how far away each pointer is from current frame + ( + ( + (frame_idx - t) * tpos_sign_mul + if self.use_signed_tpos_enc_to_obj_ptrs + else abs(frame_idx - t) + ), + out["obj_ptr"], + ) + for t, out in ptr_cond_outputs.items() + ] + # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame + for t_diff in range(1, max_obj_ptrs_in_encoder): + t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff + if t < 0 or (num_frames is not None and t >= num_frames): + break + out = output_dict["non_cond_frame_outputs"].get( + t, unselected_cond_outputs.get(t, None) + ) + if out is not None: + pos_and_ptrs.append((t_diff, out["obj_ptr"])) + # If we have at least one object pointer, add them to the across attention + if len(pos_and_ptrs) > 0: + pos_list, ptrs_list = zip(*pos_and_ptrs) + # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape + obj_ptrs = torch.stack(ptrs_list, dim=0) + # a temporal positional embedding based on how far each object pointer is from + # the current frame (sine 
embedding normalized by the max pointer num). + if self.add_tpos_enc_to_obj_ptrs: + t_diff_max = max_obj_ptrs_in_encoder - 1 + tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim + obj_pos = torch.tensor(pos_list).to( + device=device, non_blocking=True + ) + obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim) + obj_pos = self.obj_ptr_tpos_proj(obj_pos) + obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim) + else: + obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim) + if self.mem_dim < C: + # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C + obj_ptrs = obj_ptrs.reshape( + -1, B, C // self.mem_dim, self.mem_dim + ) + obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) + obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) + to_cat_memory.append(obj_ptrs) + to_cat_memory_pos_embed.append(obj_pos) + num_obj_ptr_tokens = obj_ptrs.shape[0] + else: + num_obj_ptr_tokens = 0 + else: + # for initial conditioning frames, encode them without using any previous memory + if self.directly_add_no_mem_embed: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + # Use a dummy token on the first frame (to avoid empty memory input to tranformer encoder) + to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)] + to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] + + # Step 2: Concatenate the memories and forward through the transformer encoder + memory = torch.cat(to_cat_memory, dim=0) + memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + + pix_feat_with_mem = self.memory_attention( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + memory=memory, + memory_pos=memory_pos_embed, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + def _encode_new_memory( + self, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + object_score_logits, + is_mask_from_pts, + ): + """Encode the current image and its prediction into a memory feature.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1). 
+ pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts + if binarize and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + maskmem_out = self.memory_encoder( + pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + ) + maskmem_features = maskmem_out["vision_features"] + maskmem_pos_enc = maskmem_out["vision_pos_enc"] + # add a no-object embedding to the spatial memory to indicate that the frame + # is predicted to be occluded (i.e. no object is appearing in the frame) + if self.no_obj_embed_spatial is not None: + is_obj_appearing = (object_score_logits > 0).float() + maskmem_features += ( + 1 - is_obj_appearing[..., None, None] + ) * self.no_obj_embed_spatial[..., None, None].expand( + *maskmem_features.shape + ) + + return maskmem_features, maskmem_pos_enc + + def _track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse, + prev_sam_mask_logits, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None and self.use_mask_input_as_output_without_sam: + # When use_mask_input_as_output_without_sam=True, we directly output the mask input + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. + pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. 
in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + ) + + return current_out, sam_outputs, high_res_features, pix_feat + + def _encode_memory_in_output( + self, + current_vision_feats, + feat_sizes, + point_inputs, + run_mem_encoder, + high_res_masks, + object_score_logits, + current_out, + ): + if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + object_score_logits=object_score_logits, + is_mask_from_pts=(point_inputs is not None), + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). 
+ prev_sam_mask_logits=None, + ): + current_out, sam_outputs, _, _ = self._track_step( + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse, + prev_sam_mask_logits, + ) + + ( + _, + _, + _, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + best_iou_score, + kf_ious + ) = sam_outputs + + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + current_out["best_iou_score"] = best_iou_score + current_out["kf_ious"] = kf_ious + if not self.training: + # Only add this in inference (to avoid unused param in activation checkpointing; + # it's mainly used in the demo to encode spatial memories w/ consolidated masks) + current_out["object_score_logits"] = object_score_logits + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + self._encode_memory_in_output( + current_vision_feats, + feat_sizes, + point_inputs, + run_mem_encoder, + high_res_masks, + object_score_logits, + current_out, + ) + + return current_out + + def _use_multimask(self, is_init_cond_frame, point_inputs): + """Whether to use multimask output in the SAM head.""" + num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1) + multimask_output = ( + self.multimask_output_in_sam + and (is_init_cond_frame or self.multimask_output_for_tracking) + and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) + ) + return multimask_output + + def _apply_non_overlapping_constraints(self, pred_masks): + """ + Apply non-overlapping constraints to the object scores in pred_masks. Here we + keep only the highest scoring object at each spatial location in pred_masks. + """ + batch_size = pred_masks.size(0) + if batch_size == 1: + return pred_masks + + device = pred_masks.device + # "max_obj_inds": object index of the object with the highest score at each location + max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) + # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks` + batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] + keep = max_obj_inds == batch_obj_inds + # suppress overlapping regions' scores below -10.0 so that the foreground regions + # don't overlap (here sigmoid(-10.0)=4.5398e-05) + pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) + return pred_masks diff --git a/sam2/modeling/sam2_base_pose.py b/sam2/modeling/sam2_base_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..f7bd3f58c10971fdf71f6bb2951282f252c39f06 --- /dev/null +++ b/sam2/modeling/sam2_base_pose.py @@ -0,0 +1,1061 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
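+
+# Overview: this file mirrors sam2/modeling/sam2_base.py, except that
+# `_build_sam_heads` constructs a PoseEncoder instead of the SAM PromptEncoder,
+# presumably so that pose keypoints can be fed as prompts. The SAMURAI logic is
+# unchanged: with `samurai_mode=True` and a stable track, the multimask candidate
+# is selected by
+#     weighted_iou = kf_score_weight * kf_iou + (1 - kf_score_weight) * iou
+# As an illustrative example (not fixed behavior), with the default
+# kf_score_weight = 0.15, a decoder IoU of 0.9 and a Kalman-filter IoU of 0.4 give
+# 0.15 * 0.4 + 0.85 * 0.9 = 0.825, so the motion prior mainly breaks near-ties
+# between candidates with similar decoder IoU estimates.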
+ +from loguru import logger + +import torch +import torch.distributed +import torch.nn.functional as F + +from torch.nn.init import trunc_normal_ + +from sam2.modeling.sam.mask_decoder import MaskDecoder +from sam2.modeling.sam.pose_encoder import PoseEncoder +from sam2.modeling.sam.transformer import TwoWayTransformer +from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames + +from sam2.utils.kalman_filter import KalmanFilter + +# a large negative value as a placeholder score for missing objects +NO_OBJ_SCORE = -1024.0 + + +class SAM2Base(torch.nn.Module): + def __init__( + self, + image_encoder, + memory_attention, + memory_encoder, + num_maskmem=7, # default 1 input frame + 6 previous frames + image_size=512, + backbone_stride=16, # stride of the image backbone output + sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob + sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob + # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks + binarize_mask_from_pts_for_mem_enc=False, + use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder + # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit, + # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model + # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM. + max_cond_frames_in_attn=-1, + # on the first frame, whether to directly add the no-memory embedding to the image feature + # (instead of using the transformer encoder) + directly_add_no_mem_embed=False, + # whether to use high-resolution feature maps in the SAM mask decoder + use_high_res_features_in_sam=False, + # whether to output multiple (3) masks for the first click on initial conditioning frames + multimask_output_in_sam=False, + # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`; + # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points) + multimask_min_pt_num=1, + multimask_max_pt_num=1, + # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`) + multimask_output_for_tracking=False, + # Whether to use multimask tokens for obj ptr; Only relevant when both + # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True + use_multimask_token_for_obj_ptr: bool = False, + # whether to use sigmoid to restrict ious prediction to [0-1] + iou_prediction_use_sigmoid=False, + # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5). + # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of + # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame. 
+ memory_temporal_stride_for_eval=1, + # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks) + non_overlap_masks_for_mem_enc=False, + # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder=False, + # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`) + max_obj_ptrs_in_encoder=16, + # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`) + add_tpos_enc_to_obj_ptrs=True, + # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference + # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + proj_tpos_enc_in_obj_ptrs=False, + # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers + # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + use_signed_tpos_enc_to_obj_ptrs=False, + # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation + # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking) + only_obj_ptrs_in_the_past_for_eval=False, + # Whether to predict if there is an object in the frame + pred_obj_scores: bool = False, + # Whether to use an MLP to predict object scores + pred_obj_scores_mlp: bool = False, + # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True; + # Whether to have a fixed no obj pointer when there is no object present + # or to use it as an additive embedding with obj_ptr produced by decoder + fixed_no_obj_ptr: bool = False, + # Soft no object, i.e. mix in no_obj_ptr softly, + # hope to make recovery easier if there is a mistake and mitigate accumulation of errors + soft_no_obj_ptr: bool = False, + use_mlp_for_obj_ptr_proj: bool = False, + # add no obj embedding to spatial frames + no_obj_embed_spatial: bool = False, + # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class. 
+ sam_mask_decoder_extra_args=None, + compile_image_encoder: bool = False, + # Whether to use SAMURAI or original SAM 2 + samurai_mode: bool = False, + # Hyperparameters for SAMURAI + stable_frames_threshold: int = 15, + stable_ious_threshold: float = 0.3, + min_obj_score_logits: float = -1, + kf_score_weight: float = 0.15, + memory_bank_iou_threshold: float = 0.5, + memory_bank_obj_score_threshold: float = 0.0, + memory_bank_kf_score_threshold: float = 0.0, + ): + super().__init__() + + # Part 1: the image backbone + self.image_encoder = image_encoder + # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting + self.use_high_res_features_in_sam = use_high_res_features_in_sam + self.num_feature_levels = 3 if use_high_res_features_in_sam else 1 + self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + if use_obj_ptrs_in_encoder: + # A conv layer to downsample the mask prompt to stride 4 (the same stride as + # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, + # so that it can be fed into the SAM mask decoder to generate a pointer. + self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) + self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs + if proj_tpos_enc_in_obj_ptrs: + assert add_tpos_enc_to_obj_ptrs # these options need to be used together + self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs + self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs + self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval + + # Part 2: memory attention to condition current frame's visual features + # with memories (and obj ptrs) from past frames + self.memory_attention = memory_attention + self.hidden_dim = image_encoder.neck.d_model + + # Part 3: memory encoder for the previous frame's outputs + self.memory_encoder = memory_encoder + self.mem_dim = self.hidden_dim + if hasattr(self.memory_encoder, "out_proj") and hasattr( + self.memory_encoder.out_proj, "weight" + ): + # if there is compression of memories along channel dim + self.mem_dim = self.memory_encoder.out_proj.weight.shape[0] + self.num_maskmem = num_maskmem # Number of memories accessible + # Temporal encoding of the memories + self.maskmem_tpos_enc = torch.nn.Parameter( + torch.zeros(num_maskmem, 1, 1, self.mem_dim) + ) + trunc_normal_(self.maskmem_tpos_enc, std=0.02) + # a single token to indicate no memory embedding from previous frames + self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + trunc_normal_(self.no_mem_embed, std=0.02) + trunc_normal_(self.no_mem_pos_enc, std=0.02) + self.directly_add_no_mem_embed = directly_add_no_mem_embed + # Apply sigmoid to the output raw mask logits (to turn them from + # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder + self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc + self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc + self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc + self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc + self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval + # On frames with mask input, whether to directly output the input mask without + # using a SAM prompt encoder + mask decoder + self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam + self.multimask_output_in_sam = 
multimask_output_in_sam + self.multimask_min_pt_num = multimask_min_pt_num + self.multimask_max_pt_num = multimask_max_pt_num + self.multimask_output_for_tracking = multimask_output_for_tracking + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid + + # Part 4: SAM-style prompt encoder (for both mask and point inputs) + # and SAM-style mask decoder for the final mask output + self.image_size = image_size + self.backbone_stride = backbone_stride + self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args + self.pred_obj_scores = pred_obj_scores + self.pred_obj_scores_mlp = pred_obj_scores_mlp + self.fixed_no_obj_ptr = fixed_no_obj_ptr + self.soft_no_obj_ptr = soft_no_obj_ptr + if self.fixed_no_obj_ptr: + assert self.pred_obj_scores + assert self.use_obj_ptrs_in_encoder + if self.pred_obj_scores and self.use_obj_ptrs_in_encoder: + self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) + trunc_normal_(self.no_obj_ptr, std=0.02) + self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj + self.no_obj_embed_spatial = None + if no_obj_embed_spatial: + self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim)) + trunc_normal_(self.no_obj_embed_spatial, std=0.02) + + self._build_sam_heads() + self.max_cond_frames_in_attn = max_cond_frames_in_attn + + # Whether to use SAMURAI or original SAM 2 + self.samurai_mode = samurai_mode + + # Init Kalman Filter + self.kf = KalmanFilter() + self.kf_mean = None + self.kf_covariance = None + self.stable_frames = 0 + + # Debug purpose + self.history = {} # debug + self.frame_cnt = 0 # debug + + # Hyperparameters for SAMURAI + self.stable_frames_threshold = stable_frames_threshold + self.stable_ious_threshold = stable_ious_threshold + self.min_obj_score_logits = min_obj_score_logits + self.kf_score_weight = kf_score_weight + self.memory_bank_iou_threshold = memory_bank_iou_threshold + self.memory_bank_obj_score_threshold = memory_bank_obj_score_threshold + self.memory_bank_kf_score_threshold = memory_bank_kf_score_threshold + + print(f"\033[93mSAMURAI mode: {self.samurai_mode}\033[0m") + + # Model compilation + if compile_image_encoder: + # Compile the forward function (not the full module) to allow loading checkpoints. + print( + "Image encoder compilation is enabled. First forward pass will be slow." + ) + self.image_encoder.forward = torch.compile( + self.image_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + @property + def device(self): + return next(self.parameters()).device + + def forward(self, *args, **kwargs): + raise NotImplementedError( + "Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning" + "See notebooks/video_predictor_example.ipynb for an inference example." 
+ ) + + def _build_sam_heads(self): + """Build SAM-style prompt encoder and mask decoder.""" + self.sam_prompt_embed_dim = self.hidden_dim + self.sam_image_embedding_size = self.image_size // self.backbone_stride + + # build PoseEncoder and MaskDecoder from SAM + # (their hyperparameters like `mask_in_chans=16` are from SAM code) + self.sam_prompt_encoder = PoseEncoder( + embed_dim=self.sam_prompt_embed_dim, + image_embedding_size=( + self.sam_image_embedding_size, + self.sam_image_embedding_size, + ), + input_image_size=(self.image_size, self.image_size), + mask_in_chans=16, + ) + self.sam_mask_decoder = MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=self.sam_prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=self.sam_prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + use_high_res_features=self.use_high_res_features_in_sam, + iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid, + pred_obj_scores=self.pred_obj_scores, + pred_obj_scores_mlp=self.pred_obj_scores_mlp, + use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr, + **(self.sam_mask_decoder_extra_args or {}), + ) + if self.use_obj_ptrs_in_encoder: + # a linear projection on SAM output tokens to turn them into object pointers + self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) + if self.use_mlp_for_obj_ptr_proj: + self.obj_ptr_proj = MLP( + self.hidden_dim, self.hidden_dim, self.hidden_dim, 3 + ) + else: + self.obj_ptr_proj = torch.nn.Identity() + if self.proj_tpos_enc_in_obj_ptrs: + # a linear projection on temporal positional encoding in object pointers to + # avoid potential interference with spatial positional encoding + self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) + else: + self.obj_ptr_tpos_proj = torch.nn.Identity() + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ): + """ + Forward SAM prompt encoders and mask heads. + + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. + - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. + - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3 + if `multimask_output=True` and M = 1 if `multimask_output=False`), + upsampled from the low-resolution masks, with shape size as the image + (stride is 1 pixel). 
+        - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+          if `multimask_output=False`), the estimated IoU of each output mask.
+        - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `low_res_multimasks`.
+        - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `high_res_multimasks`.
+        - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
+          based on the output token from the SAM mask decoder.
+        """
+        B = backbone_features.size(0)
+        device = backbone_features.device
+        assert backbone_features.size(1) == self.sam_prompt_embed_dim
+        assert backbone_features.size(2) == self.sam_image_embedding_size
+        assert backbone_features.size(3) == self.sam_image_embedding_size
+
+        # a) Handle point prompts
+        if point_inputs is not None:
+            sam_point_coords = point_inputs["point_coords"]
+            sam_point_labels = point_inputs["point_labels"]
+            assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+        else:
+            # If no points are provided, pad with an empty point (with label -1)
+            sam_point_coords = torch.zeros(B, 1, 2, device=device)
+            sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+        # b) Handle mask prompts
+        if mask_inputs is not None:
+            # If mask_inputs is provided, downsize it into low-res mask input if needed
+            # and feed it as a dense mask prompt into the SAM mask encoder
+            assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+            if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+                sam_mask_prompt = F.interpolate(
+                    mask_inputs.float(),
+                    size=self.sam_prompt_encoder.mask_input_size,
+                    align_corners=False,
+                    mode="bilinear",
+                    antialias=True,  # use antialias for downsampling
+                )
+            else:
+                sam_mask_prompt = mask_inputs
+        else:
+            # Otherwise, simply feed None (and SAM's prompt encoder will add
+            # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=self.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > self.min_obj_score_logits + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + kf_ious = None + if multimask_output and self.samurai_mode: + if self.kf_mean is None and self.kf_covariance is None or self.stable_frames == 0: + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + non_zero_indices = torch.argwhere(high_res_masks[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_bbox = [0, 0, 0, 0] + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_bbox = [x_min.item(), y_min.item(), x_max.item(), y_max.item()] + self.kf_mean, self.kf_covariance = self.kf.initiate(self.kf.xyxy_to_xyah(high_res_bbox)) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + self.frame_cnt += 1 + self.stable_frames += 1 + elif self.stable_frames < self.stable_frames_threshold: + self.kf_mean, self.kf_covariance = self.kf.predict(self.kf_mean, self.kf_covariance) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + non_zero_indices = torch.argwhere(high_res_masks[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_bbox = [0, 0, 0, 0] + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_bbox = [x_min.item(), y_min.item(), x_max.item(), y_max.item()] + if ious[0][best_iou_inds] > self.stable_ious_threshold: + self.kf_mean, self.kf_covariance = self.kf.update(self.kf_mean, self.kf_covariance, self.kf.xyxy_to_xyah(high_res_bbox)) + self.stable_frames += 1 + else: + self.stable_frames = 0 + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + self.frame_cnt += 1 + else: + self.kf_mean, self.kf_covariance = self.kf.predict(self.kf_mean, self.kf_covariance) + high_res_multibboxes = [] + batch_inds = torch.arange(B, device=device) + for i in 
range(ious.shape[1]): + non_zero_indices = torch.argwhere(high_res_multimasks[batch_inds, i].unsqueeze(1)[0][0] > 0.0) + if len(non_zero_indices) == 0: + high_res_multibboxes.append([0, 0, 0, 0]) + else: + y_min, x_min = non_zero_indices.min(dim=0).values + y_max, x_max = non_zero_indices.max(dim=0).values + high_res_multibboxes.append([x_min.item(), y_min.item(), x_max.item(), y_max.item()]) + # compute the IoU between the predicted bbox and the high_res_multibboxes + kf_ious = torch.tensor(self.kf.compute_iou(self.kf_mean[:4], high_res_multibboxes), device=device) + # weighted iou + weighted_ious = self.kf_score_weight * kf_ious + (1 - self.kf_score_weight) * ious + best_iou_inds = torch.argmax(weighted_ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + + if False: + # make all these on cpu + self.history[self.frame_cnt] = { + "kf_predicted_bbox": self.kf.xyah_to_xyxy(self.kf_mean[:4]), + # "multi_masks": high_res_multimasks.cpu(), + "ious": ious.cpu(), + "multi_bboxes": high_res_multibboxes, + "kf_ious": kf_ious, + "weighted_ious": weighted_ious.cpu(), + "final_selection": best_iou_inds.cpu(), + } + self.frame_cnt += 1 + + if ious[0][best_iou_inds] < self.stable_ious_threshold: + self.stable_frames = 0 + else: + self.kf_mean, self.kf_covariance = self.kf.update(self.kf_mean, self.kf_covariance, self.kf.xyxy_to_xyah(high_res_multibboxes[best_iou_inds])) + elif multimask_output and not self.samurai_mode: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + best_iou_inds = 0 + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ious[0][best_iou_inds], + kf_ious[best_iou_inds] if kf_ious is not None else None, + ) + + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + """ + Directly turn binary `mask_inputs` into a output mask logits without using SAM. + (same input and output shapes as in _forward_sam_heads above). + """ + # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). 
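+        # (Worked example of the mapping below, added for clarity: an input pixel of 1
+        #  maps to 1 * 20.0 - 10.0 = +10.0 (sigmoid ~= 0.99995), and an input pixel of 0
+        #  maps to 0 * 20.0 - 10.0 = -10.0 (sigmoid ~= 4.5398e-05), so the binary mask
+        #  behaves like a set of near-certain logits.)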
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 + mask_inputs_float = mask_inputs.float() + high_res_masks = mask_inputs_float * out_scale + out_bias + low_res_masks = F.interpolate( + high_res_masks, + size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + # a dummy IoU prediction of all 1's under mask input + ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() + if not self.use_obj_ptrs_in_encoder: + # all zeros as a dummy object pointer (of shape [B, C]) + obj_ptr = torch.zeros( + mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device + ) + else: + # produce an object pointer using the SAM decoder from the mask input + _, _, _, _, _, obj_ptr, _, _, _ = self._forward_sam_heads( + backbone_features=backbone_features, + mask_inputs=self.mask_downsample(mask_inputs_float), + high_res_features=high_res_features, + ) + # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; + # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying + # on the object_scores from the SAM decoder. + is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + lambda_is_obj_appearing = is_obj_appearing.float() + object_score_logits = out_scale * lambda_is_obj_appearing + out_bias + if self.pred_obj_scores: + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_masks, + high_res_masks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def forward_image(self, img_batch: torch.Tensor): + """Get the image feature on the input batch.""" + backbone_out = self.image_encoder(img_batch) + if self.use_high_res_features_in_sam: + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + return backbone_out + + def _prepare_backbone_features(self, backbone_out): + """Prepare and flatten visual features.""" + backbone_out = backbone_out.copy() + assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"]) + assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels + + feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + + feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] + # flatten NxCxHxW to HWxNxC + vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] + vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] + + return backbone_out, vision_feats, vision_pos_embeds, feat_sizes + + def _prepare_memory_conditioned_features( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + ): + """Fuse the current frame's visual feature map with previous memory.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # 
top-level (lowest-resolution) feature size + device = current_vision_feats[-1].device + # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. + # In this case, we skip the fusion with any memory. + if self.num_maskmem == 0: # Disable memory and skip fusion + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + return pix_feat + + num_obj_ptr_tokens = 0 + tpos_sign_mul = -1 if track_in_reverse else 1 + # Step 1: condition the visual features of the current frame on previous memories + if not is_init_cond_frame: + # Retrieve the memories encoded with the maskmem backbone + to_cat_memory, to_cat_memory_pos_embed = [], [] + # Add conditioning frames's output first (all cond frames have t_pos=0 for + # when getting temporal positional embedding below) + assert len(output_dict["cond_frame_outputs"]) > 0 + # Select a maximum number of temporally closest cond frames for cross attention + cond_outputs = output_dict["cond_frame_outputs"] + selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( + frame_idx, cond_outputs, self.max_cond_frames_in_attn + ) + t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] + # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory + # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1 + # We also allow taking the memory frame non-consecutively (with stride>1), in which case + # we take (self.num_maskmem - 2) frames among every stride-th frames plus the last frame. + stride = 1 if self.training else self.memory_temporal_stride_for_eval + + if self.samurai_mode: + valid_indices = [] + if frame_idx > 1: # Ensure we have previous frames to evaluate + for i in range(frame_idx - 1, 1, -1): # Iterate backwards through previous frames + iou_score = output_dict["non_cond_frame_outputs"][i]["best_iou_score"] # Get mask affinity score + obj_score = output_dict["non_cond_frame_outputs"][i]["object_score_logits"] # Get object score + kf_score = output_dict["non_cond_frame_outputs"][i]["kf_score"] if "kf_score" in output_dict["non_cond_frame_outputs"][i] else None # Get motion score if available + # Check if the scores meet the criteria for being a valid index + if iou_score.item() > self.memory_bank_iou_threshold and \ + obj_score.item() > self.memory_bank_obj_score_threshold and \ + (kf_score is None or kf_score.item() > self.memory_bank_kf_score_threshold): + valid_indices.insert(0, i) + # Check the number of valid indices + if len(valid_indices) >= self.max_obj_ptrs_in_encoder - 1: + break + if frame_idx - 1 not in valid_indices: + valid_indices.append(frame_idx - 1) + for t_pos in range(1, self.num_maskmem): # Iterate over the number of mask memories + idx = t_pos - self.num_maskmem # Calculate the index for valid indices + if idx < -len(valid_indices): # Skip if index is out of bounds + continue + out = output_dict["non_cond_frame_outputs"].get(valid_indices[idx], None) # Get output for the valid index + if out is None: # If not found, check unselected outputs + out = unselected_cond_outputs.get(valid_indices[idx], None) + t_pos_and_prevs.append((t_pos, out)) # Append the temporal position and output to the list + else: + for t_pos in range(1, self.num_maskmem): + t_rel = self.num_maskmem - t_pos # how many frames before current frame + if t_rel == 1: + # for t_rel == 1, we take the last frame (regardless of r) + if not track_in_reverse: + # the frame immediately before this frame (i.e. 
frame_idx - 1) + prev_frame_idx = frame_idx - t_rel + else: + # the frame immediately after this frame (i.e. frame_idx + 1) + prev_frame_idx = frame_idx + t_rel + else: + # for t_rel >= 2, we take the memory frame from every r-th frames + if not track_in_reverse: + # first find the nearest frame among every r-th frames before this frame + # for r=1, this would be (frame_idx - 2) + prev_frame_idx = ((frame_idx - 2) // stride) * stride + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride + else: + # first find the nearest frame among every r-th frames after this frame + # for r=1, this would be (frame_idx + 2) + prev_frame_idx = -(-(frame_idx + 2) // stride) * stride + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride + out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) + if out is None: + # If an unselected conditioning frame is among the last (self.num_maskmem - 1) + # frames, we still attend to it as if it's a non-conditioning frame. + out = unselected_cond_outputs.get(prev_frame_idx, None) + t_pos_and_prevs.append((t_pos, out)) + + for t_pos, prev in t_pos_and_prevs: + if prev is None: + continue # skip padding frames + # "maskmem_features" might have been offloaded to CPU in demo use cases, + # so we load it back to GPU (it's a no-op if it's already on GPU). + feats = prev["maskmem_features"].to(device, non_blocking=True) + to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) + # Spatial positional encoding (it might have been offloaded to CPU in eval) + maskmem_enc = prev["maskmem_pos_enc"][-1].to(device) + maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) + # Temporal positional encoding + maskmem_enc = ( + maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1] + ) + to_cat_memory_pos_embed.append(maskmem_enc) + + # Construct the list of past object pointers + if self.use_obj_ptrs_in_encoder: + max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) + # First add those object pointers from selected conditioning frames + # (optionally, only include object pointers in the past during evaluation) + if not self.training and self.only_obj_ptrs_in_the_past_for_eval: + ptr_cond_outputs = { + t: out + for t, out in selected_cond_outputs.items() + if (t >= frame_idx if track_in_reverse else t <= frame_idx) + } + else: + ptr_cond_outputs = selected_cond_outputs + pos_and_ptrs = [ + # Temporal pos encoding contains how far away each pointer is from current frame + ( + ( + (frame_idx - t) * tpos_sign_mul + if self.use_signed_tpos_enc_to_obj_ptrs + else abs(frame_idx - t) + ), + out["obj_ptr"], + ) + for t, out in ptr_cond_outputs.items() + ] + # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame + for t_diff in range(1, max_obj_ptrs_in_encoder): + t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff + if t < 0 or (num_frames is not None and t >= num_frames): + break + out = output_dict["non_cond_frame_outputs"].get( + t, unselected_cond_outputs.get(t, None) + ) + if out is not None: + pos_and_ptrs.append((t_diff, out["obj_ptr"])) + # If we have at least one object pointer, add them to the across attention + if len(pos_and_ptrs) > 0: + pos_list, ptrs_list = zip(*pos_and_ptrs) + # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape + obj_ptrs = torch.stack(ptrs_list, dim=0) + # a temporal positional embedding based on how far each object pointer is from + # the current frame (sine 
embedding normalized by the max pointer num). + if self.add_tpos_enc_to_obj_ptrs: + t_diff_max = max_obj_ptrs_in_encoder - 1 + tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim + obj_pos = torch.tensor(pos_list).to( + device=device, non_blocking=True + ) + obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim) + obj_pos = self.obj_ptr_tpos_proj(obj_pos) + obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim) + else: + obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim) + if self.mem_dim < C: + # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C + obj_ptrs = obj_ptrs.reshape( + -1, B, C // self.mem_dim, self.mem_dim + ) + obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) + obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) + to_cat_memory.append(obj_ptrs) + to_cat_memory_pos_embed.append(obj_pos) + num_obj_ptr_tokens = obj_ptrs.shape[0] + else: + num_obj_ptr_tokens = 0 + else: + # for initial conditioning frames, encode them without using any previous memory + if self.directly_add_no_mem_embed: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + # Use a dummy token on the first frame (to avoid empty memory input to tranformer encoder) + to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)] + to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] + + # Step 2: Concatenate the memories and forward through the transformer encoder + memory = torch.cat(to_cat_memory, dim=0) + memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + + pix_feat_with_mem = self.memory_attention( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + memory=memory, + memory_pos=memory_pos_embed, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + def _encode_new_memory( + self, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + object_score_logits, + is_mask_from_pts, + ): + """Encode the current image and its prediction into a memory feature.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1). 
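+            # (Note added for clarity: this keeps, at each pixel, only the logits of the
+            #  highest-scoring object in the batch and clamps every other object's logits
+            #  to at most -10.0; see `_apply_non_overlapping_constraints`, defined later
+            #  in this class, for the implementation.)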
+ pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts + if binarize and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + maskmem_out = self.memory_encoder( + pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + ) + maskmem_features = maskmem_out["vision_features"] + maskmem_pos_enc = maskmem_out["vision_pos_enc"] + # add a no-object embedding to the spatial memory to indicate that the frame + # is predicted to be occluded (i.e. no object is appearing in the frame) + if self.no_obj_embed_spatial is not None: + is_obj_appearing = (object_score_logits > 0).float() + maskmem_features += ( + 1 - is_obj_appearing[..., None, None] + ) * self.no_obj_embed_spatial[..., None, None].expand( + *maskmem_features.shape + ) + + return maskmem_features, maskmem_pos_enc + + def _track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse, + prev_sam_mask_logits, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None and self.use_mask_input_as_output_without_sam: + # When use_mask_input_as_output_without_sam=True, we directly output the mask input + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. + pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. 
in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + ) + + return current_out, sam_outputs, high_res_features, pix_feat + + def _encode_memory_in_output( + self, + current_vision_feats, + feat_sizes, + point_inputs, + run_mem_encoder, + high_res_masks, + object_score_logits, + current_out, + ): + if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + object_score_logits=object_score_logits, + is_mask_from_pts=(point_inputs is not None), + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). 
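+        # (For illustration: in practice this is typically the low-res `pred_masks`
+        #  tensor returned by an earlier call on the same frame, with shape roughly
+        #  [B, 1, image_size // 4, image_size // 4].)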
+ prev_sam_mask_logits=None, + ): + current_out, sam_outputs, _, _ = self._track_step( + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse, + prev_sam_mask_logits, + ) + + ( + _, + _, + _, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + best_iou_score, + kf_ious + ) = sam_outputs + + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + current_out["best_iou_score"] = best_iou_score + current_out["kf_ious"] = kf_ious + if not self.training: + # Only add this in inference (to avoid unused param in activation checkpointing; + # it's mainly used in the demo to encode spatial memories w/ consolidated masks) + current_out["object_score_logits"] = object_score_logits + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + self._encode_memory_in_output( + current_vision_feats, + feat_sizes, + point_inputs, + run_mem_encoder, + high_res_masks, + object_score_logits, + current_out, + ) + + return current_out + + def _use_multimask(self, is_init_cond_frame, point_inputs): + """Whether to use multimask output in the SAM head.""" + num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1) + multimask_output = ( + self.multimask_output_in_sam + and (is_init_cond_frame or self.multimask_output_for_tracking) + and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) + ) + return multimask_output + + def _apply_non_overlapping_constraints(self, pred_masks): + """ + Apply non-overlapping constraints to the object scores in pred_masks. Here we + keep only the highest scoring object at each spatial location in pred_masks. + """ + batch_size = pred_masks.size(0) + if batch_size == 1: + return pred_masks + + device = pred_masks.device + # "max_obj_inds": object index of the object with the highest score at each location + max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) + # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks` + batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] + keep = max_obj_inds == batch_obj_inds + # suppress overlapping regions' scores below -10.0 so that the foreground regions + # don't overlap (here sigmoid(-10.0)=4.5398e-05) + pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) + return pred_masks diff --git a/sam2/modeling/sam2_utils.py b/sam2/modeling/sam2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e16caae3a9a49e451b2d03d1ee60c47f8e9ed23c --- /dev/null +++ b/sam2/modeling/sam2_utils.py @@ -0,0 +1,323 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import copy +from typing import Tuple + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from sam2.utils.misc import mask_to_box + + +def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num): + """ + Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs` + that are temporally closest to the current frame at `frame_idx`. 
Here, we take + - a) the closest conditioning frame before `frame_idx` (if any); + - b) the closest conditioning frame after `frame_idx` (if any); + - c) any other temporally closest conditioning frames until reaching a total + of `max_cond_frame_num` conditioning frames. + + Outputs: + - selected_outputs: selected items (keys & values) from `cond_frame_outputs`. + - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`. + """ + if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num: + selected_outputs = cond_frame_outputs + unselected_outputs = {} + else: + assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames" + selected_outputs = {} + + # the closest conditioning frame before `frame_idx` (if any) + idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None) + if idx_before is not None: + selected_outputs[idx_before] = cond_frame_outputs[idx_before] + + # the closest conditioning frame after `frame_idx` (if any) + idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None) + if idx_after is not None: + selected_outputs[idx_after] = cond_frame_outputs[idx_after] + + # add other temporally closest conditioning frames until reaching a total + # of `max_cond_frame_num` conditioning frames. + num_remain = max_cond_frame_num - len(selected_outputs) + inds_remain = sorted( + (t for t in cond_frame_outputs if t not in selected_outputs), + key=lambda x: abs(x - frame_idx), + )[:num_remain] + selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain) + unselected_outputs = { + t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs + } + + return selected_outputs, unselected_outputs + + +def get_1d_sine_pe(pos_inds, dim, temperature=10000): + """ + Get 1D sine positional embedding as in the original Transformer paper. 
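+
+    A brief sketch of what the code below computes (added for clarity), with D = `dim`:
+    the first D/2 output channels are sin(pos / t_k) and the last D/2 channels are
+    cos(pos / t_k), where t_k = temperature ** (2 * (k // 2) / (D / 2)) for
+    k = 0, ..., D/2 - 1; that is, the sin and cos blocks are concatenated rather than
+    interleaved.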
+ """ + pe_dim = dim // 2 + dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) + dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) + + pos_embed = pos_inds.unsqueeze(-1) / dim_t + pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) + return pos_embed + + +def get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +class DropPath(nn.Module): + # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py + def __init__(self, drop_prob=0.0, scale_by_keep=True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + if self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and self.scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + activation: nn.Module = nn.ReLU, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + self.act = activation() + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +def sample_box_points( + masks: torch.Tensor, + noise: float = 0.1, # SAM default + noise_bound: int = 20, # SAM default + top_left_label: int = 2, + bottom_right_label: int = 3, +) -> Tuple[np.array, np.array]: + """ + Sample a noised version of the top left and bottom right corners of a given `bbox` + + Inputs: + - masks: [B, 1, H,W] boxes, dtype=torch.Tensor + - noise: noise as a fraction of box width and height, dtype=float + - noise_bound: maximum amount of noise (in pure pixesl), dtype=int + + Returns: + - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float + - box_labels: [B, num_pt], label 2 is reserverd for top left and 3 for 
bottom right corners, dtype=torch.int32 + """ + device = masks.device + box_coords = mask_to_box(masks) + B, _, H, W = masks.shape + box_labels = torch.tensor( + [top_left_label, bottom_right_label], dtype=torch.int, device=device + ).repeat(B) + if noise > 0.0: + if not isinstance(noise_bound, torch.Tensor): + noise_bound = torch.tensor(noise_bound, device=device) + bbox_w = box_coords[..., 2] - box_coords[..., 0] + bbox_h = box_coords[..., 3] - box_coords[..., 1] + max_dx = torch.min(bbox_w * noise, noise_bound) + max_dy = torch.min(bbox_h * noise, noise_bound) + box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1 + box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1) + + box_coords = box_coords + box_noise + img_bounds = ( + torch.tensor([W, H, W, H], device=device) - 1 + ) # uncentered pixel coords + box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping + + box_coords = box_coords.reshape(-1, 2, 2) # always 2 points + box_labels = box_labels.reshape(-1, 2) + return box_coords, box_labels + + +def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1): + """ + Sample `num_pt` random points (along with their labels) independently from the error regions. + + Inputs: + - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool + - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None + - num_pt: int, number of points to sample independently for each of the B error maps + + Outputs: + - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point + - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means + negative clicks + """ + if pred_masks is None: # if pred_masks is not provided, treat it as empty + pred_masks = torch.zeros_like(gt_masks) + assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1 + assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape + assert num_pt >= 0 + + B, _, H_im, W_im = gt_masks.shape + device = gt_masks.device + + # false positive region, a new point sampled in this region should have + # negative label to correct the FP error + fp_masks = ~gt_masks & pred_masks + # false negative region, a new point sampled in this region should have + # positive label to correct the FN error + fn_masks = gt_masks & ~pred_masks + # whether the prediction completely match the ground-truth on each mask + all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2) + all_correct = all_correct[..., None, None] + + # channel 0 is FP map, while channel 1 is FN map + pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device) + # sample a negative new click from FP region or a positive new click + # from FN region, depend on where the maximum falls, + # and in case the predictions are all correct (no FP or FN), we just + # sample a negative click from the background region + pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks) + pts_noise[..., 1] *= fn_masks + pts_idx = pts_noise.flatten(2).argmax(dim=2) + labels = (pts_idx % 2).to(torch.int32) + pts_idx = pts_idx // 2 + pts_x = pts_idx % W_im + pts_y = pts_idx // W_im + points = torch.stack([pts_x, pts_y], dim=2).to(torch.float) + return points, labels + + +def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True): + """ + Sample 1 random point (along with its label) from the center of each error region, + that is, the point with the largest distance to the boundary of each error region. 
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py + + Inputs: + - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool + - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None + - padding: if True, pad with boundary of 1 px for distance transform + + Outputs: + - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point + - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks + """ + import cv2 + + if pred_masks is None: + pred_masks = torch.zeros_like(gt_masks) + assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1 + assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape + + B, _, _, W_im = gt_masks.shape + device = gt_masks.device + + # false positive region, a new point sampled in this region should have + # negative label to correct the FP error + fp_masks = ~gt_masks & pred_masks + # false negative region, a new point sampled in this region should have + # positive label to correct the FN error + fn_masks = gt_masks & ~pred_masks + + fp_masks = fp_masks.cpu().numpy() + fn_masks = fn_masks.cpu().numpy() + points = torch.zeros(B, 1, 2, dtype=torch.float) + labels = torch.ones(B, 1, dtype=torch.int32) + for b in range(B): + fn_mask = fn_masks[b, 0] + fp_mask = fp_masks[b, 0] + if padding: + fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant") + fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant") + # compute the distance of each point in FN/FP region to its boundary + fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0) + fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0) + if padding: + fn_mask_dt = fn_mask_dt[1:-1, 1:-1] + fp_mask_dt = fp_mask_dt[1:-1, 1:-1] + + # take the point in FN/FP region with the largest distance to its boundary + fn_mask_dt_flat = fn_mask_dt.reshape(-1) + fp_mask_dt_flat = fp_mask_dt.reshape(-1) + fn_argmax = np.argmax(fn_mask_dt_flat) + fp_argmax = np.argmax(fp_mask_dt_flat) + is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax] + pt_idx = fn_argmax if is_positive else fp_argmax + points[b, 0, 0] = pt_idx % W_im # x + points[b, 0, 1] = pt_idx // W_im # y + labels[b, 0] = int(is_positive) + + points = points.to(device) + labels = labels.to(device) + return points, labels + + +def get_next_point(gt_masks, pred_masks, method): + if method == "uniform": + return sample_random_points_from_errors(gt_masks, pred_masks) + elif method == "center": + return sample_one_point_from_error_center(gt_masks, pred_masks) + else: + raise ValueError(f"unknown sampling method {method}") diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..84b01b1f764ece7a545cc254b9ad407b5b2e11c5 --- /dev/null +++ b/sam2/sam2_image_predictor.py @@ -0,0 +1,469 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
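+
+# Typical usage of `SAM2ImagePredictor` (a rough sketch for orientation only; the
+# config/checkpoint names and click coordinates are placeholders, not values defined
+# in this file):
+#
+#     import numpy as np
+#     from sam2.build_sam import build_sam2
+#     from sam2.sam2_image_predictor import SAM2ImagePredictor
+#
+#     predictor = SAM2ImagePredictor(build_sam2(config_file, ckpt_path))
+#     predictor.set_image(image)  # HxWxC RGB uint8 numpy array or PIL Image
+#     masks, ious, low_res_logits = predictor.predict(
+#         point_coords=np.array([[x, y]]),
+#         point_labels=np.array([1]),  # 1 = foreground click, 0 = background
+#     )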
+ +import logging + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL.Image import Image + +from sam2.modeling.sam2_base import SAM2Base + +from sam2.utils.transforms import SAM2Transforms + + +class SAM2ImagePredictor: + def __init__( + self, + sam_model: SAM2Base, + mask_threshold=0.0, + max_hole_area=0.0, + max_sprinkle_area=0.0, + **kwargs, + ) -> None: + """ + Uses SAM-2 to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam-2): The model to use for mask prediction. + mask_threshold (float): The threshold to use when converting mask logits + to binary masks. Masks are thresholded at 0 by default. + max_hole_area (int): If max_hole_area > 0, we fill small holes in up to + the maximum area of max_hole_area in low_res_masks. + max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to + the maximum area of max_sprinkle_area in low_res_masks. + """ + super().__init__() + self.model = sam_model + self._transforms = SAM2Transforms( + resolution=self.model.image_size, + mask_threshold=mask_threshold, + max_hole_area=max_hole_area, + max_sprinkle_area=max_sprinkle_area, + ) + + # Predictor state + self._is_image_set = False + self._features = None + self._orig_hw = None + # Whether the predictor is set for single image or a batch of images + self._is_batch = False + + # Predictor config + self.mask_threshold = mask_threshold + + # Spatial dim for backbone feature maps + isize = self.model.image_size + self._bb_feat_sizes = [ + (isize//4, isize//4), + (isize//8, isize//8), + (isize//16, isize//16), + ] + + @classmethod + def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor": + """ + Load a pretrained model from the Hugging Face hub. + + Arguments: + model_id (str): The Hugging Face repository ID. + **kwargs: Additional arguments to pass to the model constructor. + + Returns: + (SAM2ImagePredictor): The loaded model. + """ + from sam2.build_sam import build_sam2_hf + + sam_model = build_sam2_hf(model_id, **kwargs) + return cls(sam_model, **kwargs) + + @torch.no_grad() + def set_image( + self, + image: Union[np.ndarray, Image], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image + with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + self.reset_predictor() + # Transform the image to the form expected by the model + if isinstance(image, np.ndarray): + logging.info("For numpy array image, we assume (HxWxC) format") + self._orig_hw = [image.shape[:2]] + elif isinstance(image, Image): + w, h = image.size + self._orig_hw = [(h, w)] + else: + raise NotImplementedError("Image format not supported") + + input_image = self._transforms(image) + input_image = input_image[None, ...].to(self.device) + + assert ( + len(input_image.shape) == 4 and input_image.shape[1] == 3 + ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" + logging.info("Computing image embeddings for the provided image...") + backbone_out = self.model.forward_image(input_image) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. 
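+        # (Shapes for orientation, assuming the default image_size of 1024: the three
+        #  cached feature maps built below are 256x256, 128x128 and 64x64, matching
+        #  self._bb_feat_sizes, with the 64x64 map used as the main image embedding.)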
map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + # breakpoint() + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + logging.info("Image embeddings computed.") + # breakpoint() + + @torch.no_grad() + def set_image_batch( + self, + image_list: List[Union[np.ndarray]], + ) -> None: + """ + Calculates the image embeddings for the provided image batch, allowing + masks to be predicted with the 'predict_batch' method. + + Arguments: + image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray + with pixel values in [0, 255]. + """ + self.reset_predictor() + assert isinstance(image_list, list) + self._orig_hw = [] + for image in image_list: + assert isinstance( + image, np.ndarray + ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC" + self._orig_hw.append(image.shape[:2]) + # Transform the image to the form expected by the model + img_batch = self._transforms.forward_batch(image_list) + img_batch = img_batch.to(self.device) + batch_size = img_batch.shape[0] + assert ( + len(img_batch.shape) == 4 and img_batch.shape[1] == 3 + ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}" + logging.info("Computing image embeddings for the provided images...") + backbone_out = self.model.forward_image(img_batch) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + self._is_batch = True + logging.info("Image embeddings computed.") + + def predict_batch( + self, + point_coords_batch: List[np.ndarray] = None, + point_labels_batch: List[np.ndarray] = None, + box_batch: List[np.ndarray] = None, + mask_input_batch: List[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images. + It returns a tuple of lists of masks, ious, and low_res_masks_logits. + """ + assert self._is_batch, "This function should only be used when in batched mode" + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image_batch(...) before mask prediction." 
+ ) + num_images = len(self._features["image_embed"]) + all_masks = [] + all_ious = [] + all_low_res_masks = [] + for img_idx in range(num_images): + # Transform input prompts + point_coords = ( + point_coords_batch[img_idx] if point_coords_batch is not None else None + ) + point_labels = ( + point_labels_batch[img_idx] if point_labels_batch is not None else None + ) + box = box_batch[img_idx] if box_batch is not None else None + mask_input = ( + mask_input_batch[img_idx] if mask_input_batch is not None else None + ) + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, + point_labels, + box, + mask_input, + normalize_coords, + img_idx=img_idx, + ) + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + img_idx=img_idx, + ) + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = ( + iou_predictions.squeeze(0).float().detach().cpu().numpy() + ) + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + all_masks.append(masks_np) + all_ious.append(iou_predictions_np) + all_low_res_masks.append(low_res_masks_np) + + return all_masks, all_ious, all_low_res_masks + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." 
+ ) + + # Transform input prompts + + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, point_labels, box, mask_input, normalize_coords + ) + + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + ) + + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + def _prep_prompts( + self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1 + ): + + unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + unnorm_coords = self._transforms.transform_coords( + point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) + labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + if len(unnorm_coords.shape) == 2: + unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...] + if box is not None: + box = torch.as_tensor(box, dtype=torch.float, device=self.device) + unnorm_box = self._transforms.transform_boxes( + box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) # Bx2x2 + if mask_logits is not None: + mask_input = torch.as_tensor( + mask_logits, dtype=torch.float, device=self.device + ) + if len(mask_input.shape) == 3: + mask_input = mask_input[None, :, :, :] + return mask_input, unnorm_coords, labels, unnorm_box + + @torch.no_grad() + def _predict( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + img_idx: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using SAM2Transforms. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. 
+ + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + if point_coords is not None: + concat_points = (point_coords, point_labels) + else: + concat_points = None + + # Embed prompts + if boxes is not None: + box_coords = boxes.reshape(-1, 2, 2) + box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device) + box_labels = box_labels.repeat(boxes.size(0), 1) + # we merge "boxes" and "points" into a single "concat_points" input (where + # boxes are added at the beginning) to sam_prompt_encoder + if concat_points is not None: + concat_coords = torch.cat([box_coords, concat_points[0]], dim=1) + concat_labels = torch.cat([box_labels, concat_points[1]], dim=1) + concat_points = (concat_coords, concat_labels) + else: + concat_points = (box_coords, box_labels) + + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + points=concat_points, + boxes=None, + masks=mask_input, + ) + + # Predict masks + batched_mode = ( + concat_points is not None and concat_points[0].shape[0] > 1 + ) # multi object prediction + high_res_features = [ + feat_level[img_idx].unsqueeze(0) + for feat_level in self._features["high_res_feats"] + ] + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), + image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=batched_mode, + high_res_features=high_res_features, + ) + + # Upscale the masks to the original image resolution + masks = self._transforms.postprocess_masks( + low_res_masks, self._orig_hw[img_idx] + ) + low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0) + if not return_logits: + masks = masks > self.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self._features is not None + ), "Features must exist if an image has been set." + return self._features["image_embed"] + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_predictor(self) -> None: + """ + Resets the image embeddings and other state variables. + """ + self._is_image_set = False + self._features = None + self._orig_hw = None + self._is_batch = False diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..1f2626ceba4f8f079e66553e549db4fb8d28a777 --- /dev/null +++ b/sam2/sam2_video_predictor.py @@ -0,0 +1,1422 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import warnings
+from collections import OrderedDict
+
+import torch
+import torch.nn.functional as F
+
+from tqdm import tqdm
+
+from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
+from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
+
+
+class SAM2VideoPredictor(SAM2Base):
+    """The predictor class to handle user interactions and manage inference states."""
+
+    def __init__(
+        self,
+        fill_hole_area=0,
+        # whether to apply non-overlapping constraints on the output object masks
+        non_overlap_masks=False,
+        # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
+        # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
+        clear_non_cond_mem_around_input=False,
+        # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True).
+        clear_non_cond_mem_for_multi_obj=False,
+        # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
+        # if `add_all_frames_to_correct_as_cond` is False, we keep the conditioning frame list limited to the initial conditioning frames
+        add_all_frames_to_correct_as_cond=False,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.fill_hole_area = fill_hole_area
+        self.non_overlap_masks = non_overlap_masks
+        self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
+        self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
+        self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
+
+    @torch.inference_mode()
+    def init_state(
+        self,
+        video_path,
+        offload_video_to_cpu=False,
+        offload_state_to_cpu=False,
+        async_loading_frames=False,
+    ):
+        """Initialize an inference state."""
+        compute_device = self.device  # device of the model
+        images, video_height, video_width = load_video_frames(
+            video_path=video_path,
+            image_size=self.image_size,
+            offload_video_to_cpu=offload_video_to_cpu,
+            async_loading_frames=async_loading_frames,
+            compute_device=compute_device,
+        )
+        inference_state = {}
+        inference_state["images"] = images
+        inference_state["num_frames"] = len(images)
+        # whether to offload the video frames to CPU memory
+        # turning on this option saves the GPU memory with only a very small overhead
+        inference_state["offload_video_to_cpu"] = offload_video_to_cpu
+        # whether to offload the inference state to CPU memory
+        # turning on this option saves the GPU memory at the cost of a lower tracking fps
+        # (e.g.
in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object + # and from 24 to 21 when tracking two objects) + inference_state["offload_state_to_cpu"] = offload_state_to_cpu + # the original video height and width, used for resizing final output scores + inference_state["video_height"] = video_height + inference_state["video_width"] = video_width + inference_state["device"] = compute_device + if offload_state_to_cpu: + inference_state["storage_device"] = torch.device("cpu") + else: + inference_state["storage_device"] = compute_device + # inputs on each frame + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + # visual features on a small number of recently visited frames for quick interactions + inference_state["cached_features"] = {} + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # mapping between client-side object id and model-side object index + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + # Slice (view) of each object tracking results, sharing the same memory with "output_dict" + inference_state["output_dict_per_obj"] = {} + # A temporary storage to hold new outputs when user interact with a frame + # to add clicks or mask (it's merged into "output_dict" before propagation starts) + inference_state["temp_output_dict_per_obj"] = {} + # Frames that already holds consolidated outputs from click or mask inputs + # (we directly use their consolidated outputs during tracking) + # metadata for each tracking frame (e.g. which direction it's tracked) + inference_state["frames_tracked_per_obj"] = {} + # Warm up the visual backbone and cache the image feature on frame 0 + self._get_image_feature(inference_state, frame_idx=0, batch_size=1) + return inference_state + + @classmethod + def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor": + """ + Load a pretrained model from the Hugging Face hub. + + Arguments: + model_id (str): The Hugging Face repository ID. + **kwargs: Additional arguments to pass to the model constructor. + + Returns: + (SAM2VideoPredictor): The loaded model. + """ + from sam2.build_sam import build_sam2_video_predictor_hf + + sam_model = build_sam2_video_predictor_hf(model_id, **kwargs) + return sam_model + + def _obj_id_to_idx(self, inference_state, obj_id): + """Map client-side object id to model-side object index.""" + obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None) + if obj_idx is not None: + return obj_idx + + # We always allow adding new objects (including after tracking starts). 
+ allow_new_object = True + if allow_new_object: + # get the next object slot + obj_idx = len(inference_state["obj_id_to_idx"]) + inference_state["obj_id_to_idx"][obj_id] = obj_idx + inference_state["obj_idx_to_id"][obj_idx] = obj_id + inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"]) + # set up input and output structures for this object + inference_state["point_inputs_per_obj"][obj_idx] = {} + inference_state["mask_inputs_per_obj"][obj_idx] = {} + inference_state["output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["temp_output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["frames_tracked_per_obj"][obj_idx] = {} + return obj_idx + else: + raise RuntimeError( + f"Cannot add new object id {obj_id} after tracking starts. " + f"All existing object ids: {inference_state['obj_ids']}. " + f"Please call 'reset_state' to restart from scratch." + ) + + def _obj_idx_to_id(self, inference_state, obj_idx): + """Map model-side object index to client-side object id.""" + return inference_state["obj_idx_to_id"][obj_idx] + + def _get_obj_num(self, inference_state): + """Get the total number of unique object ids received so far in this session.""" + return len(inference_state["obj_idx_to_id"]) + + @torch.inference_mode() + def add_new_points_or_box( + self, + inference_state, + frame_idx, + obj_id, + points=None, + labels=None, + clear_old_points=True, + normalize_coords=True, + box=None, + ): + """Add new points to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if (points is not None) != (labels is not None): + raise ValueError("points and labels must be provided together") + if points is None and box is None: + raise ValueError("at least one of points or box must be provided as input") + + if points is None: + points = torch.zeros(0, 2, dtype=torch.float32) + elif not isinstance(points, torch.Tensor): + points = torch.tensor(points, dtype=torch.float32) + if labels is None: + labels = torch.zeros(0, dtype=torch.int32) + elif not isinstance(labels, torch.Tensor): + labels = torch.tensor(labels, dtype=torch.int32) + if points.dim() == 2: + points = points.unsqueeze(0) # add batch dimension + if labels.dim() == 1: + labels = labels.unsqueeze(0) # add batch dimension + + # If `box` is provided, we add it as the first two points with labels 2 and 3 + # along with the user-provided points (consistent with how SAM 2 is trained). 
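+        # Illustrative example (hypothetical values, not from the original code): a box
+        # prompt given as box = [x_min, y_min, x_max, y_max] = [10.0, 20.0, 200.0, 300.0]
+        # is reshaped below into the two pseudo-points (10.0, 20.0) and (200.0, 300.0),
+        # carrying the special labels 2 (top-left corner) and 3 (bottom-right corner).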
+ if box is not None: + if not clear_old_points: + raise ValueError( + "cannot add box without clearing old points, since " + "box prompt must be provided before any point prompt " + "(please use clear_old_points=True instead)" + ) + if not isinstance(box, torch.Tensor): + box = torch.tensor(box, dtype=torch.float32, device=points.device) + box_coords = box.reshape(1, 2, 2) + box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device) + box_labels = box_labels.reshape(1, 2) + points = torch.cat([box_coords, points], dim=1) + labels = torch.cat([box_labels, labels], dim=1) + + if normalize_coords: + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + points = points / torch.tensor([video_W, video_H]).to(points.device) + # scale the (normalized) coordinates by the model's internal image size + points = points * self.image_size + points = points.to(inference_state["device"]) + labels = labels.to(inference_state["device"]) + + if not clear_old_points: + point_inputs = point_inputs_per_frame.get(frame_idx, None) + else: + point_inputs = None + point_inputs = concat_points(point_inputs, points, labels) + + point_inputs_per_frame[frame_idx] = point_inputs + mask_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. + obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx] + is_init_cond_frame = frame_idx not in obj_frames_tracked + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = obj_frames_tracked[frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + # Get any previously predicted mask logits on this object and feed it along with + # the new clicks into the SAM mask decoder. + prev_sam_mask_logits = None + # lookup temporary output dict first, which contains the most recent output + # (if not found, then lookup conditioning and non-conditioning frame output) + prev_out = obj_temp_output_dict[storage_key].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx) + + if prev_out is not None and prev_out["pred_masks"] is not None: + device = inference_state["device"] + prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True) + # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues. 
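+            # (for reference, the same [-32.0, 32.0] clipping is applied to the low-res
+            # mask logits returned by the image predictor in this codebase)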
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=point_inputs, + mask_inputs=None, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + prev_sam_mask_logits=prev_sam_mask_logits, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + def add_new_points(self, *args, **kwargs): + """Deprecated method. Please use `add_new_points_or_box` instead.""" + return self.add_new_points_or_box(*args, **kwargs) + + @torch.inference_mode() + def add_new_mask( + self, + inference_state, + frame_idx, + obj_id, + mask, + ): + """Add new mask to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if not isinstance(mask, torch.Tensor): + mask = torch.tensor(mask, dtype=torch.bool) + assert mask.dim() == 2 + mask_H, mask_W = mask.shape + mask_inputs_orig = mask[None, None] # add batch and channel dimension + mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"]) + + # resize the mask if it doesn't match the model's image size + if mask_H != self.image_size or mask_W != self.image_size: + mask_inputs = torch.nn.functional.interpolate( + mask_inputs_orig, + size=(self.image_size, self.image_size), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + mask_inputs = (mask_inputs >= 0.5).float() + else: + mask_inputs = mask_inputs_orig + + mask_inputs_per_frame[frame_idx] = mask_inputs + point_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. 
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx] + is_init_cond_frame = frame_idx not in obj_frames_tracked + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = obj_frames_tracked[frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=None, + mask_inputs=mask_inputs, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + def _get_orig_video_res_output(self, inference_state, any_res_masks): + """ + Resize the object scores to the original video resolution (video_res_masks) + and apply non-overlapping constraints for final output. + """ + device = inference_state["device"] + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + any_res_masks = any_res_masks.to(device, non_blocking=True) + if any_res_masks.shape[-2:] == (video_H, video_W): + video_res_masks = any_res_masks + else: + video_res_masks = torch.nn.functional.interpolate( + any_res_masks, + size=(video_H, video_W), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks: + video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) + return any_res_masks, video_res_masks + + def _consolidate_temp_output_across_obj( + self, + inference_state, + frame_idx, + is_cond, + consolidate_at_video_res=False, + ): + """ + Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on + a frame into a single output for all objects, including + 1) fill any missing objects either from `output_dict_per_obj` (if they exist in + `output_dict_per_obj` for this frame) or leave them as placeholder values + (if they don't exist in `output_dict_per_obj` for this frame); + 2) if specified, rerun memory encoder after apply non-overlapping constraints + on the object scores. 
+        """
+        batch_size = self._get_obj_num(inference_state)
+        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+        # Optionally, we allow consolidating the temporary outputs at the original
+        # video resolution (to provide a better editing experience for mask prompts).
+        if consolidate_at_video_res:
+            consolidated_H = inference_state["video_height"]
+            consolidated_W = inference_state["video_width"]
+            consolidated_mask_key = "pred_masks_video_res"
+        else:
+            consolidated_H = consolidated_W = self.image_size // 4
+            consolidated_mask_key = "pred_masks"
+
+        # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
+        # will be added when rerunning the memory encoder after applying non-overlapping
+        # constraints to object scores. Its "pred_masks" are prefilled with a large
+        # negative value (NO_OBJ_SCORE) to represent missing objects.
+        consolidated_out = {
+            consolidated_mask_key: torch.full(
+                size=(batch_size, 1, consolidated_H, consolidated_W),
+                fill_value=NO_OBJ_SCORE,
+                dtype=torch.float32,
+                device=inference_state["storage_device"],
+            ),
+        }
+        for obj_idx in range(batch_size):
+            obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+            obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+            out = obj_temp_output_dict[storage_key].get(frame_idx, None)
+            # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
+            # we fall back and look up its previous output in "output_dict_per_obj".
+            # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
+            # "output_dict_per_obj" to find a previous output for this object.
+            if out is None:
+                out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
+            if out is None:
+                out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
+            # If the object doesn't appear in "output_dict_per_obj" either, we skip it
+            # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
+            # placeholder above).
+            if out is None:
+                continue
+            # Add the temporary object output mask to consolidated output mask
+            obj_mask = out["pred_masks"]
+            consolidated_pred_masks = consolidated_out[consolidated_mask_key]
+            if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
+                consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
+            else:
+                # Resize first if temporary object mask has a different resolution
+                resized_obj_mask = torch.nn.functional.interpolate(
+                    obj_mask,
+                    size=consolidated_pred_masks.shape[-2:],
+                    mode="bilinear",
+                    align_corners=False,
+                )
+                consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
+
+        return consolidated_out
+
+    @torch.inference_mode()
+    def propagate_in_video_preflight(self, inference_state):
+        """Prepare inference_state and consolidate temporary outputs before tracking."""
+        # Check and make sure that every object has received input points or masks.
+        batch_size = self._get_obj_num(inference_state)
+        if batch_size == 0:
+            raise RuntimeError(
+                "No input points or masks are provided for any object; please add inputs first."
+            )
+
+        # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
+        # add them into "output_dict_per_obj".
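+        # (for each object: run the memory encoder on any interacted frame whose memory
+        # features are still missing, move the temporary outputs into the per-object
+        # output dict used for tracking, and then clear the temporary storage)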
+ for obj_idx in range(batch_size): + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + for is_cond in [False, True]: + # Separately consolidate conditioning and non-conditioning temp outputs + storage_key = ( + "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + ) + # Find all the frames that contain temporary outputs for any objects + # (these should be the frames that have just received clicks for mask inputs + # via `add_new_points_or_box` or `add_new_mask`) + for frame_idx, out in obj_temp_output_dict[storage_key].items(): + # Run memory encoder on the temporary outputs (if the memory feature is missing) + if out["maskmem_features"] is None: + high_res_masks = torch.nn.functional.interpolate( + out["pred_masks"].to(inference_state["device"]), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + maskmem_features, maskmem_pos_enc = self._run_memory_encoder( + inference_state=inference_state, + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + high_res_masks=high_res_masks, + object_score_logits=out["object_score_logits"], + # these frames are what the user interacted with + is_mask_from_pts=True, + ) + out["maskmem_features"] = maskmem_features + out["maskmem_pos_enc"] = maskmem_pos_enc + + obj_output_dict[storage_key][frame_idx] = out + if self.clear_non_cond_mem_around_input: + # clear non-conditioning memory of the surrounding frames + self._clear_obj_non_cond_mem_around_input( + inference_state, frame_idx, obj_idx + ) + + # clear temporary outputs in `temp_output_dict_per_obj` + obj_temp_output_dict[storage_key].clear() + + # check and make sure that every object has received input points or masks + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + if len(obj_output_dict["cond_frame_outputs"]) == 0: + obj_id = self._obj_idx_to_id(inference_state, obj_idx) + raise RuntimeError( + f"No input points or masks are provided for object id {obj_id}; please add inputs first." 
+ ) + # edge case: if an output is added to "cond_frame_outputs", we remove any prior + # output on the same frame in "non_cond_frame_outputs" + for frame_idx in obj_output_dict["cond_frame_outputs"]: + obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + """Propagate the input points across frames to track in the entire video.""" + self.propagate_in_video_preflight(inference_state) + + obj_ids = inference_state["obj_ids"] + num_frames = inference_state["num_frames"] + batch_size = self._get_obj_num(inference_state) + + # set start index, end index, and processing order + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min( + t + for obj_output_dict in inference_state["output_dict_per_obj"].values() + for t in obj_output_dict["cond_frame_outputs"] + ) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) + if start_frame_idx > 0: + processing_order = range(start_frame_idx, end_frame_idx - 1, -1) + else: + processing_order = [] # skip reverse tracking if starting from frame 0 + else: + end_frame_idx = min( + start_frame_idx + max_frame_num_to_track, num_frames - 1 + ) + processing_order = range(start_frame_idx, end_frame_idx + 1) + + for frame_idx in tqdm(processing_order, desc="propagate in video"): + pred_masks_per_obj = [None] * batch_size + for obj_idx in range(batch_size): + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + # We skip those frames already in consolidated outputs (these are frames + # that received input clicks or mask). Note that we cannot directly run + # batched forward on them via `_run_single_frame_inference` because the + # number of clicks on each object might be different. 
+                if frame_idx in obj_output_dict["cond_frame_outputs"]:
+                    storage_key = "cond_frame_outputs"
+                    current_out = obj_output_dict[storage_key][frame_idx]
+                    device = inference_state["device"]
+                    pred_masks = current_out["pred_masks"].to(device, non_blocking=True)
+                    if self.clear_non_cond_mem_around_input:
+                        # clear non-conditioning memory of the surrounding frames
+                        self._clear_obj_non_cond_mem_around_input(
+                            inference_state, frame_idx, obj_idx
+                        )
+                else:
+                    storage_key = "non_cond_frame_outputs"
+                    current_out, pred_masks = self._run_single_frame_inference(
+                        inference_state=inference_state,
+                        output_dict=obj_output_dict,
+                        frame_idx=frame_idx,
+                        batch_size=1,  # run on the slice of a single object
+                        is_init_cond_frame=False,
+                        point_inputs=None,
+                        mask_inputs=None,
+                        reverse=reverse,
+                        run_mem_encoder=True,
+                    )
+                    obj_output_dict[storage_key][frame_idx] = current_out
+
+                inference_state["frames_tracked_per_obj"][obj_idx][frame_idx] = {
+                    "reverse": reverse
+                }
+                pred_masks_per_obj[obj_idx] = pred_masks
+
+            # Resize the output mask to the original video resolution (we directly use
+            # the mask scores on GPU for output to avoid any CPU conversion in between)
+            if len(pred_masks_per_obj) > 1:
+                all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
+            else:
+                all_pred_masks = pred_masks_per_obj[0]
+            _, video_res_masks = self._get_orig_video_res_output(
+                inference_state, all_pred_masks
+            )
+            yield frame_idx, obj_ids, video_res_masks
+
+    @torch.inference_mode()
+    def clear_all_prompts_in_frame(
+        self, inference_state, frame_idx, obj_id, need_output=True
+    ):
+        """Remove all input points or mask in a specific frame for a given object."""
+        obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+
+        # Clear the conditioning information on the given frame
+        inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+        inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+
+        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+        temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
+        temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)
+
+        # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
+        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+        out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
+        if out is not None:
+            # The frame is not a conditioning frame anymore since it's not receiving inputs,
+            # so we "downgrade" its output (if exists) to a non-conditioning frame output.
+            obj_output_dict["non_cond_frame_outputs"][frame_idx] = out
+        inference_state["frames_tracked_per_obj"][obj_idx].pop(frame_idx, None)
+
+        if not need_output:
+            return
+        # Finally, output updated masks per object (after removing the inputs above)
+        obj_ids = inference_state["obj_ids"]
+        is_cond = any(
+            frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+            for obj_temp_output_dict in temp_output_dict_per_obj.values()
+        )
+        consolidated_out = self._consolidate_temp_output_across_obj(
+            inference_state,
+            frame_idx,
+            is_cond=is_cond,
+            consolidate_at_video_res=True,
+        )
+        _, video_res_masks = self._get_orig_video_res_output(
+            inference_state, consolidated_out["pred_masks_video_res"]
+        )
+        return frame_idx, obj_ids, video_res_masks
+
+    @torch.inference_mode()
+    def reset_state(self, inference_state):
+        """Remove all input points or mask in all frames throughout the video."""
+        self._reset_tracking_results(inference_state)
+        # Remove all object ids
+        inference_state["obj_id_to_idx"].clear()
+        inference_state["obj_idx_to_id"].clear()
+        inference_state["obj_ids"].clear()
+        inference_state["point_inputs_per_obj"].clear()
+        inference_state["mask_inputs_per_obj"].clear()
+        inference_state["output_dict_per_obj"].clear()
+        inference_state["temp_output_dict_per_obj"].clear()
+        inference_state["frames_tracked_per_obj"].clear()
+
+    def _reset_tracking_results(self, inference_state):
+        """Reset all tracking inputs and results across the videos."""
+        for v in inference_state["point_inputs_per_obj"].values():
+            v.clear()
+        for v in inference_state["mask_inputs_per_obj"].values():
+            v.clear()
+        for v in inference_state["output_dict_per_obj"].values():
+            v["cond_frame_outputs"].clear()
+            v["non_cond_frame_outputs"].clear()
+        for v in inference_state["temp_output_dict_per_obj"].values():
+            v["cond_frame_outputs"].clear()
+            v["non_cond_frame_outputs"].clear()
+        for v in inference_state["frames_tracked_per_obj"].values():
+            v.clear()
+
+    def _get_image_feature(self, inference_state, frame_idx, batch_size):
+        """Compute the image features on a given frame."""
+        # Look up in the cache first
+        image, backbone_out = inference_state["cached_features"].get(
+            frame_idx, (None, None)
+        )
+        if backbone_out is None:
+            # Cache miss -- we will run inference on a single image
+            device = inference_state["device"]
+            image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0)
+            backbone_out = self.forward_image(image)
+            # Cache the most recent frame's feature (for repeated interactions with
+            # a frame; we can use an LRU cache for more frames in the future).
+            inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
+
+        # expand the features to have the same dimension as the number of objects
+        expanded_image = image.expand(batch_size, -1, -1, -1)
+        expanded_backbone_out = {
+            "backbone_fpn": backbone_out["backbone_fpn"].copy(),
+            "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
+        }
+        for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
+            expanded_backbone_out["backbone_fpn"][i] = feat.expand(
+                batch_size, -1, -1, -1
+            )
+        for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
+            pos = pos.expand(batch_size, -1, -1, -1)
+            expanded_backbone_out["vision_pos_enc"][i] = pos
+
+        features = self._prepare_backbone_features(expanded_backbone_out)
+        features = (expanded_image,) + features
+        return features
+
+    def _run_single_frame_inference(
+        self,
+        inference_state,
+        output_dict,
+        frame_idx,
+        batch_size,
+        is_init_cond_frame,
+        point_inputs,
+        mask_inputs,
+        reverse,
+        run_mem_encoder,
+        prev_sam_mask_logits=None,
+    ):
+        """Run tracking on a single frame based on current inputs and previous memory."""
+        # Retrieve correct image features
+        (
+            _,
+            _,
+            current_vision_feats,
+            current_vision_pos_embeds,
+            feat_sizes,
+        ) = self._get_image_feature(inference_state, frame_idx, batch_size)
+
+        # point and mask should not appear as input simultaneously on the same frame
+        assert point_inputs is None or mask_inputs is None
+        current_out = self.track_step(
+            frame_idx=frame_idx,
+            is_init_cond_frame=is_init_cond_frame,
+            current_vision_feats=current_vision_feats,
+            current_vision_pos_embeds=current_vision_pos_embeds,
+            feat_sizes=feat_sizes,
+            point_inputs=point_inputs,
+            mask_inputs=mask_inputs,
+            output_dict=output_dict,
+            num_frames=inference_state["num_frames"],
+            track_in_reverse=reverse,
+            run_mem_encoder=run_mem_encoder,
+            prev_sam_mask_logits=prev_sam_mask_logits,
+        )
+
+        # optionally offload the output to CPU memory to save GPU space
+        storage_device = inference_state["storage_device"]
+        maskmem_features = current_out["maskmem_features"]
+        if maskmem_features is not None:
+            maskmem_features = maskmem_features.to(torch.bfloat16)
+            maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
+        pred_masks_gpu = current_out["pred_masks"]  # (B, 1, H, W)
+        # potentially fill holes in the predicted masks
+        if self.fill_hole_area > 0:
+            pred_masks_gpu = fill_holes_in_mask_scores(
+                pred_masks_gpu, self.fill_hole_area
+            )
+        pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
+        # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+        maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
+        # object pointer is a small tensor, so we always keep it on GPU memory for fast access
+        obj_ptr = current_out["obj_ptr"]
+        object_score_logits = current_out["object_score_logits"]
+        # make a compact version of this frame's output to reduce the state size
+        compact_current_out = {
+            "maskmem_features": maskmem_features,  # (B, C, H, W)
+            "maskmem_pos_enc": maskmem_pos_enc,
+            "pred_masks": pred_masks,
+            "obj_ptr": obj_ptr,
+            "object_score_logits": object_score_logits,
+        }
+        return compact_current_out, pred_masks_gpu
+
+    def _run_memory_encoder(
+        self,
+        inference_state,
+        frame_idx,
+        batch_size,
+        high_res_masks,
object_score_logits, + is_mask_from_pts, + ): + """ + Run the memory encoder on `high_res_masks`. This is usually after applying + non-overlapping constraints to object scores. Since their scores changed, their + memory also need to be computed again with the memory encoder. + """ + # Retrieve correct image features + _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( + inference_state, frame_idx, batch_size + ) + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks, + object_score_logits=object_score_logits, + is_mask_from_pts=is_mask_from_pts, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc( + inference_state, {"maskmem_pos_enc": maskmem_pos_enc} + ) + return maskmem_features, maskmem_pos_enc + + def _get_maskmem_pos_enc(self, inference_state, current_out): + """ + `maskmem_pos_enc` is the same across frames and objects, so we cache it as + a constant in the inference session to reduce session storage size. + """ + model_constants = inference_state["constants"] + # "out_maskmem_pos_enc" should be either a list of tensors or None + out_maskmem_pos_enc = current_out["maskmem_pos_enc"] + if out_maskmem_pos_enc is not None: + if "maskmem_pos_enc" not in model_constants: + assert isinstance(out_maskmem_pos_enc, list) + # only take the slice for one object, since it's same across objects + maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] + model_constants["maskmem_pos_enc"] = maskmem_pos_enc + else: + maskmem_pos_enc = model_constants["maskmem_pos_enc"] + # expand the cached maskmem_pos_enc to the actual batch size + batch_size = out_maskmem_pos_enc[0].size(0) + expanded_maskmem_pos_enc = [ + x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc + ] + else: + expanded_maskmem_pos_enc = None + return expanded_maskmem_pos_enc + + @torch.inference_mode() + def remove_object(self, inference_state, obj_id, strict=False, need_output=True): + """ + Remove an object id from the tracking state. If strict is True, we check whether + the object id actually exists and raise an error if it doesn't exist. + """ + old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None) + updated_frames = [] + # Check whether this object_id to remove actually exists and possibly raise an error. + if old_obj_idx_to_rm is None: + if not strict: + return inference_state["obj_ids"], updated_frames + raise RuntimeError( + f"Cannot remove object id {obj_id} as it doesn't exist. " + f"All existing object ids: {inference_state['obj_ids']}." + ) + + # If this is the only remaining object id, we simply reset the state. + if len(inference_state["obj_id_to_idx"]) == 1: + self.reset_state(inference_state) + return inference_state["obj_ids"], updated_frames + + # There are still remaining objects after removing this object id. In this case, + # we need to delete the object storage from inference state tensors. 
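+        # Summary of the steps below: clear this object's prompts on the frames it was
+        # interacted with (Step 0), re-index the remaining object ids and their
+        # per-object storage (Steps 1-2), and optionally re-consolidate the outputs on
+        # the affected frames (Step 3).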
+        # Step 0: clear the input on those frames where this object id has point or mask input
+        # (note that this step is required as it might downgrade conditioning frames to
+        # non-conditioning ones)
+        obj_input_frames_inds = set()
+        obj_input_frames_inds.update(
+            inference_state["point_inputs_per_obj"][old_obj_idx_to_rm]
+        )
+        obj_input_frames_inds.update(
+            inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm]
+        )
+        for frame_idx in obj_input_frames_inds:
+            self.clear_all_prompts_in_frame(
+                inference_state, frame_idx, obj_id, need_output=False
+            )
+
+        # Step 1: Update the object id mapping (note that it must be done after Step 0,
+        # since Step 0 still requires the old object id mappings in inference_state)
+        old_obj_ids = inference_state["obj_ids"]
+        old_obj_inds = list(range(len(old_obj_ids)))
+        remain_old_obj_inds = old_obj_inds.copy()
+        remain_old_obj_inds.remove(old_obj_idx_to_rm)
+        new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
+        new_obj_inds = list(range(len(new_obj_ids)))
+        # build new mappings
+        old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
+        inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
+        inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
+        inference_state["obj_ids"] = new_obj_ids
+
+        # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
+        def _map_keys(container):
+            new_kvs = []
+            for k in old_obj_inds:
+                v = container.pop(k)
+                if k in old_idx_to_new_idx:
+                    new_kvs.append((old_idx_to_new_idx[k], v))
+            container.update(new_kvs)
+
+        _map_keys(inference_state["point_inputs_per_obj"])
+        _map_keys(inference_state["mask_inputs_per_obj"])
+        _map_keys(inference_state["output_dict_per_obj"])
+        _map_keys(inference_state["temp_output_dict_per_obj"])
+        _map_keys(inference_state["frames_tracked_per_obj"])
+
+        # Step 3: Further collect the outputs on those frames in `obj_input_frames_inds`, which
+        # could show an updated mask for objects previously occluded by the object being removed
+        if need_output:
+            temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+            for frame_idx in obj_input_frames_inds:
+                is_cond = any(
+                    frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+                    for obj_temp_output_dict in temp_output_dict_per_obj.values()
+                )
+                consolidated_out = self._consolidate_temp_output_across_obj(
+                    inference_state,
+                    frame_idx,
+                    is_cond=is_cond,
+                    consolidate_at_video_res=True,
+                )
+                _, video_res_masks = self._get_orig_video_res_output(
+                    inference_state, consolidated_out["pred_masks_video_res"]
+                )
+                updated_frames.append((frame_idx, video_res_masks))
+
+        return inference_state["obj_ids"], updated_frames
+
+    def _clear_obj_non_cond_mem_around_input(self, inference_state, frame_idx, obj_idx):
+        """
+        Remove the non-conditioning memory around the input frame. When users provide
+        correction clicks, the surrounding frames' non-conditioning memories can still
+        contain outdated object appearance information and could confuse the model.
+
+        This method clears those non-conditioning memories surrounding the interacted
+        frame to avoid giving the model both old and new information about the object.
+        """
+        r = self.memory_temporal_stride_for_eval
+        frame_idx_begin = frame_idx - r * self.num_maskmem
+        frame_idx_end = frame_idx + r * self.num_maskmem
+        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+        non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"]
+        for t in range(frame_idx_begin, frame_idx_end + 1):
+            non_cond_frame_outputs.pop(t, None)
+
+
+class SAM2VideoPredictorVOS(SAM2VideoPredictor):
+    """Optimized for the VOS setting"""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._compile_all_components()
+
+    def _compile_all_components(self):
+        print("Compiling all components for VOS setting.
First time may be very slow.") + self.memory_encoder.forward = torch.compile( + self.memory_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + self.memory_attention.forward = torch.compile( + self.memory_attention.forward, + mode="max-autotune", + fullgraph=True, + dynamic=True, # Num. of memories varies + ) + + self.sam_prompt_encoder.forward = torch.compile( + self.sam_prompt_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, # Accuracy regression on True + ) + + self.sam_mask_decoder.forward = torch.compile( + self.sam_mask_decoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, # Accuracy regression on True + ) + + def forward_image(self, img_batch: torch.Tensor): + """ + Identical to the corresponding method in the parent (SAM2VideoPredictor), but + cloning the backbone features and pos encoding to enable compilation. + """ + backbone_out = self.image_encoder(img_batch) + if self.use_high_res_features_in_sam: + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + # Clone to help torch.compile + for i in range(len(backbone_out["backbone_fpn"])): + backbone_out["backbone_fpn"][i] = backbone_out["backbone_fpn"][i].clone() + backbone_out["vision_pos_enc"][i] = backbone_out["vision_pos_enc"][ + i + ].clone() + return backbone_out + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ): + """ + Identical to the corresponding method in the parent (SAM2VideoPredictor), but + cloning the outputs of prompt_encoder and mask_decoder to enable compilation. + """ + B = backbone_features.size(0) + device = backbone_features.device + assert backbone_features.size(1) == self.sam_prompt_embed_dim + assert backbone_features.size(2) == self.sam_image_embedding_size + assert backbone_features.size(3) == self.sam_image_embedding_size + + # a) Handle point prompts + if point_inputs is not None: + sam_point_coords = point_inputs["point_coords"] + sam_point_labels = point_inputs["point_labels"] + assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B + else: + # If no points are provide, pad with an empty point (with label -1) + sam_point_coords = torch.zeros(B, 1, 2, device=device) + sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device) + + # b) Handle mask prompts + if mask_inputs is not None: + # If mask_inputs is provided, downsize it into low-res mask input if needed + # and feed it as a dense mask prompt into the SAM mask encoder + assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1) + if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size: + sam_mask_prompt = F.interpolate( + mask_inputs.float(), + size=self.sam_prompt_encoder.mask_input_size, + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + else: + sam_mask_prompt = mask_inputs + else: + # Otherwise, simply feed None (and SAM's prompt encoder will add + # a learned `no_mask_embed` to indicate no mask input in this case). 
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + # Clone image_pe and the outputs of sam_prompt_encoder + # to enable compilation + sparse_embeddings = sparse_embeddings.clone() + dense_embeddings = dense_embeddings.clone() + image_pe = self.sam_prompt_encoder.get_dense_pe().clone() + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + # Clone the output of sam_mask_decoder + # to enable compilation + low_res_multimasks = low_res_multimasks.clone() + ious = ious.clone() + sam_output_tokens = sam_output_tokens.clone() + object_score_logits = object_score_logits.clone() + + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > 0 + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + if multimask_output: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def _encode_new_memory( + self, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + object_score_logits, + is_mask_from_pts, + ): + """ + Identical to the corresponding method in the parent (SAM2VideoPredictor), but + cloning the memories and their pos enc to enable compilation. 
+ """ + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1). + pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts + if binarize and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + maskmem_out = self.memory_encoder( + pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + ) + # Clone the feats and pos_enc to enable compilation + maskmem_features = maskmem_out["vision_features"].clone() + maskmem_pos_enc = [m.clone() for m in maskmem_out["vision_pos_enc"]] + # add a no-object embedding to the spatial memory to indicate that the frame + # is predicted to be occluded (i.e. no object is appearing in the frame) + if self.no_obj_embed_spatial is not None: + is_obj_appearing = (object_score_logits > 0).float() + maskmem_features += ( + 1 - is_obj_appearing[..., None, None] + ) * self.no_obj_embed_spatial[..., None, None].expand( + *maskmem_features.shape + ) + + return maskmem_features, maskmem_pos_enc diff --git a/sam2/sam2_video_predictor_legacy.py b/sam2/sam2_video_predictor_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..c7e01ccf972491904b013526333826b337354db1 --- /dev/null +++ b/sam2/sam2_video_predictor_legacy.py @@ -0,0 +1,1172 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import warnings +from collections import OrderedDict + +import torch + +from tqdm import tqdm + +from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base +from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames + + +class SAM2VideoPredictor(SAM2Base): + """The predictor class to handle user interactions and manage inference states.""" + + def __init__( + self, + fill_hole_area=0, + # whether to apply non-overlapping constraints on the output object masks + non_overlap_masks=False, + # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks; + # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True) + clear_non_cond_mem_around_input=False, + # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True). 
+ clear_non_cond_mem_for_multi_obj=False, + # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click + # if `add_all_frames_to_correct_as_cond` is False, we conditioning frame list to only use those initial conditioning frames + add_all_frames_to_correct_as_cond=False, + **kwargs, + ): + super().__init__(**kwargs) + self.fill_hole_area = fill_hole_area + self.non_overlap_masks = non_overlap_masks + self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input + self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj + self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond + + @torch.inference_mode() + def init_state( + self, + video_path, + offload_video_to_cpu=False, + offload_state_to_cpu=False, + async_loading_frames=False, + ): + """Initialize an inference state.""" + compute_device = self.device # device of the model + images, video_height, video_width = load_video_frames( + video_path=video_path, + image_size=self.image_size, + offload_video_to_cpu=offload_video_to_cpu, + async_loading_frames=async_loading_frames, + compute_device=compute_device, + ) + inference_state = {} + inference_state["images"] = images + inference_state["num_frames"] = len(images) + # whether to offload the video frames to CPU memory + # turning on this option saves the GPU memory with only a very small overhead + inference_state["offload_video_to_cpu"] = offload_video_to_cpu + # whether to offload the inference state to CPU memory + # turning on this option saves the GPU memory at the cost of a lower tracking fps + # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object + # and from 24 to 21 when tracking two objects) + inference_state["offload_state_to_cpu"] = offload_state_to_cpu + # the original video height and width, used for resizing final output scores + inference_state["video_height"] = video_height + inference_state["video_width"] = video_width + inference_state["device"] = compute_device + if offload_state_to_cpu: + inference_state["storage_device"] = torch.device("cpu") + else: + inference_state["storage_device"] = compute_device + # inputs on each frame + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + # visual features on a small number of recently visited frames for quick interactions + inference_state["cached_features"] = {} + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # mapping between client-side object id and model-side object index + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + # A storage to hold the model's tracking results and states on each frame + inference_state["output_dict"] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + # Slice (view) of each object tracking results, sharing the same memory with "output_dict" + inference_state["output_dict_per_obj"] = {} + # A temporary storage to hold new outputs when user interact with a frame + # to add clicks or mask (it's merged into "output_dict" before propagation starts) + inference_state["temp_output_dict_per_obj"] = {} + # Frames that already holds consolidated outputs from click or mask inputs + # (we directly use their consolidated outputs during tracking) + 
inference_state["consolidated_frame_inds"] = { + "cond_frame_outputs": set(), # set containing frame indices + "non_cond_frame_outputs": set(), # set containing frame indices + } + # metadata for each tracking frame (e.g. which direction it's tracked) + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"] = {} + # Warm up the visual backbone and cache the image feature on frame 0 + self._get_image_feature(inference_state, frame_idx=0, batch_size=1) + return inference_state + + @classmethod + def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor": + """ + Load a pretrained model from the Hugging Face hub. + + Arguments: + model_id (str): The Hugging Face repository ID. + **kwargs: Additional arguments to pass to the model constructor. + + Returns: + (SAM2VideoPredictor): The loaded model. + """ + from sam2.build_sam import build_sam2_video_predictor_hf + + sam_model = build_sam2_video_predictor_hf(model_id, **kwargs) + return sam_model + + def _obj_id_to_idx(self, inference_state, obj_id): + """Map client-side object id to model-side object index.""" + obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None) + if obj_idx is not None: + return obj_idx + + # This is a new object id not sent to the server before. We only allow adding + # new objects *before* the tracking starts. + allow_new_object = not inference_state["tracking_has_started"] + if allow_new_object: + # get the next object slot + obj_idx = len(inference_state["obj_id_to_idx"]) + inference_state["obj_id_to_idx"][obj_id] = obj_idx + inference_state["obj_idx_to_id"][obj_idx] = obj_id + inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"]) + # set up input and output structures for this object + inference_state["point_inputs_per_obj"][obj_idx] = {} + inference_state["mask_inputs_per_obj"][obj_idx] = {} + inference_state["output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["temp_output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + return obj_idx + else: + raise RuntimeError( + f"Cannot add new object id {obj_id} after tracking starts. " + f"All existing object ids: {inference_state['obj_ids']}. " + f"Please call 'reset_state' to restart from scratch." 
+ ) + + def _obj_idx_to_id(self, inference_state, obj_idx): + """Map model-side object index to client-side object id.""" + return inference_state["obj_idx_to_id"][obj_idx] + + def _get_obj_num(self, inference_state): + """Get the total number of unique object ids received so far in this session.""" + return len(inference_state["obj_idx_to_id"]) + + @torch.inference_mode() + def add_new_points_or_box( + self, + inference_state, + frame_idx, + obj_id, + points=None, + labels=None, + clear_old_points=True, + normalize_coords=True, + box=None, + ): + """Add new points to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if (points is not None) != (labels is not None): + raise ValueError("points and labels must be provided together") + if points is None and box is None: + raise ValueError("at least one of points or box must be provided as input") + + if points is None: + points = torch.zeros(0, 2, dtype=torch.float32) + elif not isinstance(points, torch.Tensor): + points = torch.tensor(points, dtype=torch.float32) + if labels is None: + labels = torch.zeros(0, dtype=torch.int32) + elif not isinstance(labels, torch.Tensor): + labels = torch.tensor(labels, dtype=torch.int32) + if points.dim() == 2: + points = points.unsqueeze(0) # add batch dimension + if labels.dim() == 1: + labels = labels.unsqueeze(0) # add batch dimension + + # If `box` is provided, we add it as the first two points with labels 2 and 3 + # along with the user-provided points (consistent with how SAM 2 is trained). + if box is not None: + if not clear_old_points: + raise ValueError( + "cannot add box without clearing old points, since " + "box prompt must be provided before any point prompt " + "(please use clear_old_points=True instead)" + ) + if inference_state["tracking_has_started"]: + warnings.warn( + "You are adding a box after tracking starts. SAM 2 may not always be " + "able to incorporate a box prompt for *refinement*. 
If you intend to " + "use box prompt as an *initial* input before tracking, please call " + "'reset_state' on the inference state to restart from scratch.", + category=UserWarning, + stacklevel=2, + ) + if not isinstance(box, torch.Tensor): + box = torch.tensor(box, dtype=torch.float32, device=points.device) + box_coords = box.reshape(1, 2, 2) + box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device) + box_labels = box_labels.reshape(1, 2) + points = torch.cat([box_coords, points], dim=1) + labels = torch.cat([box_labels, labels], dim=1) + + if normalize_coords: + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + points = points / torch.tensor([video_W, video_H]).to(points.device) + # scale the (normalized) coordinates by the model's internal image size + points = points * self.image_size + points = points.to(inference_state["device"]) + labels = labels.to(inference_state["device"]) + + if not clear_old_points: + point_inputs = point_inputs_per_frame.get(frame_idx, None) + else: + point_inputs = None + point_inputs = concat_points(point_inputs, points, labels) + + point_inputs_per_frame[frame_idx] = point_inputs + mask_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. + is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + # Get any previously predicted mask logits on this object and feed it along with + # the new clicks into the SAM mask decoder. + prev_sam_mask_logits = None + # lookup temporary output dict first, which contains the most recent output + # (if not found, then lookup conditioning and non-conditioning frame output) + prev_out = obj_temp_output_dict[storage_key].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx) + + if prev_out is not None and prev_out["pred_masks"] is not None: + device = inference_state["device"] + prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True) + # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues. 
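+ # (the fixed [-32, 32] range below is presumably just a safe bound for the re-fed mask
+ # prompt, e.g. to avoid extreme values in lower precision; the exact numbers look like an
+ # empirical choice rather than something dictated by the model)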
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=point_inputs, + mask_inputs=None, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + prev_sam_mask_logits=prev_sam_mask_logits, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + def add_new_points(self, *args, **kwargs): + """Deprecated method. Please use `add_new_points_or_box` instead.""" + return self.add_new_points_or_box(*args, **kwargs) + + @torch.inference_mode() + def add_new_mask( + self, + inference_state, + frame_idx, + obj_id, + mask, + ): + """Add new mask to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if not isinstance(mask, torch.Tensor): + mask = torch.tensor(mask, dtype=torch.bool) + assert mask.dim() == 2 + mask_H, mask_W = mask.shape + mask_inputs_orig = mask[None, None] # add batch and channel dimension + mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"]) + + # resize the mask if it doesn't match the model's image size + if mask_H != self.image_size or mask_W != self.image_size: + mask_inputs = torch.nn.functional.interpolate( + mask_inputs_orig, + size=(self.image_size, self.image_size), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + mask_inputs = (mask_inputs >= 0.5).float() + else: + mask_inputs = mask_inputs_orig + + mask_inputs_per_frame[frame_idx] = mask_inputs + point_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. 
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=None, + mask_inputs=mask_inputs, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + def _get_orig_video_res_output(self, inference_state, any_res_masks): + """ + Resize the object scores to the original video resolution (video_res_masks) + and apply non-overlapping constraints for final output. + """ + device = inference_state["device"] + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + any_res_masks = any_res_masks.to(device, non_blocking=True) + if any_res_masks.shape[-2:] == (video_H, video_W): + video_res_masks = any_res_masks + else: + video_res_masks = torch.nn.functional.interpolate( + any_res_masks, + size=(video_H, video_W), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks: + video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) + return any_res_masks, video_res_masks + + def _consolidate_temp_output_across_obj( + self, + inference_state, + frame_idx, + is_cond, + run_mem_encoder, + consolidate_at_video_res=False, + ): + """ + Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on + a frame into a single output for all objects, including + 1) fill any missing objects either from `output_dict_per_obj` (if they exist in + `output_dict_per_obj` for this frame) or leave them as placeholder values + (if they don't exist in `output_dict_per_obj` for this frame); + 2) if specified, rerun memory encoder after apply non-overlapping constraints + on the object scores. 
+ """ + batch_size = self._get_obj_num(inference_state) + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Optionally, we allow consolidating the temporary outputs at the original + # video resolution (to provide a better editing experience for mask prompts). + if consolidate_at_video_res: + assert not run_mem_encoder, "memory encoder cannot run at video resolution" + consolidated_H = inference_state["video_height"] + consolidated_W = inference_state["video_width"] + consolidated_mask_key = "pred_masks_video_res" + else: + consolidated_H = consolidated_W = self.image_size // 4 + consolidated_mask_key = "pred_masks" + + # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc" + # will be added when rerunning the memory encoder after applying non-overlapping + # constraints to object scores. Its "pred_masks" are prefilled with a large + # negative value (NO_OBJ_SCORE) to represent missing objects. + consolidated_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + consolidated_mask_key: torch.full( + size=(batch_size, 1, consolidated_H, consolidated_W), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["storage_device"], + ), + "obj_ptr": torch.full( + size=(batch_size, self.hidden_dim), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["device"], + ), + "object_score_logits": torch.full( + size=(batch_size, 1), + # default to 10.0 for object_score_logits, i.e. assuming the object is + # present as sigmoid(10)=1, same as in `predict_masks` of `MaskDecoder` + fill_value=10.0, + dtype=torch.float32, + device=inference_state["device"], + ), + } + empty_mask_ptr = None + for obj_idx in range(batch_size): + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + out = obj_temp_output_dict[storage_key].get(frame_idx, None) + # If the object doesn't appear in "temp_output_dict_per_obj" on this frame, + # we fall back and look up its previous output in "output_dict_per_obj". + # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in + # "output_dict_per_obj" to find a previous output for this object. + if out is None: + out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None) + if out is None: + out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None) + # If the object doesn't appear in "output_dict_per_obj" either, we skip it + # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE + # placeholder above) and set its object pointer to be a dummy pointer. + if out is None: + # Fill in dummy object pointers for those objects without any inputs or + # tracking outcomes on this frame (only do it under `run_mem_encoder=True`, + # i.e. when we need to build the memory for tracking). 
+ if run_mem_encoder: + if empty_mask_ptr is None: + empty_mask_ptr = self._get_empty_mask_ptr( + inference_state, frame_idx + ) + # fill object pointer with a dummy pointer (based on an empty mask) + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr + continue + # Add the temporary object output mask to consolidated output mask + obj_mask = out["pred_masks"] + consolidated_pred_masks = consolidated_out[consolidated_mask_key] + if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]: + consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask + else: + # Resize first if temporary object mask has a different resolution + resized_obj_mask = torch.nn.functional.interpolate( + obj_mask, + size=consolidated_pred_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ) + consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"] + consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out[ + "object_score_logits" + ] + + # Optionally, apply non-overlapping constraints on the consolidated scores + # and rerun the memory encoder + if run_mem_encoder: + device = inference_state["device"] + high_res_masks = torch.nn.functional.interpolate( + consolidated_out["pred_masks"].to(device, non_blocking=True), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks_for_mem_enc: + high_res_masks = self._apply_non_overlapping_constraints(high_res_masks) + maskmem_features, maskmem_pos_enc = self._run_memory_encoder( + inference_state=inference_state, + frame_idx=frame_idx, + batch_size=batch_size, + high_res_masks=high_res_masks, + object_score_logits=consolidated_out["object_score_logits"], + is_mask_from_pts=True, # these frames are what the user interacted with + ) + consolidated_out["maskmem_features"] = maskmem_features + consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc + + return consolidated_out + + def _get_empty_mask_ptr(self, inference_state, frame_idx): + """Get a dummy object pointer based on an empty mask on the current frame.""" + # A dummy (empty) mask with a single object + batch_size = 1 + mask_inputs = torch.zeros( + (batch_size, 1, self.image_size, self.image_size), + dtype=torch.float32, + device=inference_state["device"], + ) + + # Retrieve correct image features + ( + _, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # Feed the empty mask and image feature above to get a dummy object pointer + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=True, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + point_inputs=None, + mask_inputs=mask_inputs, + output_dict={}, + num_frames=inference_state["num_frames"], + track_in_reverse=False, + run_mem_encoder=False, + prev_sam_mask_logits=None, + ) + return current_out["obj_ptr"] + + @torch.inference_mode() + def propagate_in_video_preflight(self, inference_state): + """Prepare inference_state and consolidate temporary outputs before tracking.""" + # Tracking has started and we don't allow adding new objects until session is reset. + inference_state["tracking_has_started"] = True + batch_size = self._get_obj_num(inference_state) + + # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and + # add them into "output_dict". 
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + output_dict = inference_state["output_dict"] + # "consolidated_frame_inds" contains indices of those frames where consolidated + # temporary outputs have been added (either in this call or any previous calls + # to `propagate_in_video_preflight`). + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + for is_cond in [False, True]: + # Separately consolidate conditioning and non-conditioning temp outputs + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Find all the frames that contain temporary outputs for any objects + # (these should be the frames that have just received clicks for mask inputs + # via `add_new_points_or_box` or `add_new_mask`) + temp_frame_inds = set() + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + temp_frame_inds.update(obj_temp_output_dict[storage_key].keys()) + consolidated_frame_inds[storage_key].update(temp_frame_inds) + # consolidate the temporary output across all objects on this frame + for frame_idx in temp_frame_inds: + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True + ) + # merge them into "output_dict" and also create per-object slices + output_dict[storage_key][frame_idx] = consolidated_out + self._add_output_per_object( + inference_state, frame_idx, consolidated_out, storage_key + ) + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + + # clear temporary outputs in `temp_output_dict_per_obj` + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + obj_temp_output_dict[storage_key].clear() + + # edge case: if an output is added to "cond_frame_outputs", we remove any prior + # output on the same frame in "non_cond_frame_outputs" + for frame_idx in output_dict["cond_frame_outputs"]: + output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + for frame_idx in obj_output_dict["cond_frame_outputs"]: + obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + assert frame_idx in output_dict["cond_frame_outputs"] + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + + # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames + # with either points or mask inputs (which should be true under a correct workflow). 
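+ # (sanity check only: `add_new_points_or_box` and `add_new_mask` are the code paths that
+ # populate the per-object input dicts inspected below)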
+ all_consolidated_frame_inds = ( + consolidated_frame_inds["cond_frame_outputs"] + | consolidated_frame_inds["non_cond_frame_outputs"] + ) + input_frames_inds = set() + for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values(): + input_frames_inds.update(point_inputs_per_frame.keys()) + for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values(): + input_frames_inds.update(mask_inputs_per_frame.keys()) + assert all_consolidated_frame_inds == input_frames_inds + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + """Propagate the input points across frames to track in the entire video.""" + self.propagate_in_video_preflight(inference_state) + + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + obj_ids = inference_state["obj_ids"] + num_frames = inference_state["num_frames"] + batch_size = self._get_obj_num(inference_state) + if len(output_dict["cond_frame_outputs"]) == 0: + raise RuntimeError("No points are provided; please add points first") + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + + # set start index, end index, and processing order + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min(output_dict["cond_frame_outputs"]) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) + if start_frame_idx > 0: + processing_order = range(start_frame_idx, end_frame_idx - 1, -1) + else: + processing_order = [] # skip reverse tracking if starting from frame 0 + else: + end_frame_idx = min( + start_frame_idx + max_frame_num_to_track, num_frames - 1 + ) + processing_order = range(start_frame_idx, end_frame_idx + 1) + + for frame_idx in tqdm(processing_order, desc="propagate in video"): + # We skip those frames already in consolidated outputs (these are frames + # that received input clicks or mask). Note that we cannot directly run + # batched forward on them via `_run_single_frame_inference` because the + # number of clicks on each object might be different. + if frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + storage_key = "cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]: + storage_key = "non_cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + else: + storage_key = "non_cond_frame_outputs" + current_out, pred_masks = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=output_dict, + frame_idx=frame_idx, + batch_size=batch_size, + is_init_cond_frame=False, + point_inputs=None, + mask_inputs=None, + reverse=reverse, + run_mem_encoder=True, + ) + output_dict[storage_key][frame_idx] = current_out + # Create slices of per-object outputs for subsequent interaction with each + # individual object after tracking. 
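+ # (these slices share the same tensor storage as `current_out`, so this is bookkeeping
+ # rather than a copy; see `_add_output_per_object` below)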
+ self._add_output_per_object( + inference_state, frame_idx, current_out, storage_key + ) + inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse} + + # Resize the output mask to the original video resolution (we directly use + # the mask scores on GPU for output to avoid any CPU conversion in between) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, pred_masks + ) + yield frame_idx, obj_ids, video_res_masks + + def _add_output_per_object( + self, inference_state, frame_idx, current_out, storage_key + ): + """ + Split a multi-object output into per-object output slices and add them into + `output_dict_per_obj`. The resulting slices share the same tensor storage. + """ + maskmem_features = current_out["maskmem_features"] + assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor) + + maskmem_pos_enc = current_out["maskmem_pos_enc"] + assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list) + + output_dict_per_obj = inference_state["output_dict_per_obj"] + for obj_idx, obj_output_dict in output_dict_per_obj.items(): + obj_slice = slice(obj_idx, obj_idx + 1) + obj_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + "pred_masks": current_out["pred_masks"][obj_slice], + "obj_ptr": current_out["obj_ptr"][obj_slice], + "object_score_logits": current_out["object_score_logits"][obj_slice], + } + if maskmem_features is not None: + obj_out["maskmem_features"] = maskmem_features[obj_slice] + if maskmem_pos_enc is not None: + obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc] + obj_output_dict[storage_key][frame_idx] = obj_out + + @torch.inference_mode() + def clear_all_prompts_in_frame( + self, inference_state, frame_idx, obj_id, need_output=True + ): + """Remove all input points or mask in a specific frame for a given object.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + + # Clear the conditioning information on the given frame + inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None) + inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None) + + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None) + temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None) + + # Check and see if there are still any inputs left on this frame + batch_size = self._get_obj_num(inference_state) + frame_has_input = False + for obj_idx2 in range(batch_size): + if frame_idx in inference_state["point_inputs_per_obj"][obj_idx2]: + frame_has_input = True + break + if frame_idx in inference_state["mask_inputs_per_obj"][obj_idx2]: + frame_has_input = True + break + + # If this frame has no remaining inputs for any objects, we further clear its + # conditioning frame status + if not frame_has_input: + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + consolidated_frame_inds["cond_frame_outputs"].discard(frame_idx) + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + # Remove the frame's conditioning output (possibly downgrading it to non-conditioning) + out = output_dict["cond_frame_outputs"].pop(frame_idx, None) + if out is not None: + # The frame is not a conditioning frame anymore since it's not receiving inputs, + # so we "downgrade" its output (if exists) to a non-conditioning frame output. 
+ output_dict["non_cond_frame_outputs"][frame_idx] = out + inference_state["frames_already_tracked"].pop(frame_idx, None) + # Similarly, do it for the sliced output on each object. + for obj_idx2 in range(batch_size): + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx2] + obj_out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None) + if obj_out is not None: + obj_output_dict["non_cond_frame_outputs"][frame_idx] = obj_out + + # If all the conditioning frames have been removed, we also clear the tracking outputs + if len(output_dict["cond_frame_outputs"]) == 0: + self._reset_tracking_results(inference_state) + + if not need_output: + return + # Finally, output updated masks per object (after removing the inputs above) + obj_ids = inference_state["obj_ids"] + is_cond = any( + frame_idx in obj_temp_output_dict["cond_frame_outputs"] + for obj_temp_output_dict in temp_output_dict_per_obj.values() + ) + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + @torch.inference_mode() + def reset_state(self, inference_state): + """Remove all input points or mask in all frames throughout the video.""" + self._reset_tracking_results(inference_state) + # Remove all object ids + inference_state["obj_id_to_idx"].clear() + inference_state["obj_idx_to_id"].clear() + inference_state["obj_ids"].clear() + inference_state["point_inputs_per_obj"].clear() + inference_state["mask_inputs_per_obj"].clear() + inference_state["output_dict_per_obj"].clear() + inference_state["temp_output_dict_per_obj"].clear() + + def _reset_tracking_results(self, inference_state): + """Reset all tracking inputs and results across the videos.""" + for v in inference_state["point_inputs_per_obj"].values(): + v.clear() + for v in inference_state["mask_inputs_per_obj"].values(): + v.clear() + for v in inference_state["output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + v["non_cond_frame_outputs"].clear() + for v in inference_state["temp_output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + v["non_cond_frame_outputs"].clear() + inference_state["output_dict"]["cond_frame_outputs"].clear() + inference_state["output_dict"]["non_cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear() + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"].clear() + + def _get_image_feature(self, inference_state, frame_idx, batch_size): + """Compute the image features on a given frame.""" + # Look up in the cache first + image, backbone_out = inference_state["cached_features"].get( + frame_idx, (None, None) + ) + if backbone_out is None: + # Cache miss -- we will run inference on a single image + device = inference_state["device"] + image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0) + backbone_out = self.forward_image(image) + # Cache the most recent frame's feature (for repeated interactions with + # a frame; we can use an LRU cache for more frames in the future). 
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)} + + # expand the features to have the same dimension as the number of objects + expanded_image = image.expand(batch_size, -1, -1, -1) + expanded_backbone_out = { + "backbone_fpn": backbone_out["backbone_fpn"].copy(), + "vision_pos_enc": backbone_out["vision_pos_enc"].copy(), + } + for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]): + expanded_backbone_out["backbone_fpn"][i] = feat.expand( + batch_size, -1, -1, -1 + ) + for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]): + pos = pos.expand(batch_size, -1, -1, -1) + expanded_backbone_out["vision_pos_enc"][i] = pos + + features = self._prepare_backbone_features(expanded_backbone_out) + features = (expanded_image,) + features + return features + + def _run_single_frame_inference( + self, + inference_state, + output_dict, + frame_idx, + batch_size, + is_init_cond_frame, + point_inputs, + mask_inputs, + reverse, + run_mem_encoder, + prev_sam_mask_logits=None, + ): + """Run tracking on a single frame based on current inputs and previous memory.""" + # Retrieve correct image features + ( + _, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # point and mask should not appear as input simultaneously on the same frame + assert point_inputs is None or mask_inputs is None + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + output_dict=output_dict, + num_frames=inference_state["num_frames"], + track_in_reverse=reverse, + run_mem_encoder=run_mem_encoder, + prev_sam_mask_logits=prev_sam_mask_logits, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = current_out["maskmem_features"] + if maskmem_features is not None: + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + pred_masks_gpu = current_out["pred_masks"] + # potentially fill holes in the predicted masks + if self.fill_hole_area > 0: + pred_masks_gpu = fill_holes_in_mask_scores( + pred_masks_gpu, self.fill_hole_area + ) + pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out) + # object pointer is a small tensor, so we always keep it on GPU memory for fast access + obj_ptr = current_out["obj_ptr"] + object_score_logits = current_out["object_score_logits"] + # make a compact version of this frame's output to reduce the state size + compact_current_out = { + "maskmem_features": maskmem_features, + "maskmem_pos_enc": maskmem_pos_enc, + "pred_masks": pred_masks, + "obj_ptr": obj_ptr, + "object_score_logits": object_score_logits, + } + return compact_current_out, pred_masks_gpu + + def _run_memory_encoder( + self, + inference_state, + frame_idx, + batch_size, + high_res_masks, + object_score_logits, + is_mask_from_pts, + ): + """ + Run the memory encoder on `high_res_masks`. This is usually after applying + non-overlapping constraints to object scores. 
Since their scores changed, their + memory also need to be computed again with the memory encoder. + """ + # Retrieve correct image features + _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( + inference_state, frame_idx, batch_size + ) + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks, + object_score_logits=object_score_logits, + is_mask_from_pts=is_mask_from_pts, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc( + inference_state, {"maskmem_pos_enc": maskmem_pos_enc} + ) + return maskmem_features, maskmem_pos_enc + + def _get_maskmem_pos_enc(self, inference_state, current_out): + """ + `maskmem_pos_enc` is the same across frames and objects, so we cache it as + a constant in the inference session to reduce session storage size. + """ + model_constants = inference_state["constants"] + # "out_maskmem_pos_enc" should be either a list of tensors or None + out_maskmem_pos_enc = current_out["maskmem_pos_enc"] + if out_maskmem_pos_enc is not None: + if "maskmem_pos_enc" not in model_constants: + assert isinstance(out_maskmem_pos_enc, list) + # only take the slice for one object, since it's same across objects + maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] + model_constants["maskmem_pos_enc"] = maskmem_pos_enc + else: + maskmem_pos_enc = model_constants["maskmem_pos_enc"] + # expand the cached maskmem_pos_enc to the actual batch size + batch_size = out_maskmem_pos_enc[0].size(0) + expanded_maskmem_pos_enc = [ + x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc + ] + else: + expanded_maskmem_pos_enc = None + return expanded_maskmem_pos_enc + + @torch.inference_mode() + def remove_object(self, inference_state, obj_id, strict=False, need_output=True): + """ + Remove an object id from the tracking state. If strict is True, we check whether + the object id actually exists and raise an error if it doesn't exist. + """ + old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None) + updated_frames = [] + # Check whether this object_id to remove actually exists and possibly raise an error. + if old_obj_idx_to_rm is None: + if not strict: + return inference_state["obj_ids"], updated_frames + raise RuntimeError( + f"Cannot remove object id {obj_id} as it doesn't exist. " + f"All existing object ids: {inference_state['obj_ids']}." + ) + + # If this is the only remaining object id, we simply reset the state. + if len(inference_state["obj_id_to_idx"]) == 1: + self.reset_state(inference_state) + return inference_state["obj_ids"], updated_frames + + # There are still remaining objects after removing this object id. In this case, + # we need to delete the object storage from inference state tensors. 
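+ # Roughly: Step 0 clears this object's prompts, Step 1 rebuilds the id<->index mappings,
+ # Step 2 re-keys the per-object dicts, Step 3 re-slices the packed tensors, and Step 4
+ # optionally returns refreshed masks on the affected frames.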
+ # Step 0: clear the input on those frames where this object id has point or mask input + # (note that this step is required as it might downgrade conditioning frames to + # non-conditioning ones) + obj_input_frames_inds = set() + obj_input_frames_inds.update( + inference_state["point_inputs_per_obj"][old_obj_idx_to_rm] + ) + obj_input_frames_inds.update( + inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm] + ) + for frame_idx in obj_input_frames_inds: + self.clear_all_prompts_in_frame( + inference_state, frame_idx, obj_id, need_output=False + ) + + # Step 1: Update the object id mapping (note that it must be done after Step 0, + # since Step 0 still requires the old object id mappings in inference_state) + old_obj_ids = inference_state["obj_ids"] + old_obj_inds = list(range(len(old_obj_ids))) + remain_old_obj_inds = old_obj_inds.copy() + remain_old_obj_inds.remove(old_obj_idx_to_rm) + new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds] + new_obj_inds = list(range(len(new_obj_ids))) + # build new mappings + old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds)) + inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds)) + inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids)) + inference_state["obj_ids"] = new_obj_ids + + # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys. + # (note that "consolidated_frame_inds" doesn't need to be updated in this step as + # it's already handled in Step 0) + def _map_keys(container): + new_kvs = [] + for k in old_obj_inds: + v = container.pop(k) + if k in old_idx_to_new_idx: + new_kvs.append((old_idx_to_new_idx[k], v)) + container.update(new_kvs) + + _map_keys(inference_state["point_inputs_per_obj"]) + _map_keys(inference_state["mask_inputs_per_obj"]) + _map_keys(inference_state["output_dict_per_obj"]) + _map_keys(inference_state["temp_output_dict_per_obj"]) + + # Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices. 
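+ # (illustrative example: with 3 tracked objects and old_obj_idx_to_rm == 1, we get
+ # remain_old_obj_inds == [0, 2] and old_idx_to_new_idx == {0: 0, 2: 1}, so the packed
+ # tensors below shrink from batch size 3 to 2 while preserving object order)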
+ def _slice_state(output_dict, storage_key): + for frame_idx, out in output_dict[storage_key].items(): + out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds] + out["maskmem_pos_enc"] = [ + x[remain_old_obj_inds] for x in out["maskmem_pos_enc"] + ] + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out) + out["pred_masks"] = out["pred_masks"][remain_old_obj_inds] + out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds] + out["object_score_logits"] = out["object_score_logits"][ + remain_old_obj_inds + ] + # also update the per-object slices + self._add_output_per_object( + inference_state, frame_idx, out, storage_key + ) + + _slice_state(inference_state["output_dict"], "cond_frame_outputs") + _slice_state(inference_state["output_dict"], "non_cond_frame_outputs") + + # Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which + # could show an updated mask for objects previously occluded by the object being removed + if need_output: + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + for frame_idx in obj_input_frames_inds: + is_cond = any( + frame_idx in obj_temp_output_dict["cond_frame_outputs"] + for obj_temp_output_dict in temp_output_dict_per_obj.values() + ) + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + updated_frames.append((frame_idx, video_res_masks)) + + return inference_state["obj_ids"], updated_frames + + def _clear_non_cond_mem_around_input(self, inference_state, frame_idx): + """ + Remove the non-conditioning memory around the input frame. When users provide + correction clicks, the surrounding frames' non-conditioning memories can still + contain outdated object appearance information and could confuse the model. + + This method clears those non-conditioning memories surrounding the interacted + frame to avoid giving the model both old and new information about the object. + """ + r = self.memory_temporal_stride_for_eval + frame_idx_begin = frame_idx - r * self.num_maskmem + frame_idx_end = frame_idx + r * self.num_maskmem + output_dict = inference_state["output_dict"] + non_cond_frame_outputs = output_dict["non_cond_frame_outputs"] + for t in range(frame_idx_begin, frame_idx_end + 1): + non_cond_frame_outputs.pop(t, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + obj_output_dict["non_cond_frame_outputs"].pop(t, None) diff --git a/sam2/utils/__init__.py b/sam2/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/sam2/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/sam2/utils/amg.py b/sam2/utils/amg.py new file mode 100644 index 0000000000000000000000000000000000000000..986842960cf5deca00614b7b1cde1ab77dad7e6e --- /dev/null +++ b/sam2/utils/amg.py @@ -0,0 +1,348 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + +import numpy as np +import torch + +# Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.float().detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args 
+ ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. + """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecessary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. 
+ """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
+ """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/sam2/utils/kalman_filter.py b/sam2/utils/kalman_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..4eba007a90b1272c69bf6607d4e3246100f0ac48 --- /dev/null +++ b/sam2/utils/kalman_filter.py @@ -0,0 +1,324 @@ +import numpy as np +import scipy.linalg + + +""" +Table for the 0.95 quantile of the chi-square distribution with N degrees of +freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv +function and used as Mahalanobis gating threshold. +""" +chi2inv95 = { + 1: 3.8415, + 2: 5.9915, + 3: 7.8147, + 4: 9.4877, + 5: 11.070, + 6: 12.592, + 7: 14.067, + 8: 15.507, + 9: 16.919} + + +class KalmanFilter(object): + """ + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, a, h, vx, vy, va, vh + + contains the bounding box center position (x, y), aspect ratio a, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, a, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, a, h) with center position (x, y), + aspect ratio a, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. 
+ + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[3], + 1e-2, + 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 1e-5, + 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-2, + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[3], + 1e-5, + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + #mean = np.dot(self._motion_mat, mean) + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot(( + self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-1, + self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot(( + self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrics of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. 
+ """ + std_pos = [ + self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 3], + 1e-2 * np.ones_like(mean[:, 3]), + self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 3], + 1e-5 * np.ones_like(mean[:, 3]), + self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [] + for i in range(len(mean)): + motion_cov.append(np.diag(sqr[i])) + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, a, h), where (x, y) + is the center position, a the aspect ratio, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor( + projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve( + (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot(( + kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, + only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. 
+ """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular( + cholesky_factor, d.T, lower=True, check_finite=False, + overwrite_b=True) + squared_maha = np.sum(z * z, axis=0) + return squared_maha + else: + raise ValueError('invalid distance metric') + + def compute_iou(self, pred_bbox, bboxes): + """ + Compute the IoU between the bbox and the bboxes + """ + ious = [] + pred_bbox = self.xyah_to_xyxy(pred_bbox) + for bbox in bboxes: + iou = self._compute_iou(pred_bbox, bbox) + ious.append(iou) + return ious + + def _compute_iou(self, bbox1, bbox2): + """ + Compute the Intersection over Union (IoU) of two bounding boxes. + Parameters + ---------- + bbox1 : list + The first bounding box in the format [x1, y1, x2, y2]. + bbox2 : list + The second bounding box in the format [x1, y1, x2, y2]. + Returns + ------- + float + The IoU of the two bounding boxes. + """ + if bbox2 == [0, 0, 0, 0]: + return 0 + x1, y1, x2, y2 = bbox1 + x1_, y1_, x2_, y2_ = bbox2 + # Calculate intersection area + intersection_area = max(0, min(x2, x2_) - max(x1, x1_)) * max(0, min(y2, y2_) - max(y1, y1_)) + # Calculate union area + union_area = (x2 - x1) * (y2 - y1) + (x2_ - x1_) * (y2_ - y1_) - intersection_area + # Calculate IoU + iou = intersection_area / union_area if union_area != 0 else 0 + return iou + + def xyxy_to_xyah(self, bbox): + x1, y1, x2, y2 = bbox + xc = (x1 + x2) / 2 + yc = (y1 + y2) / 2 + w = x2 - x1 + h = y2 - y1 + if h == 0: + h = 1 + return [xc, yc, w / h, h] + + def xyah_to_xyxy(self, bbox): + xc, yc, a, h = bbox + x1 = xc - a * h / 2 + y1 = yc - h / 2 + x2 = xc + a * h / 2 + y2 = yc + h / 2 + return [x1, y1, x2, y2] diff --git a/sam2/utils/misc.py b/sam2/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..9c214039093c8d78bc662fbf2855eb5c6ee2980a --- /dev/null +++ b/sam2/utils/misc.py @@ -0,0 +1,349 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import warnings +from threading import Thread + +import numpy as np +import torch +from PIL import Image +from tqdm import tqdm + + +def get_sdpa_settings(): + if torch.cuda.is_available(): + old_gpu = torch.cuda.get_device_properties(0).major < 7 + # only use Flash Attention on Ampere (8.0) or newer GPUs + use_flash_attn = torch.cuda.get_device_properties(0).major >= 8 + if not use_flash_attn: + warnings.warn( + "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.", + category=UserWarning, + stacklevel=2, + ) + # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only + # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases) + pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2]) + if pytorch_version < (2, 2): + warnings.warn( + f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. 
" + "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).", + category=UserWarning, + stacklevel=2, + ) + math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn + else: + old_gpu = True + use_flash_attn = False + math_kernel_on = True + + return old_gpu, use_flash_attn, math_kernel_on + + +def get_connected_components(mask): + """ + Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W). + + Inputs: + - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is + background. + + Outputs: + - labels: A tensor of shape (N, 1, H, W) containing the connected component labels + for foreground pixels and 0 for background pixels. + - counts: A tensor of shape (N, 1, H, W) containing the area of the connected + components for foreground pixels and 0 for background pixels. + """ + from sam2 import _C + + return _C.get_connected_componnets(mask.to(torch.uint8).contiguous()) + + +def mask_to_box(masks: torch.Tensor): + """ + compute bounding box given an input mask + + Inputs: + - masks: [B, 1, H, W] masks, dtype=torch.Tensor + + Returns: + - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor + """ + B, _, h, w = masks.shape + device = masks.device + xs = torch.arange(w, device=device, dtype=torch.int32) + ys = torch.arange(h, device=device, dtype=torch.int32) + grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy") + grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w) + grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w) + min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1) + max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1) + min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1) + max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1) + bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1) + + return bbox_coords + + +def _load_img_as_tensor(img_path, image_size): + img_pil = Image.open(img_path) + img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size))) + if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images + img_np = img_np / 255.0 + else: + raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}") + img = torch.from_numpy(img_np).permute(2, 0, 1) + video_width, video_height = img_pil.size # the original video size + return img, video_height, video_width + + +class AsyncVideoFrameLoader: + """ + A list of video frames to be load asynchronously without blocking session start. 
+ """ + + def __init__( + self, + img_paths, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + compute_device, + ): + self.img_paths = img_paths + self.image_size = image_size + self.offload_video_to_cpu = offload_video_to_cpu + self.img_mean = img_mean + self.img_std = img_std + # items in `self.images` will be loaded asynchronously + self.images = [None] * len(img_paths) + # catch and raise any exceptions in the async loading thread + self.exception = None + # video_height and video_width be filled when loading the first image + self.video_height = None + self.video_width = None + self.compute_device = compute_device + + # load the first frame to fill video_height and video_width and also + # to cache it (since it's most likely where the user will click) + self.__getitem__(0) + + # load the rest of frames asynchronously without blocking the session start + def _load_frames(): + try: + for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"): + self.__getitem__(n) + except Exception as e: + self.exception = e + + self.thread = Thread(target=_load_frames, daemon=True) + self.thread.start() + + def __getitem__(self, index): + if self.exception is not None: + raise RuntimeError("Failure in frame loading thread") from self.exception + + img = self.images[index] + if img is not None: + return img + + img, video_height, video_width = _load_img_as_tensor( + self.img_paths[index], self.image_size + ) + self.video_height = video_height + self.video_width = video_width + # normalize by mean and std + img -= self.img_mean + img /= self.img_std + if not self.offload_video_to_cpu: + img = img.to(self.compute_device, non_blocking=True) + # self.images[index] = img + return img + + def __len__(self): + return len(self.images) + + +def load_video_frames( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.485, 0.456, 0.406), + img_std=(0.229, 0.224, 0.225), + async_loading_frames=False, + compute_device=torch.device("cuda"), +): + """ + Load the video frames from video_path. The frames are resized to image_size as in + the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo. + """ + is_bytes = isinstance(video_path, bytes) + is_str = isinstance(video_path, str) + is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"] + if is_bytes or is_mp4_path: + return load_video_frames_from_video_file( + video_path=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + compute_device=compute_device, + ) + elif is_str and os.path.isdir(video_path): + return load_video_frames_from_jpg_images( + video_path=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + compute_device=compute_device, + ) + else: + raise NotImplementedError( + "Only MP4 video and JPEG folder are supported at this moment" + ) + + +def load_video_frames_from_jpg_images( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.485, 0.456, 0.406), + img_std=(0.229, 0.224, 0.225), + async_loading_frames=False, + compute_device=torch.device("cuda"), +): + """ + Load the video frames from a directory of JPEG files (".jpg" format). + + The frames are resized to image_size x image_size and are loaded to GPU if + `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`. + + You can load a frame asynchronously by setting `async_loading_frames` to `True`. 
+ """ + if isinstance(video_path, str) and os.path.isdir(video_path): + jpg_folder = video_path + else: + raise NotImplementedError( + "Only JPEG frames are supported at this moment. For video files, you may use " + "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n" + "```\n" + "ffmpeg -i .mp4 -q:v 2 -start_number 0 /'%05d.jpg'\n" + "```\n" + "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks " + "ffmpeg to start the JPEG file from 00000.jpg." + ) + + frame_names = [ + p + for p in os.listdir(jpg_folder) + if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"] + ] + frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) + num_frames = len(frame_names) + if num_frames == 0: + raise RuntimeError(f"no images found in {jpg_folder}") + img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names] + img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None] + + if async_loading_frames: + lazy_images = AsyncVideoFrameLoader( + img_paths, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + compute_device, + ) + return lazy_images, lazy_images.video_height, lazy_images.video_width + + images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32) + for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")): + images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size) + if not offload_video_to_cpu: + images = images.to(compute_device) + img_mean = img_mean.to(compute_device) + img_std = img_std.to(compute_device) + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width + + +def load_video_frames_from_video_file( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.485, 0.456, 0.406), + img_std=(0.229, 0.224, 0.225), + compute_device=torch.device("cuda"), +): + """Load the video frames from a video file.""" + import decord + + img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None] + # Get the original video height and width + decord.bridge.set_bridge("torch") + video_height, video_width, _ = decord.VideoReader(video_path).next().shape + # Iterate over all frames in the video + images = [] + for frame in decord.VideoReader(video_path, width=image_size, height=image_size): + images.append(frame.permute(2, 0, 1)) + + images = torch.stack(images, dim=0).float() / 255.0 + if not offload_video_to_cpu: + images = images.to(compute_device) + img_mean = img_mean.to(compute_device) + img_std = img_std.to(compute_device) + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width + + +def fill_holes_in_mask_scores(mask, max_area): + """ + A post processor to fill small holes in mask scores with area under `max_area`. + """ + # Holes are those connected components in background with area <= self.max_area + # (background regions are those with mask scores <= 0) + assert max_area > 0, "max_area must be positive" + + input_mask = mask + try: + labels, areas = get_connected_components(mask <= 0) + is_hole = (labels > 0) & (areas <= max_area) + # We fill holes with a small positive mask score (0.1) to change them to foreground. 
+ mask = torch.where(is_hole, 0.1, mask) + except Exception as e: + # Skip the post-processing step on removing small holes if the CUDA kernel fails + warnings.warn( + f"{e}\n\nSkipping the post-processing step due to the error above. You can " + "still use SAM 2 and it's OK to ignore the error above, although some post-processing " + "functionality may be limited (which doesn't affect the results in most cases; see " + "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).", + category=UserWarning, + stacklevel=2, + ) + mask = input_mask + + return mask + + +def concat_points(old_point_inputs, new_points, new_labels): + """Add new points and labels to previous point inputs (add at the end).""" + if old_point_inputs is None: + points, labels = new_points, new_labels + else: + points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1) + labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1) + + return {"point_coords": points, "point_labels": labels} diff --git a/sam2/utils/transforms.py b/sam2/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..cc17bebfab104b659c5469e8434cf357ae7e24b6 --- /dev/null +++ b/sam2/utils/transforms.py @@ -0,0 +1,118 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Normalize, Resize, ToTensor + + +class SAM2Transforms(nn.Module): + def __init__( + self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0 + ): + """ + Transforms for SAM2. + """ + super().__init__() + self.resolution = resolution + self.mask_threshold = mask_threshold + self.max_hole_area = max_hole_area + self.max_sprinkle_area = max_sprinkle_area + self.mean = [0.485, 0.456, 0.406] + self.std = [0.229, 0.224, 0.225] + self.to_tensor = ToTensor() + self.transforms = torch.jit.script( + nn.Sequential( + Resize((self.resolution, self.resolution)), + Normalize(self.mean, self.std), + ) + ) + + def __call__(self, x): + x = self.to_tensor(x) + return self.transforms(x) + + def forward_batch(self, img_list): + img_batch = [self.transforms(self.to_tensor(img)) for img in img_list] + img_batch = torch.stack(img_batch, dim=0) + return img_batch + + def transform_coords( + self, coords: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates, + If the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + + Returns + Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model. + """ + if normalize: + assert orig_hw is not None + h, w = orig_hw + coords = coords.clone() + coords[..., 0] = coords[..., 0] / w + coords[..., 1] = coords[..., 1] / h + + coords = coords * self.resolution # unnormalize coords + return coords + + def transform_boxes( + self, boxes: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates, + if the coords are in absolute image coordinates, normalize should be set to True and original image size is required. 
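+
+        Example sketch (box values are illustrative):
+
+            transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)
+            boxes = torch.tensor([[10., 20., 200., 180.]])  # one box in pixel coordinates
+            boxes_t = transforms.transform_boxes(boxes, normalize=True, orig_hw=(480, 640))
+            # boxes_t has shape (1, 2, 2): corner points scaled to the 1024x1024 model space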
+ """ + boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw) + return boxes + + def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor: + """ + Perform PostProcessing on output masks. + """ + from sam2.utils.misc import get_connected_components + + masks = masks.float() + input_masks = masks + mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image + try: + if self.max_hole_area > 0: + # Holes are those connected components in background with area <= self.fill_hole_area + # (background regions are those with mask scores <= self.mask_threshold) + labels, areas = get_connected_components( + mask_flat <= self.mask_threshold + ) + is_hole = (labels > 0) & (areas <= self.max_hole_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with a small positive mask score (10.0) to change them to foreground. + masks = torch.where(is_hole, self.mask_threshold + 10.0, masks) + + if self.max_sprinkle_area > 0: + labels, areas = get_connected_components( + mask_flat > self.mask_threshold + ) + is_hole = (labels > 0) & (areas <= self.max_sprinkle_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with negative mask score (-10.0) to change them to background. + masks = torch.where(is_hole, self.mask_threshold - 10.0, masks) + except Exception as e: + # Skip the post-processing step if the CUDA kernel fails + warnings.warn( + f"{e}\n\nSkipping the post-processing step due to the error above. You can " + "still use SAM 2 and it's OK to ignore the error above, although some post-processing " + "functionality may be limited (which doesn't affect the results in most cases; see " + "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).", + category=UserWarning, + stacklevel=2, + ) + masks = input_masks + + masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False) + return masks diff --git a/sam2/visualization.py b/sam2/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..a506ef92ad573802de6d685ba5f11a68a674fa48 --- /dev/null +++ b/sam2/visualization.py @@ -0,0 +1,127 @@ +import os +import cv2 +import numpy as np + +from sam2.distinctipy import get_colors + +from pycocotools import mask as Mask + + +def batch_visualize_masks(args, image, masks_rle, image_kpts, bboxes_xyxy, dt_bboxes, gt_masks_raw, bbox_ious, mask_ious, image_path=None, mask_out=False, alpha=1.0): + # Decode dt_masks_rle + dt_masks = [] + for mask_rle in masks_rle: + mask = Mask.decode(mask_rle) + dt_masks.append(mask) + dt_masks = np.array(dt_masks) + + # Decode gt_masks_raw + gt_masks = [] + for gt_mask in gt_masks_raw: + if gt_mask is None: + gt_masks.append(np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)) + else: + # gt_mask_rle = Mask.frPyObjects(gt_mask, image.shape[0], image.shape[1]) + # gt_mask_rle = Mask.merge(gt_mask_rle) + gt_mask_rle = gt_mask + mask = Mask.decode(gt_mask_rle) + gt_masks.append(mask) + gt_masks = np.array(gt_masks) + + # Generate random color for each mask + if mask_out: + dt_mask_image = dt_masks.max(axis=0) + dt_mask_image = (~ dt_mask_image.astype(bool)).astype(np.uint8) + dt_mask_image = cv2.resize(dt_mask_image, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST) + dt_mask_image = image * dt_mask_image[:, :, None] + dt_mask_image = cv2.addWeighted(image, 1-alpha, dt_mask_image, alpha, 0) + else: + colors = (np.array(get_colors(dt_masks.shape[0])) * 255).astype(int) + + # colors = np.random.randint(0, 255, (dt_masks.shape[0], 3)) + # # Make sure no 
colors are too dark + # np.clip(colors, 50, 255, out=colors) + + # Repeat masks to 3 channels + dt_masks = np.repeat(dt_masks[:, :, :, None], 3, axis=3) + gt_masks = np.repeat(gt_masks[:, :, :, None], 3, axis=3) + + # Colorize masks + dt_masks = dt_masks * colors[:, None, None, :] + gt_masks = gt_masks * colors[:, None, None, :] + + # # Remove masks that are too small + # dt_masks_area = dt_masks.any(axis=3).sum(axis=(1, 2)) + # dt_masks[dt_masks_area < 300*300] = 0 + + # Collapse masks to 3 channels + dt_mask_image = dt_masks.max(axis=0) + gt_mask_image = gt_masks.max(axis=0) + + # Convert to uint8 + dt_mask_image = dt_mask_image.astype(np.uint8) + gt_mask_image = gt_mask_image.astype(np.uint8) + + # Resize masks to image size + dt_mask_image = cv2.resize(dt_mask_image, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST) + gt_mask_image = cv2.resize(gt_mask_image, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST) + + # Add masks to image + if not mask_out: + + dt_mask_image = cv2.addWeighted(image, 0.6, dt_mask_image, 0.4, 0) + # Draw contours around the masks + for mask, color in zip(dt_masks, colors): + + color = color.astype(int).tolist() + + mask = mask.astype(np.uint8) + mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) + mask = cv2.resize(mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST) + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + cv2.drawContours(dt_mask_image, contours, -1, color, 1) + + gt_mask_image = cv2.addWeighted(image, 0.6, gt_mask_image, 0.4, 0) + + # Draw keypoints + if image_kpts is not None and not mask_out: + for instance_kpts, color in zip(image_kpts, colors): + color = tuple(color.astype(int).tolist()) + for kpt in instance_kpts: + cv2.circle(dt_mask_image, kpt.astype(int)[:2], 3, color, -1) + cv2.circle(gt_mask_image, kpt.astype(int)[:2], 3, color, -1) + + # Draw bboxes + if bboxes_xyxy is not None and not mask_out: + bboxes_xyxy = np.array(bboxes_xyxy) + dt_bboxes = np.array(dt_bboxes) + dt_bboxes[:, 2:] += dt_bboxes[:, :2] + for gt_bbox, dt_bbox, color, biou in zip(bboxes_xyxy, dt_bboxes, colors, bbox_ious): + color = tuple(color.astype(int).tolist()) + gbox = gt_bbox.astype(int) + dbox = dt_bbox.astype(int) + cv2.rectangle(dt_mask_image, (dbox[0], dbox[1]), (dbox[2], dbox[3]), color, 2) + cv2.rectangle(gt_mask_image, (gbox[0], gbox[1]), (gbox[2], gbox[3]), color, 2) + + # Write IOU on th etop-left corner of the bbox + # cv2.putText(dt_mask_image, "{:.2f}".format(biou), (dbox[0], dbox[1]-2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1) + # cv2.putText(gt_mask_image, "{:.2f}".format(biou), (gbox[0], gbox[1]-2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1) + + # Save the image + bbox_ious = np.array(bbox_ious) + mask_ious = np.array(mask_ious) + if image_path is not None: + save_name = os.path.basename(image_path) + else: + save_name = "batch_bbox_{:06.2f}_mask_{:06.2f}_{:02d}kpts_{:06d}.jpg".format( + bbox_ious.mean(), mask_ious.mean(), args.num_pos_keypoints, np.random.randint(1000000), + ) + + if 'debug_folder' not in args: + args.debug_folder = "debug" + + if mask_out: + cv2.imwrite(os.path.join(args.debug_folder, save_name), dt_mask_image) + else: + cv2.imwrite(os.path.join(args.debug_folder, save_name), np.hstack([gt_mask_image, dt_mask_image])) + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..80e433ba13b94735423d7d51023cc6264b5b24c8 --- /dev/null +++ b/setup.py @@ -0,0 +1,208 @@ +import os +import os.path as osp +import 
platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + +try: + import google.colab # noqa + ON_COLAB = True +except ImportError: + ON_COLAB = False + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + import sys + + # return short version for sdist + if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + return locals()['short_version'] + else: + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. + + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + + if ON_COLAB and info['package'] == 'xtcocotools': + # Due to an incompatibility between the Colab platform and the + # pre-built xtcocotools PyPI package, it is necessary to + # compile xtcocotools from source on Colab. + info = dict( + line=info['line'], + package='xtcocotools@' + 'git+https://github.com/jin-s13/xtcocoapi') + + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. 
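+
+    For example, an editable install (`pip install -e .`) on Linux symlinks `tools/`,
+    `configs/`, `demo/`, `model-index.yml` and `dataset-index.yml` (those that exist)
+    into `mmpose/.mim/`, while building an sdist or wheel copies them instead.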
+ """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = [ + 'tools', 'configs', 'demo', 'model-index.yml', 'dataset-index.yml' + ] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmpose', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='bbox-mask-pose', + version=get_version(), + description='BBoxMaskPose framework for itterative detection, segmentation and pose estimation.', + author='Miroslav Purkrabek', + author_email='mira.purkrabek@gmail.com', + keywords='computer vision, pose estimation, detection, segmentation, research, iccv 2025', + long_description=readme(), + long_description_content_type='text/markdown', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + package_data={'mmpose.ops': ['*/*.so'], 'sam2': ['*.so'], 'sam2.*': ['*.so']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + ], + url='https://github.com/MiraPurkrabek/BBoxMaskPose/', + license='General Public License 3.0', + python_requires='>=3.7', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + 'sam2_extras': parse_requirements('requirements/sam2_extras.txt'), + }, + zip_safe=False) diff --git a/version.py b/version.py new file mode 100644 index 0000000000000000000000000000000000000000..eadc2ae91bb3843e09aa9e0c5c1110f0def4bf33 --- /dev/null +++ b/version.py @@ -0,0 +1,29 @@ +__version__ = '1.1.0' +short_version = __version__ + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
+ """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + elif x.find('b') != -1: + patch_version = x.split('b') + version_info.append(int(patch_version[0])) + version_info.append(f'b{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) \ No newline at end of file